Remove suffix

This commit is contained in:
marsal wang
2021-12-29 16:52:23 +08:00
parent 71a81526cc
commit a13bb36e6a
23 changed files with 2505 additions and 7 deletions

21
kibana/.helmignore Normal file

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

6
kibana/Chart.lock Normal file

@@ -0,0 +1,6 @@
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  version: 1.10.3
digest: sha256:710e8247ae70ea63a2fb2fde4320511ff28c7b5c7a738861427f104a7718bdf4
generated: "2021-12-01T16:53:40.197046173Z"

28
kibana/Chart.yaml Normal file

@@ -0,0 +1,28 @@
annotations:
  category: Analytics
apiVersion: v2
appVersion: 7.16.2
dependencies:
  - name: common
    repository: https://charts.bitnami.com/bitnami
    tags:
      - bitnami-common
    version: 1.x.x
description: Kibana is an open source, browser based analytics and search dashboard for Elasticsearch.
engine: gotpl
home: https://github.com/bitnami/charts/tree/master/bitnami/kibana
icon: https://bitnami.com/assets/stacks/kibana/img/kibana-stack-220x234.png
keywords:
  - kibana
  - analytics
  - monitoring
  - metrics
  - logs
maintainers:
  - email: containers@bitnami.com
    name: Bitnami
name: kibana
sources:
  - https://github.com/bitnami/bitnami-docker-kibana
  - https://www.elastic.co/products/kibana
version: 9.1.7

395
kibana/README.md Normal file

@@ -0,0 +1,395 @@
# Kibana
[Kibana](https://www.elastic.co/kibana/) is an open source, browser based analytics and search dashboard for Elasticsearch.
## TL;DR
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/kibana --set elasticsearch.hosts[0]=<Hostname of your ES instance> --set elasticsearch.port=<port of your ES instance>
```
## Introduction
This chart bootstraps a [Kibana](https://github.com/bitnami/bitnami-docker-kibana) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.12+
- Helm 3.1.0+
- PV provisioner support in the underlying infrastructure
- ReadWriteMany volumes for deployment scaling
## Installing the Chart
This chart requires an Elasticsearch instance to work. You can use an already existing Elasticsearch instance.
To install the chart with the release name `my-release`:
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release \
--set elasticsearch.hosts[0]=<Hostname of your ES instance> \
--set elasticsearch.port=<port of your ES instance> \
bitnami/kibana
```
These commands deploy Kibana on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release. With Helm 3, the release history is removed as well; pass `--keep-history` to `helm delete` to retain it.
## Parameters
### Global parameters
| Name | Description | Value |
| ------------------------- | ----------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
### Common parameters
| Name | Description | Value |
| ------------------ | --------------------------------------------------------------------------------------------------------- | ----- |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `nameOverride` | String to partially override common.names.fullname template with a string (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override common.names.fullname template with a string | `""` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
### Kibana parameters
| Name | Description | Value |
| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
| `image.registry` | Kibana image registry | `docker.io` |
| `image.repository` | Kibana image repository | `bitnami/kibana` |
| `image.tag` | Kibana image tag (immutable tags are recommended) | `7.15.2-debian-10-r12` |
| `image.pullPolicy` | Kibana image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `replicaCount` | Number of replicas of the Kibana Pod | `1` |
| `updateStrategy.type` | Set up update strategy for Kibana installation. | `RollingUpdate` |
| `schedulerName` | Alternative scheduler | `""` |
| `hostAliases` | Add deployment host aliases | `[]` |
| `plugins` | Array containing the Kibana plugins to be installed in deployment | `[]` |
| `savedObjects.urls` | Array containing links to NDJSON files to be imported during Kibana initialization | `[]` |
| `savedObjects.configmap` | Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template) | `""` |
| `extraConfiguration` | Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template | `{}` |
| `configurationCM` | ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml | `""` |
| `extraEnvVars` | Array containing extra env vars to configure Kibana | `[]` |
| `extraEnvVarsCM` | ConfigMap containing extra env vars to configure Kibana | `""` |
| `extraEnvVarsSecret` | Secret containing extra env vars to configure Kibana (in case of sensitive data) | `""` |
| `extraVolumes` | Array to add extra volumes. Requires setting `extraVolumeMounts` | `[]` |
| `extraVolumeMounts` | Array to add extra mounts. Normally used with `extraVolumes` | `[]` |
| `volumePermissions.enabled`            | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)  | `false`                  |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `10-debian-10-r266` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
| `volumePermissions.resources` | Volume Permissions resources | `{}` |
| `persistence.enabled` | Enable persistence | `true` |
| `persistence.storageClass` | Kibana data Persistent Volume Storage Class | `""` |
| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `""` |
| `persistence.accessMode` | Access mode to the PV | `ReadWriteOnce` |
| `persistence.size` | Size for the PV | `10Gi` |
| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `120` |
| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
| `forceInitScripts` | Force execution of init scripts | `false` |
| `initScriptsCM` | Configmap with init scripts to execute | `""` |
| `initScriptsSecret` | Secret with init scripts to execute (for sensitive data) | `""` |
| `service.port` | Kubernetes Service port | `5601` |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.nodePort` | Specify the nodePort value for the LoadBalancer and NodePort service types | `""` |
| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `service.annotations` | Annotations for Kibana service (evaluated as a template) | `{}` |
| `service.labels` | Extra labels for Kibana service | `{}` |
| `service.loadBalancerIP` | loadBalancerIP if Kibana service type is `LoadBalancer` | `""` |
| `service.extraPorts` | Extra ports to expose in the service (normally used with the `sidecar` value) | `[]` |
| `ingress.enabled` | Enable ingress controller resource | `false` |
| `ingress.pathType` | Ingress Path type | `ImplementationSpecific` |
| `ingress.apiVersion` | Override API Version (automatically detected if not set) | `""` |
| `ingress.hostname` | Default host for the ingress resource. If specified as "*" no host rule is configured | `kibana.local` |
| `ingress.path` | The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` |
| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` |
| `ingress.tls` | Enable TLS configuration for the hostname defined at ingress.hostname parameter | `false` |
| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.extraPaths` | Additional arbitrary path/backend objects | `[]` |
| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` |
| `serviceAccount.create` | Enable creation of ServiceAccount for Kibana | `true` |
| `serviceAccount.name` | Name of serviceAccount | `""` |
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
| `containerPort` | Port to expose at container level | `5601` |
| `securityContext.enabled` | Enable securityContext on for Kibana deployment | `true` |
| `securityContext.fsGroup` | Group to configure permissions for volumes | `1001` |
| `securityContext.runAsUser` | User for the security context | `1001` |
| `securityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
| `resources.limits` | The resources limits for the container | `{}` |
| `resources.requests` | The requested resources for the container | `{}` |
| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` |
| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | Affinity for pod assignment | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Tolerations for pod assignment | `[]` |
| `podAnnotations` | Pod annotations | `{}` |
| `podLabels` | Extra labels to add to Pod | `{}` |
| `sidecars` | Attach additional containers to the pod | `[]` |
| `initContainers` | Add additional init containers to the pod | `[]` |
| `configuration` | Kibana configuration | `{}` |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.service.annotations` | Prometheus annotations for the Kibana service | `{}` |
| `metrics.serviceMonitor.enabled` | If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
### Kibana server TLS configuration
| Name | Description | Value |
| ---------------------- | ------------------------------------------------------------------------------ | ------- |
| `tls.enabled` | Enable SSL/TLS encryption for Kibana server (HTTPS) | `false` |
| `tls.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. | `false` |
| `tls.existingSecret` | Name of the existing secret containing Kibana server certificates | `""` |
| `tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of PKCS12 | `false` |
| `tls.keyPassword` | Password to access the PEM key when it is password-protected. | `""` |
| `tls.keystorePassword` | Password to access the PKCS12 keystore when it is password-protected. | `""` |
| `tls.passwordsSecret`  | Name of an existing secret containing the Keystore or PEM key password          | `""`    |
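For example, to serve Kibana over HTTPS with chart-generated self-signed certificates (a minimal sketch, suitable for testing only):
```yaml
tls:
  enabled: true
  autoGenerated: true
```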
### Elasticsearch parameters
| Name | Description | Value |
| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------- |
| `elasticsearch.hosts` | List of elasticsearch hosts to connect to. | `[]` |
| `elasticsearch.port` | Elasticsearch port | `""` |
| `elasticsearch.security.auth.enabled` | Set to 'true' if Elasticsearch has authentication enabled | `false` |
| `elasticsearch.security.auth.kibanaUsername` | Kibana server user to authenticate with Elasticsearch | `elastic` |
| `elasticsearch.security.auth.kibanaPassword` | Kibana server password to authenticate with Elasticsearch | `""` |
| `elasticsearch.security.auth.existingSecret` | Name of the existing secret containing the Password for the Kibana user | `""` |
| `elasticsearch.security.tls.enabled` | Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS) | `false` |
| `elasticsearch.security.tls.verificationMode` | Verification mode for SSL communications. | `full` |
| `elasticsearch.security.tls.existingSecret` | Name of the existing secret containing Elasticsearch Truststore or CA certificate. Required unless verificationMode=none | `""` |
| `elasticsearch.security.tls.usePemCerts` | Set to 'true' to use PEM certificates instead of PKCS12. | `false` |
| `elasticsearch.security.tls.truststorePassword` | Password to access the PKCS12 trustore in case it is password-protected. | `""` |
| `elasticsearch.security.tls.passwordsSecret`    | Name of an existing secret containing the Truststore password                                                              | `""`      |
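For example, the following values sketch connects Kibana to a secured Elasticsearch instance over HTTPS (the hostname, password and secret name are placeholders):
```yaml
elasticsearch:
  hosts:
    - elasticsearch-host
  port: "9200"
  security:
    auth:
      enabled: true
      kibanaPassword: SOME_PASSWORD
    tls:
      enabled: true
      existingSecret: elasticsearch-ca
```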
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install my-release \
--set admin.user=admin-user bitnami/kibana
```
The above command sets the Kibana admin user to `admin-user`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
$ helm install my-release -f values.yaml bitnami/kibana
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
### Change Kibana version
To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/change-image-version/).
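For example, to deploy a different image tag (the tag below is only illustrative; check the available tags of the repository you use):
```console
$ helm install my-release \
    --set image.tag=7.16.2-debian-10-r0 \
    --set elasticsearch.hosts[0]=elasticsearch-host \
    --set elasticsearch.port=9200 \
    bitnami/kibana
```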
### Use custom configuration
The Bitnami Kibana chart supports using custom configuration settings. For example, to mount a custom `kibana.yml` you can create a ConfigMap like the following:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: myconfig
data:
  kibana.yml: |-
    # Raw text of the file
```
Then pass the ConfigMap name to the corresponding parameter: `configurationCM=myconfig`.
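For example, at install time:
```console
$ helm install my-release \
    --set configurationCM=myconfig \
    --set elasticsearch.hosts[0]=elasticsearch-host \
    --set elasticsearch.port=9200 \
    bitnami/kibana
```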
An alternative is to provide extra configuration settings to the default kibana.yml that the chart deploys. This is done using the `extraConfiguration` value:
```yaml
extraConfiguration:
  "server.maxPayloadBytes": 1048576
  "server.pingTimeout": 1500
```
### Add extra environment variables
If you want to add extra environment variables (useful for advanced operations like custom init scripts), use the `extraEnvVars` property.
```yaml
extraEnvVars:
  - name: ELASTICSEARCH_VERSION
    value: "6"
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values.
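For example, a minimal sketch using a ConfigMap (the `my-kibana-env` name and the variable are illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-kibana-env
data:
  MY_EXTRA_VARIABLE: "some-value"
```
Then install the chart with `--set extraEnvVarsCM=my-kibana-env` (or use `extraEnvVarsSecret` to reference a Secret holding sensitive values).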
### Use custom initialization scripts
For advanced operations, the Bitnami Kibana chart allows using custom initialization scripts that will be mounted in `/docker-entrypoint-initdb.d`. Mount these extra scripts using a ConfigMap or a Secret (in case of sensitive data) and specify them via the `initScriptsCM` and `initScriptsSecret` chart parameters. Refer to the [chart documentation on custom initialization scripts](https://docs.bitnami.com/kubernetes/apps/kibana/administration/use-custom-init-scripts/) for an example.
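As a minimal sketch (the ConfigMap name and script content are illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-init-scripts
data:
  my-init-script.sh: |
    #!/bin/bash
    echo "==> Running custom initialization logic"
```
Then install the chart with `--set initScriptsCM=my-init-scripts` to have the script mounted and executed.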
### Install plugins
The Bitnami Kibana chart allows you to install a set of plugins at deployment time using the `plugins` chart parameter. Refer to the [chart documentation on installing plugins](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/install-plugins/) for an example.
```console
elasticsearch.hosts[0]=elasticsearch-host
elasticsearch.port=9200
plugins[0]=https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
```
> **NOTE** Make sure that the plugin is available for the Kibana version you are deploying
### Import saved objects
If you have visualizations and dashboards (in NDJSON format) to import to Kibana, create a ConfigMap that includes them and then install the chart with the `savedObjects.configmap` or `savedObjects.urls` parameters. Refer to the [chart documentation on importing saved objects](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/import-saved-objects/) for an example.
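For example, a sketch of such a ConfigMap (the name and NDJSON line are placeholders for your exported saved objects):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-saved-objects
data:
  my-dashboard.ndjson: |
    {"attributes":{"title":"My dashboard"},"id":"my-dashboard","type":"dashboard"}
```
Then install the chart with `--set savedObjects.configmap=my-saved-objects`, or point `savedObjects.urls[0]` at a downloadable NDJSON file instead.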
### Use Sidecars and Init Containers
If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter.
Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.bitnami.com/kubernetes/apps/kibana/configuration/configure-sidecar-init-containers/).
#### Add a sample Elasticsearch container as sidecar
This chart requires an Elasticsearch instance to work. For production, the options are to use an already existing Elasticsearch instance or deploy the [Elasticsearch chart](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch) with the [`global.kibanaEnabled=true` parameter](https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#enable-bundled-kibana).
For testing purposes, use a sidecar Elasticsearch container setting the following parameters during the Kibana chart installation:
```
elasticsearch.hosts[0]=localhost
elasticsearch.port=9200
sidecars[0].name=elasticsearch
sidecars[0].image=bitnami/elasticsearch:latest
sidecars[0].imagePullPolicy=IfNotPresent
sidecars[0].ports[0].name=http
sidecars[0].ports[0].containerPort=9200
```
### Set Pod affinity
This chart allows you to set custom Pod affinity using the `affinity` parameter. Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
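For example, to force replicas onto different nodes with the hard anti-affinity preset:
```console
$ helm install my-release \
    --set podAntiAffinityPreset=hard \
    --set elasticsearch.hosts[0]=elasticsearch-host \
    --set elasticsearch.port=9200 \
    bitnami/kibana
```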
## Persistence
The [Bitnami Kibana](https://github.com/bitnami/bitnami-docker-kibana) image can persist data. If enabled, the persisted path is `/bitnami/kibana` by default.
The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
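To reuse a volume you manage yourself instead of a dynamically provisioned one, point the chart at an existing claim (the claim name below is illustrative):
```yaml
persistence:
  enabled: true
  existingClaim: my-kibana-pvc
```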
### Add extra volumes
The Bitnami Kibana chart supports mounting extra volumes (either PVCs, Secrets or ConfigMaps) by using the `extraVolumes` and `extraVolumeMounts` properties. This can be combined with advanced operations like adding extra init containers and sidecars.
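A minimal sketch mounting an existing ConfigMap (the names and mount path are illustrative):
```yaml
extraVolumes:
  - name: my-extra-config
    configMap:
      name: my-extra-config
extraVolumeMounts:
  - name: my-extra-config
    mountPath: /opt/extra-config
    readOnly: true
```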
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
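For example:
```console
$ helm install my-release \
    --set volumePermissions.enabled=true \
    --set elasticsearch.hosts[0]=elasticsearch-host \
    --set elasticsearch.port=9200 \
    bitnami/kibana
```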
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
### To 9.0.0
This version updates the settings used by Kibana to communicate with Elasticsearch, adapting them to the Elasticsearch X-Pack Security features.
Previous setting `elasticsearch.tls` has been replaced with `elasticsearch.security.tls.enabled`. Other settings regarding certificate verification can be found under `elasticsearch.security.tls.*`, such as verification method and custom truststore.
Additionally, support for the Kibana server using TLS/SSL encryption (HTTPS for port 5601) has been added.
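As an illustration, a value such as the following in chart versions prior to 9.0.0:
```yaml
elasticsearch:
  tls: true
```
now becomes:
```yaml
elasticsearch:
  security:
    tls:
      enabled: true
```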
### To 8.0.0
The Kibana container configuration logic was migrated to bash.
From this version onwards, Kibana container components are now licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license) that is not currently accepted as an Open Source license by the Open Source Initiative (OSI).
Also, from now on, the Helm Chart will include the X-Pack plugin installed by default.
Regular upgrades from previous versions are compatible.
### To 6.2.0
This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
### To 6.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/apps/kibana/administration/upgrade-helm3/).
### To 5.0.0
This version does not include Elasticsearch as a bundled dependency. From now on, you should specify an external Elasticsearch instance using the `elasticsearch.hosts[]` and `elasticsearch.port` [parameters](#parameters).
### To 3.0.0
Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec.
In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
This major version signifies this change.
### To 2.0.0
This version enables, by default, an initContainer that modifies some kernel settings to meet the Elasticsearch requirements.
Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
You can disable the initContainer using the `elasticsearch.sysctlImage.enabled=false` parameter.


@@ -0,0 +1,5 @@
elasticsearch:
  hosts:
    - elasticsearch-1
    - elasticsearch-2
  port: 9300


@@ -0,0 +1,56 @@
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
** Please be patient while the chart is being deployed **
{{- if or (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
######################################################################################################
### ERROR: You did not provide the Elasticsearch external host or port in your 'helm install' call ###
######################################################################################################
Complete your Kibana deployment by running:
helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/kibana \
--set elasticsearch.hosts[0]=YOUR_ES_HOST,elasticsearch.port=YOUR_ES_PORT
Replace the "YOUR_ES_HOST" and "YOUR_ES_PORT" placeholders with the proper values of your Elasticsearch deployment.
{{- else -}}
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
Get the Kibana URL and associate Kibana hostname to your cluster external IP:
export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
echo "Kibana URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "common.names.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward svc/{{ include "common.names.fullname" . }} 8080:{{ .Values.service.port }}
{{- end }}
{{- if or .Values.ingress.enabled (contains "NodePort" .Values.service.type) (contains "LoadBalancer" .Values.service.type) }}
WARNING: Kibana is externally accessible from the cluster but the dashboard does not contain authentication mechanisms. Make sure you follow the authentication guidelines in your Elastic stack.
+info https://www.elastic.co/guide/en/elastic-stack-overview/current/setting-up-authentication.html
{{- end }}
{{- if .Values.metrics.enabled }}
WARNING: For Prometheus metrics to work, make sure that the kibana-prometheus-exporter plugin is installed:
+info https://github.com/pjhampton/kibana-prometheus-exporter
{{- end }}
{{- include "kibana.validateValues" . }}
{{- include "kibana.checkRollingTags" . }}
{{- end }}


@@ -0,0 +1,266 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper Kibana image name
*/}}
{{- define "kibana.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper image name (for the init container volume-permissions image)
*/}}
{{- define "kibana.volumePermissions.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "kibana.imagePullSecrets" -}}
{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
{{- end -}}
{{/*
Return true if the deployment should include dashboards
*/}}
{{- define "kibana.importSavedObjects" -}}
{{- if or .Values.savedObjects.configmap .Values.savedObjects.urls }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Set Elasticsearch URL.
*/}}
{{- define "kibana.elasticsearch.url" -}}
{{- if .Values.elasticsearch.hosts -}}
{{- $totalHosts := len .Values.elasticsearch.hosts -}}
{{- $protocol := ternary "https" "http" .Values.elasticsearch.security.tls.enabled -}}
{{- range $i, $hostTemplate := .Values.elasticsearch.hosts -}}
{{- $host := tpl $hostTemplate $ }}
{{- printf "%s://%s:%s" $protocol $host (include "kibana.elasticsearch.port" $) -}}
{{- if (lt ( add1 $i ) $totalHosts ) }}{{- printf "," -}}{{- end }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Set Elasticsearch Port.
*/}}
{{- define "kibana.elasticsearch.port" -}}
{{- .Values.elasticsearch.port -}}
{{- end -}}
{{/*
Set Elasticsearch PVC.
*/}}
{{- define "kibana.pvc" -}}
{{- .Values.persistence.existingClaim | default (include "common.names.fullname" .) -}}
{{- end -}}
{{/*
Get the initialization scripts Secret name.
*/}}
{{- define "kibana.initScriptsSecret" -}}
{{- printf "%s" (tpl .Values.initScriptsSecret $) -}}
{{- end -}}
{{/*
Get the initialization scripts configmap name.
*/}}
{{- define "kibana.initScriptsCM" -}}
{{- printf "%s" (tpl .Values.initScriptsCM $) -}}
{{- end -}}
{{/*
Get the saved objects configmap name.
*/}}
{{- define "kibana.savedObjectsCM" -}}
{{- printf "%s" (tpl .Values.savedObjects.configmap $) -}}
{{- end -}}
{{/*
Get the Kibana configuration ConfigMap name.
*/}}
{{- define "kibana.configurationCM" -}}
{{- .Values.configurationCM | default (printf "%s-conf" (include "common.names.fullname" .)) -}}
{{- end -}}
{{/*
Compile all warnings into a single message, and call fail.
*/}}
{{- define "kibana.validateValues" -}}
{{- $messages := list -}}
{{- $messages := append $messages (include "kibana.validateValues.noElastic" .) -}}
{{- $messages := append $messages (include "kibana.validateValues.configConflict" .) -}}
{{- $messages := append $messages (include "kibana.validateValues.extraVolumes" .) -}}
{{- $messages := append $messages (include "kibana.validateValues.tls" .) -}}
{{- $messages := append $messages (include "kibana.validateValues.elasticsearch.auth" .) -}}
{{- $messages := append $messages (include "kibana.validateValues.elasticsearch.tls" .) -}}
{{- $messages := without $messages "" -}}
{{- $message := join "\n" $messages -}}
{{- if $message -}}
{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
{{- end -}}
{{- end -}}
{{/* Validate values of Kibana - must provide an ElasticSearch */}}
{{- define "kibana.validateValues.noElastic" -}}
{{- if and (not .Values.elasticsearch.hosts) (not .Values.elasticsearch.port) -}}
kibana: no-elasticsearch
You did not specify an external Elasticsearch instance.
Please set elasticsearch.hosts and elasticsearch.port
{{- else if and (not .Values.elasticsearch.hosts) .Values.elasticsearch.port }}
kibana: missing-es-settings-host
You specified the external Elasticsearch port but not the host. Please
set elasticsearch.hosts
{{- else if and .Values.elasticsearch.hosts (not .Values.elasticsearch.port) }}
kibana: missing-es-settings-port
You specified the external Elasticsearch hosts but not the port. Please
set elasticsearch.port
{{- end -}}
{{- end -}}
{{/* Validate values of Kibana - configuration conflict */}}
{{- define "kibana.validateValues.configConflict" -}}
{{- if and (.Values.extraConfiguration) (.Values.configurationCM) -}}
kibana: conflict-configuration
You specified a ConfigMap with kibana.yml and a set of settings to be added
to the default kibana.yml. Please only set either extraConfiguration or configurationCM
{{- end -}}
{{- end -}}
{{/* Validate values of Kibana - Incorrect extra volume settings */}}
{{- define "kibana.validateValues.extraVolumes" -}}
{{- if and (.Values.extraVolumes) (not .Values.extraVolumeMounts) -}}
kibana: missing-extra-volume-mounts
You specified extra volumes but not mount points for them. Please set
the extraVolumeMounts value
{{- end -}}
{{- end -}}
{{/* Validate values of Kibana - No certificates for Kibana server */}}
{{- define "kibana.validateValues.tls" -}}
{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) (not .Values.tls.autoGenerated) -}}
kibana: tls.enabled
In order to enable HTTPS for Kibana, you also need to provide an existing secret
containing the TLS certificates (--set tls.existingSecret="my-secret") or enable
auto-generated certificates (--set tls.autoGenerated=true).
{{- end -}}
{{- end -}}
{{/* Validate values of Kibana - No credentials for Elasticsearch auth */}}
{{- define "kibana.validateValues.elasticsearch.auth" -}}
{{- if and .Values.elasticsearch.security.auth.enabled (not .Values.elasticsearch.security.auth.kibanaPassword) (not .Values.elasticsearch.security.auth.existingSecret) -}}
kibana: missing-kibana-credentials
You enabled Elasticsearch authentication but you didn't provide the required credentials for
Kibana to connect. Please provide them (--set elasticsearch.security.auth.kibanaPassword="XXXXX")
or the name of an existing secret containing them (--set elasticsearch.security.auth.existingSecret="my-secret").
{{- end -}}
{{- end -}}
{{/* Validate values of Kibana - Elasticsearch HTTPS no trusted CA */}}
{{- define "kibana.validateValues.elasticsearch.tls" -}}
{{- if and .Values.elasticsearch.security.tls.enabled (ne "none" .Values.elasticsearch.security.tls.verificationMode) (not .Values.elasticsearch.security.tls.existingSecret) -}}
kibana: missing-elasticsearch-trusted-ca
You configured communication with Elasticsearch REST API using HTTPS and
verification enabled but no existing secret containing the Truststore or CA
certificate was provided (--set elasticsearch.security.tls.existingSecret="my-secret").
{{- end -}}
{{- end -}}
{{/*
Check if there are rolling tags in the images
*/}}
{{- define "kibana.checkRollingTags" -}}
{{- include "common.warnings.rollingTag" .Values.image }}
{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
{{- end -}}
{{/*
Return the secret containing Kibana TLS certificates
*/}}
{{- define "kibana.tlsSecretName" -}}
{{- $secretName := .Values.tls.existingSecret -}}
{{- if $secretName -}}
{{- printf "%s" (tpl $secretName $) -}}
{{- else -}}
{{- printf "%s-crt" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a TLS secret object should be created
*/}}
{{- define "kibana.createTlsSecret" -}}
{{- if and .Values.tls.enabled .Values.tls.autoGenerated (not .Values.tls.existingSecret) }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
basePath URL in use by the APIs.
*/}}
{{- define "kibana.basePath" -}}
{{- if (.Values.configuration.server.rewriteBasePath) }}
{{- .Values.configuration.server.basePath -}}
{{- end -}}
{{- end -}}
{{/*
Return true if a Passwords secret object should be created
*/}}
{{- define "kibana.createSecret" -}}
{{- $kibanaPassword := and .Values.elasticsearch.security.auth.enabled (not .Values.elasticsearch.security.auth.existingSecret) -}}
{{- $serverTlsPassword := and .Values.tls.enabled (or .Values.tls.keystorePassword .Values.tls.keyPassword) (not .Values.tls.passwordsSecret) -}}
{{- $elasticsearchTlsPassword := and .Values.elasticsearch.security.tls.enabled .Values.elasticsearch.security.tls.truststorePassword (not .Values.elasticsearch.security.tls.passwordsSecret) -}}
{{- if or $kibanaPassword $serverTlsPassword $elasticsearchTlsPassword }}
{{- true -}}
{{- end -}}
{{- end -}}
{{/*
Return the name of secret containing the Elasticsearch auth credentials
*/}}
{{- define "kibana.elasticsearch.auth.secretName" -}}
{{- if .Values.elasticsearch.security.auth.existingSecret -}}
{{- printf "%s" .Values.elasticsearch.security.auth.existingSecret -}}
{{- else -}}
{{- printf "%s" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the name of the secret containing the Elasticsearch TLS passwords
*/}}
{{- define "kibana.elasticsearch.tls.secretName" -}}
{{- if .Values.elasticsearch.security.tls.passwordsSecret -}}
{{- printf "%s" .Values.elasticsearch.security.tls.passwordsSecret -}}
{{- else -}}
{{- printf "%s" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Return the name of the secret containing the Kibana server TLS passwords
*/}}
{{- define "kibana.tls.secretName" -}}
{{- if .Values.tls.passwordsSecret -}}
{{- printf "%s" .Values.tls.passwordsSecret -}}
{{- else -}}
{{- printf "%s" (include "common.names.fullname" .) -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "kibana.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "common.names.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -0,0 +1,20 @@
{{- if and (not .Values.configurationCM) (and .Values.elasticsearch.hosts .Values.elasticsearch.port) }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "common.names.fullname" . }}-conf
  labels: {{- include "common.labels.standard" . | nindent 4 }}
data:
  kibana.yml: |
    pid.file: /opt/bitnami/kibana/tmp/kibana.pid
    server.host: "::"
    server.port: {{ .Values.containerPort }}
    elasticsearch.hosts: [{{ include "kibana.elasticsearch.url" . }}]
    {{- if .Values.configuration.server.basePath }}
    server.basePath: {{ .Values.configuration.server.basePath | quote }}
    {{- end }}
    server.rewriteBasePath: {{ .Values.configuration.server.rewriteBasePath }}
    {{- if .Values.extraConfiguration }}
    {{- tpl (toYaml .Values.extraConfiguration) $ | nindent 4 }}
    {{- end }}
{{- end }}


@@ -0,0 +1,283 @@
{{- if and .Values.elasticsearch.hosts .Values.elasticsearch.port -}}
apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
kind: Deployment
metadata:
  name: {{ include "common.names.fullname" . }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  {{- if .Values.updateStrategy }}
  strategy: {{- tpl (toYaml .Values.updateStrategy) $ | nindent 4 }}
  {{- end }}
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      {{- if or .Values.podAnnotations (include "kibana.createTlsSecret" .) }}
      annotations:
        {{- if (include "kibana.createTlsSecret" .) }}
        checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }}
        {{- end }}
        {{- if .Values.podAnnotations }}
        {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }}
        {{- end }}
      {{- end }}
      labels: {{- include "common.labels.standard" . | nindent 8 }}
        app: kibana
        {{- if .Values.podLabels }}
        {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
        {{- end }}
    spec:
      {{- include "kibana.imagePullSecrets" . | nindent 6 }}
      {{- if .Values.hostAliases }}
      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }}
      {{- else }}
      affinity:
        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }}
        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }}
      {{- end }}
      {{- if .Values.schedulerName }}
      schedulerName: {{ .Values.schedulerName | quote }}
      {{- end }}
      serviceAccountName: {{ include "kibana.serviceAccountName" . }}
      {{- if .Values.securityContext.enabled }}
      securityContext:
        fsGroup: {{ .Values.securityContext.fsGroup }}
      {{- end }}
      {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }}
      initContainers:
        {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}
        - name: volume-permissions
          image: "{{ template "kibana.volumePermissions.image" . }}"
          imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
          command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "/bitnami/kibana"]
          securityContext:
            runAsUser: 0
          resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }}
          volumeMounts:
            - name: kibana-data
              mountPath: /bitnami/kibana
        {{- end }}
        {{- if .Values.initContainers }}
        {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 10 }}
        {{- end }}
      {{- end }}
      containers:
        - name: kibana
          image: {{ include "kibana.image" . }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- if .Values.securityContext.enabled }}
          securityContext:
            runAsUser: {{ .Values.securityContext.runAsUser }}
          {{- end }}
          env:
            - name: KIBANA_PORT_NUMBER
              value: {{ .Values.containerPort | quote }}
            - name: KIBANA_ELASTICSEARCH_URL
              value: {{ include "kibana.elasticsearch.url" . | quote }}
            - name: KIBANA_ELASTICSEARCH_PORT_NUMBER
              value: {{ include "kibana.elasticsearch.port" . | quote }}
            - name: KIBANA_FORCE_INITSCRIPTS
              value: {{ .Values.forceInitScripts | quote }}
            - name: KIBANA_SERVER_ENABLE_TLS
              value: {{ ternary "true" "false" .Values.tls.enabled | quote }}
            {{- if or .Values.tls.usePemCerts (include "kibana.createTlsSecret" . ) }}
            - name: KIBANA_SERVER_TLS_USE_PEM
              value: "true"
            {{- end }}
            {{- if and .Values.tls.enabled .Values.tls.usePemCerts (or .Values.tls.keyPassword .Values.tls.passwordsSecret) }}
            - name: KIBANA_SERVER_KEY_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "kibana.tls.secretName" . }}
                  key: kibana-key-password
            {{- end }}
            {{- if and .Values.tls.enabled (not .Values.tls.usePemCerts) (or .Values.tls.keystorePassword .Values.tls.passwordsSecret) }}
            - name: KIBANA_SERVER_KEYSTORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "kibana.tls.secretName" . }}
                  key: kibana-keystore-password
            {{- end }}
            {{- if .Values.elasticsearch.security.auth.enabled }}
            - name: KIBANA_USERNAME
              value: {{ .Values.elasticsearch.security.auth.kibanaUsername | quote }}
            - name: KIBANA_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "kibana.elasticsearch.auth.secretName" . }}
                  key: kibana-password
            {{- end }}
            - name: KIBANA_ELASTICSEARCH_ENABLE_TLS
              value: {{ ternary "true" "false" .Values.elasticsearch.security.tls.enabled | quote }}
            - name: KIBANA_ELASTICSEARCH_TLS_USE_PEM
              value: {{ ternary "true" "false" .Values.elasticsearch.security.tls.usePemCerts | quote }}
            - name: KIBANA_ELASTICSEARCH_TLS_VERIFICATION_MODE
              value: {{ .Values.elasticsearch.security.tls.verificationMode | quote }}
            {{- if and .Values.elasticsearch.security.tls.enabled (not .Values.elasticsearch.security.tls.usePemCerts) (or .Values.elasticsearch.security.tls.truststorePassword .Values.elasticsearch.security.tls.passwordsSecret) }}
            - name: KIBANA_ELASTICSEARCH_TRUSTSTORE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "kibana.elasticsearch.tls.secretName" . }}
                  key: elasticsearch-truststore-password
            {{- end }}
            {{- if .Values.extraEnvVars }}
            {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
            {{- end }}
          {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }}
          envFrom:
            {{- if .Values.extraEnvVarsCM }}
            - configMapRef:
                name: {{ .Values.extraEnvVarsCM }}
            {{- end }}
            {{- if .Values.extraEnvVarsSecret }}
            - secretRef:
                name: {{ .Values.extraEnvVarsSecret }}
            {{- end }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.containerPort }}
              protocol: TCP
          {{- if .Values.livenessProbe.enabled }}
          livenessProbe:
            httpGet:
              {{- if .Values.configuration.server.rewriteBasePath }}
              path: {{ .Values.configuration.server.basePath }}/login
              {{- else }}
              path: /login
              {{- end }}
              port: http
              scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled }}
            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
            successThreshold: {{ .Values.livenessProbe.successThreshold }}
            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
          {{- end }}
          {{- if .Values.readinessProbe.enabled }}
          readinessProbe:
            httpGet:
              {{- if .Values.configuration.server.rewriteBasePath }}
              path: {{ .Values.configuration.server.basePath }}/login
              {{- else }}
              path: /login
              {{- end }}
              port: http
              scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled }}
            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
            successThreshold: {{ .Values.readinessProbe.successThreshold }}
            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
          {{- end }}
          {{- if .Values.resources }}
          resources: {{- include "common.tplvalues.render" (dict "value" .Values.resources "context" $) | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: kibana-data
              mountPath: /bitnami/kibana
            - name: kibana-config
              mountPath: /bitnami/kibana/conf
            {{- if .Values.tls.enabled }}
            - name: kibana-certificates
              mountPath: /opt/bitnami/kibana/config/certs/server
              readOnly: true
            {{- end }}
            {{- if and .Values.elasticsearch.security.tls.enabled (not (eq .Values.elasticsearch.security.tls.verificationMode "none" )) }}
            - name: elasticsearch-certificates
              mountPath: /opt/bitnami/kibana/config/certs/elasticsearch
              readOnly: true
            {{- end }}
            {{- if .Values.plugins }}
            - name: plugins-init-scripts
              mountPath: /docker-entrypoint-initdb.d/plugin-install
            {{- end }}
            {{- if (include "kibana.importSavedObjects" .) }}
            - name: saved-objects-init-scripts
              mountPath: /docker-entrypoint-initdb.d/saved-objects-import
            {{- end }}
            {{- if .Values.savedObjects.configmap }}
            - name: saved-objects-configmap
              mountPath: /bitnami/kibana/saved-objects
            {{- end }}
            {{- if .Values.initScriptsCM }}
            - name: custom-init-scripts-cm
              mountPath: /docker-entrypoint-initdb.d/cm
            {{- end }}
            {{- if .Values.initScriptsSecret }}
            - name: custom-init-scripts-secret
              mountPath: /docker-entrypoint-initdb.d/secret
            {{- end }}
            {{- if .Values.extraVolumeMounts }}
            {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
            {{- end }}
        {{- if .Values.sidecars }}
        {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
        {{- end }}
      volumes:
        - name: kibana-data
          {{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ include "kibana.pvc" . }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- if .Values.tls.enabled }}
        - name: kibana-certificates
          secret:
            secretName: {{ include "kibana.tlsSecretName" . }}
            defaultMode: 256
        {{- end }}
        {{- if and .Values.elasticsearch.security.tls.enabled (ne .Values.elasticsearch.security.tls.verificationMode "none" ) }}
        - name: elasticsearch-certificates
          secret:
            secretName: {{ required "A secret containing the Truststore or CA certificate for Elasticsearch is required" .Values.elasticsearch.security.tls.existingSecret }}
            defaultMode: 256
        {{- end }}
        - name: kibana-config
          configMap:
            name: {{ include "kibana.configurationCM" . }}
        {{- if (include "kibana.importSavedObjects" .) }}
        - name: saved-objects-init-scripts
          configMap:
            name: {{ include "common.names.fullname" . }}-saved-objects
            defaultMode: 0755
        {{- end }}
        {{- if .Values.plugins }}
        - name: plugins-init-scripts
          configMap:
            name: {{ include "common.names.fullname" . }}-plugins
            defaultMode: 0755
        {{- end }}
        {{- if .Values.initScriptsCM }}
        - name: custom-init-scripts-cm
          configMap:
            name: {{ template "kibana.initScriptsCM" . }}
            defaultMode: 0755
        {{- end }}
        {{- if .Values.initScriptsSecret }}
        - name: custom-init-scripts-secret
          secret:
            secretName: {{ template "kibana.initScriptsSecret" . }}
            defaultMode: 0755
        {{- end }}
        {{- if .Values.savedObjects.configmap }}
        - name: saved-objects-configmap
          configMap:
            name: {{ template "kibana.savedObjectsCM" . }}
        {{- end }}
        {{- if .Values.extraVolumes }}
        {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
        {{- end }}
{{- end }}


@@ -0,0 +1,4 @@
{{- range .Values.extraDeploy }}
---
{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
{{- end }}


@@ -0,0 +1,58 @@
{{- if .Values.ingress.enabled }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ include "common.names.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    {{- if .Values.commonLabels }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
    {{- end }}
  {{- if or .Values.ingress.annotations .Values.commonAnnotations .Values.ingress.certManager }}
  annotations:
    {{- if .Values.ingress.certManager }}
    kubernetes.io/tls-acme: "true"
    {{- end }}
    {{- if .Values.ingress.annotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.annotations "context" $) | nindent 4 }}
    {{- end }}
    {{- if .Values.commonAnnotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  rules:
    {{- if .Values.ingress.hostname }}
    - http:
        paths:
          - path: {{ .Values.ingress.path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
            pathType: {{ .Values.ingress.pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "http" "context" $) | nindent 14 }}
      {{- if ne .Values.ingress.hostname "*" }}
      host: {{ .Values.ingress.hostname }}
      {{- end }}
    {{- end }}
    {{- range .Values.ingress.extraHosts }}
    - host: {{ .name }}
      http:
        paths:
          - path: {{ default "/" .path }}
            {{- if eq "true" (include "common.ingress.supportsPathType" $) }}
            pathType: {{ default "ImplementationSpecific" .pathType }}
            {{- end }}
            backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }}
    {{- end }}
  {{- if or .Values.ingress.tls .Values.ingress.extraTls }}
  tls:
    {{- if .Values.ingress.tls }}
    - hosts:
        - {{ .Values.ingress.hostname }}
      secretName: {{ printf "%s-tls" .Values.ingress.hostname }}
    {{- end }}
    {{- if .Values.ingress.extraTls }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraTls "context" $ ) | nindent 4 }}
    {{- end }}
  {{- end }}
{{- end }}


@@ -0,0 +1,18 @@
{{- if .Values.plugins -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "common.names.fullname" . }}-plugins
  labels: {{- include "common.labels.standard" . | nindent 4 }}
data:
  install-plugins.sh: |
    #!/bin/bash
    echo "==> Plugin installation"
    {{- $totalPlugins := len .Values.plugins }}
    echo "Total plugins defined in chart installation: {{ $totalPlugins }}"
    {{- range $i, $plugin := .Values.plugins }}
    echo "Installing plugin {{ add $i 1 }} out of {{ $totalPlugins }}: {{ $plugin }}"
    kibana-plugin install "{{ $plugin }}"
    {{- end }}
    echo "==> End of Plugin installation"
{{- end -}}

14
kibana/templates/pvc.yaml Normal file

@@ -0,0 +1,14 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ include "common.names.fullname" . }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
  {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }}
{{- end -}}


@@ -0,0 +1,39 @@
{{- if (include "kibana.importSavedObjects" .) -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "common.names.fullname" . }}-saved-objects
  labels: {{- include "common.labels.standard" . | nindent 4 }}
data:
  {{- $savedObjectsUrl := printf "localhost:%d%s/api/saved_objects/_import" (int .Values.containerPort) (include "kibana.basePath" .) }}
  import-saved-objects.sh: |
    #!/bin/bash
    echo "==> Saved objects import"
    {{- if .Values.savedObjects.urls }}
    {{- $totalURLs := len .Values.savedObjects.urls }}
    echo "Total saved objects NDJSON URLs to import: {{ $totalURLs }}"
    {{- range $i, $url := .Values.savedObjects.urls }}
    echo "Importing saved objects from NDJSON in url {{ add $i 1 }} out of {{ $totalURLs }}: {{ $url }}"
    download_tmp_file="$(mktemp)"
    curl "{{ $url }}" > "${download_tmp_file}.ndjson"
    curl -s --connect-timeout 60 --max-time 60 -XPOST {{ $savedObjectsUrl }} -H 'kbn-xsrf:true' --form file=@${download_tmp_file}.ndjson
    {{- end }}
    {{- end }}
    {{- if .Values.savedObjects.configmap }}
    echo "Searching for dashboard NDJSON files from ConfigMap mounted in /bitnami/kibana/saved-objects"
    ndjson_file_list_tmp="$(mktemp)"
    find /bitnami/kibana/saved-objects -type f -regex ".*\.ndjson" > $ndjson_file_list_tmp
    while read -r f; do
      case "$f" in
        *.ndjson)
          echo "Importing $f"
          curl -s --connect-timeout 60 --max-time 60 -XPOST {{ $savedObjectsUrl }} -H 'kbn-xsrf:true' --form file=@${f}
          ;;
        *)
          echo "Ignoring $f"
          ;;
      esac
    done < $ndjson_file_list_tmp
    {{- end }}
    echo "==> End of Saved objects import"
{{- end -}}


@@ -0,0 +1,29 @@
{{- if (include "kibana.createSecret" .) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "common.names.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- if and .Values.elasticsearch.security.auth.enabled (not .Values.elasticsearch.security.auth.existingSecret) }}
kibana-password: {{ required "A Kibana password is required!" .Values.elasticsearch.security.auth.kibanaPassword | b64enc }}
{{- end }}
{{- if and .Values.tls.enabled (not .Values.tls.passwordsSecret) }}
{{- if .Values.tls.keyPassword }}
kibana-key-password: {{ .Values.tls.keyPassword | b64enc | quote }}
{{- end }}
{{- if .Values.tls.keystorePassword }}
kibana-keystore-password: {{ .Values.tls.keystorePassword | b64enc | quote }}
{{- end }}
{{- end }}
{{- if and .Values.elasticsearch.security.tls.enabled .Values.elasticsearch.security.tls.truststorePassword (not .Values.elasticsearch.security.tls.passwordsSecret) }}
elasticsearch-truststore-password: {{ .Values.elasticsearch.security.tls.truststorePassword | b64enc | quote }}
{{- end }}
{{- end }}

@@ -0,0 +1,41 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "common.names.fullname" . }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.service.labels }}
{{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }}
{{- end }}
{{- if or (and .Values.metrics.enabled .Values.metrics.service.annotations) .Values.service.annotations }}
annotations:
{{- if and .Values.metrics.enabled .Values.metrics.service.annotations }}
{{- tpl (toYaml .Values.metrics.service.annotations) $ | nindent 4 }}
{{- end }}
{{- if .Values.service.annotations }}
{{- tpl (toYaml .Values.service.annotations) $ | nindent 4 }}
{{- end }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if eq .Values.service.type "LoadBalancer" }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- end }}
{{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
targetPort: http
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{ .Values.service.nodePort }}
{{- else if eq .Values.service.type "ClusterIP" }}
nodePort: null
{{- end }}
{{- if .Values.service.extraPorts }}
{{- tpl (toYaml .Values.service.extraPorts) $ | nindent 4 }}
{{- end }}
selector: {{- include "common.labels.matchLabels" . | nindent 4 }}

@@ -0,0 +1,20 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kibana.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }}
annotations:
{{- if .Values.commonAnnotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.serviceAccount.annotations }}
{{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,28 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "common.names.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
selector:
matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
{{- if .Values.metrics.serviceMonitor.selector }}
{{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
{{- end }}
endpoints:
- port: http
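# Metrics path exposed by the kibana-prometheus-exporter plugin (see metrics.enabled)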
path: "/_prometheus/metrics"
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

@@ -0,0 +1,25 @@
{{- if (include "kibana.createTlsSecret" .) }}
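{{/* Auto-generate a CA and a server certificate (365-day validity) covering the service DNS names */}}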
{{- $ca := genCA "kibana-ca" 365 }}
{{- $releaseNamespace := .Release.Namespace }}
{{- $clusterDomain := .Values.clusterDomain }}
{{- $serviceName := include "common.names.fullname" . }}
{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $serviceName }}
{{- $crt := genSignedCert $serviceName nil $altNames 365 $ca }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-crt" (include "common.names.fullname" .) }}
namespace: {{ .Release.Namespace | quote }}
labels: {{- include "common.labels.standard" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }}
{{- end }}
type: kubernetes.io/tls
data:
ca.crt: {{ $ca.Cert | b64enc | quote }}
tls.crt: {{ $crt.Cert | b64enc | quote }}
tls.key: {{ $crt.Key | b64enc | quote }}
{{- end }}

kibana/values.yaml Normal file
@@ -0,0 +1,569 @@
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## @section Common parameters
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template with a string
##
fullnameOverride: ""
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @section Kibana parameters
## Bitnami Kibana image version
## ref: https://hub.docker.com/r/bitnami/kibana/tags/
## @param image.registry Kibana image registry
## @param image.repository Kibana image repository
## @param image.tag Kibana image tag (immutable tags are recommended)
## @param image.pullPolicy Kibana image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/kibana
tag: 7.16.2-debian-10-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param replicaCount Number of replicas of the Kibana Pod
##
replicaCount: 1
## @param updateStrategy.type Set up update strategy for Kibana installation.
## Set to Recreate if you use a persistent volume that cannot be mounted on more than one pod, to make sure the pods are destroyed first.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## Example:
## updateStrategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
updateStrategy:
type: RollingUpdate
## @param schedulerName Alternative scheduler
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param hostAliases Add deployment host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param plugins Array containing the Kibana plugins to be installed in deployment
## eg:
## plugins:
## - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
##
plugins: []
## Saved objects to import (NDJSON format)
##
savedObjects:
## @param savedObjects.urls Array containing links to NDJSON files to be imported during Kibana initialization
## e.g:
## urls:
## - www.example.com/dashboard.ndjson
##
urls: []
## @param savedObjects.configmap Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template)
##
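## e.g. (illustrative name; the referenced ConfigMap must contain *.ndjson files):
## configmap: "my-kibana-dashboards"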
configmap: ""
## @param extraConfiguration Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template
##
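## e.g. (illustrative; accepts any valid kibana.yml settings):
## extraConfiguration:
##   "server.maxPayloadBytes": 1048576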
extraConfiguration: {}
## @param configurationCM ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml
##
configurationCM: ""
## @param extraEnvVars Array containing extra env vars to configure Kibana
## For example:
## extraEnvVars:
## - name: KIBANA_ELASTICSEARCH_URL
## value: test
##
extraEnvVars: []
## @param extraEnvVarsCM ConfigMap containing extra env vars to configure Kibana
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Secret containing extra env vars to configure Kibana (in case of sensitive data)
##
extraEnvVarsSecret: ""
## @param extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts`
##
extraVolumes: []
## @param extraVolumeMounts Array to add extra mounts. Normally used with `extraVolumes`
##
extraVolumeMounts: []
## Init containers parameters:
## volumePermissions: Change the owner of the persistent volume mountpoint to runAsUser:fsGroup
##
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)
##
enabled: false
## @param volumePermissions.image.registry Init container volume-permissions image registry
## @param volumePermissions.image.repository Init container volume-permissions image name
## @param volumePermissions.image.tag Init container volume-permissions image tag
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
##
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r284
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param volumePermissions.resources Volume Permissions resources
## resources:
## requests:
## memory: 128Mi
## cpu: 100m
resources: {}
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## @param persistence.enabled Enable persistence
##
enabled: true
## @param persistence.storageClass Kibana data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim`
##
existingClaim: ""
## @param persistence.accessMode Access mode to the PV
##
accessMode: ReadWriteOnce
## @param persistence.size Size for the PV
##
size: 10Gi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param livenessProbe.enabled Enable/disable the Liveness probe
## @param livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param livenessProbe.periodSeconds How often to perform the probe
## @param livenessProbe.timeoutSeconds When the probe times out
## @param livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
## @param livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
##
livenessProbe:
enabled: true
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param readinessProbe.enabled Enable/disable the Readiness probe
## @param readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param readinessProbe.periodSeconds How often to perform the probe
## @param readinessProbe.timeoutSeconds When the probe times out
## @param readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
## @param readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
##
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param forceInitScripts Force execution of init scripts
##
forceInitScripts: false
## @param initScriptsCM Configmap with init scripts to execute
##
initScriptsCM: ""
## @param initScriptsSecret Secret with init scripts to execute (for sensitive data)
##
initScriptsSecret: ""
## Service configuration
##
service:
## @param service.port Kubernetes Service port
##
port: 5601
## @param service.type Kubernetes Service type
##
type: ClusterIP
## @param service.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## @param service.externalTrafficPolicy Enable client source IP preservation
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param service.annotations Annotations for Kibana service (evaluated as a template)
## This can be used to set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
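## e.g. (cloud-provider specific; an AWS internal load balancer is shown purely as an illustration):
## annotations:
##   service.beta.kubernetes.io/aws-load-balancer-internal: "true"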
annotations: {}
## @param service.labels Extra labels for Kibana service
##
labels: {}
## @param service.loadBalancerIP loadBalancerIP if Kibana service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
loadBalancerIP: ""
## @param service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
##
extraPorts: []
## Configure the ingress resource that allows you to access the
## Kibana installation. Set up the URL
## ref: https://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## @param ingress.enabled Enable ingress controller resource
##
enabled: false
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
## certManager: false
##
## @param ingress.pathType Ingress Path type
##
pathType: ImplementationSpecific
## @param ingress.apiVersion Override API Version (automatically detected if not set)
##
apiVersion: ""
## @param ingress.hostname Default host for the ingress resource. If specified as "*" no host rule is configured
##
hostname: kibana.local
## @param ingress.path The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers.
##
path: /
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
##
tls: false
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: kibana.local
## path: /
##
extraHosts: []
## @param ingress.extraPaths Additional arbitrary path/backend objects
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - kibana.local
## secretName: kibana.local-tls
##
extraTls: []
## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## - name: kibana.local-tls
## key:
## certificate:
##
secrets: []
## @param serviceAccount.create Enable creation of ServiceAccount for Kibana
## @param serviceAccount.name Name of serviceAccount
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
serviceAccount:
create: true
name: ""
annotations: {}
## @param containerPort Port to expose at container level
##
containerPort: 5601
## @param securityContext.enabled Enable securityContext for the Kibana deployment
## @param securityContext.fsGroup Group to configure permissions for volumes
## @param securityContext.runAsUser User for the security context
## @param securityContext.runAsNonRoot Set container's Security Context runAsNonRoot
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001
runAsNonRoot: true
## Kibana resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param resources.limits The resources limits for the container
## @param resources.requests The requested resources for the container
##
resources:
## Example:
## limits:
## cpu: 100m
## memory: 256Mi
limits: {}
## Examples:
## requests:
## cpu: 100m
## memory: 256Mi
requests: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## Allowed values: soft, hard
##
nodeAffinityPreset:
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podLabels Extra labels to add to Pod
##
podLabels: {}
## @param sidecars Attach additional containers to the pod
## e.g.
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param initContainers Add additional init containers to the pod
## e.g.
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
initContainers: []
## @param configuration [object] Kibana configuration
##
configuration:
server:
basePath: ""
rewriteBasePath: false
## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
##
metrics:
## @param metrics.enabled Start a side-car prometheus exporter
##
enabled: false
service:
## @param metrics.service.annotations [object] Prometheus annotations for the Kibana service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "80"
prometheus.io/path: "_prometheus/metrics"
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
##
namespace: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## interval: 10s
##
interval: ""
## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## scrapeTimeout: 10s
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.selector Prometheus instance selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
## e.g:
## selector:
## prometheus: my-prometheus
##
# selector:
# prometheus: my-prometheus
selector: {}
## @section Kibana server TLS configuration
##
tls:
## @param tls.enabled Enable SSL/TLS encryption for Kibana server (HTTPS)
##
enabled: false
## @param tls.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
##
autoGenerated: false
## @param tls.existingSecret Name of the existing secret containing Kibana server certificates
##
existingSecret: ""
## @param tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of PKCS12
## Note: Ignored when using autoGenerated certs.
##
usePemCerts: false
## @param tls.keyPassword Password to access the PEM key when it is password-protected.
##
keyPassword: ""
## @param tls.keystorePassword Password to access the PKCS12 keystore when it is password-protected.
##
keystorePassword: ""
## @param tls.passwordsSecret Name of an existing secret containing the Keystore or PEM key password
##
passwordsSecret: ""
## @section Elasticsearch parameters
##
elasticsearch:
## @param elasticsearch.hosts List of elasticsearch hosts to connect to.
## e.g:
## hosts:
## - elasticsearch-1
## - elasticsearch-2
##
hosts: []
## @param elasticsearch.port Elasticsearch port
##
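## e.g:
## port: "9200"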
port: ""
security:
auth:
## @param elasticsearch.security.auth.enabled Set to 'true' if Elasticsearch has authentication enabled
##
enabled: false
## @param elasticsearch.security.auth.kibanaUsername Kibana server user to authenticate with Elasticsearch
##
kibanaUsername: "elastic"
## @param elasticsearch.security.auth.kibanaPassword Kibana server password to authenticate with Elasticsearch
##
kibanaPassword: ""
## @param elasticsearch.security.auth.existingSecret Name of the existing secret containing the Password for the Kibana user
##
existingSecret: ""
tls:
## @param elasticsearch.security.tls.enabled Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS)
##
enabled: false
## @param elasticsearch.security.tls.verificationMode Verification mode for SSL communications.
## Supported values: full, certificate, none.
## Ref: https://www.elastic.co/guide/en/kibana/7.x/settings.html#elasticsearch-ssl-verificationmode
verificationMode: "full"
## @param elasticsearch.security.tls.existingSecret Name of the existing secret containing Elasticsearch Truststore or CA certificate. Required unless verificationMode=none
##
existingSecret: ""
## @param elasticsearch.security.tls.usePemCerts Set to 'true' to use PEM certificates instead of PKCS12.
##
usePemCerts: false
## @param elasticsearch.security.tls.truststorePassword Password to access the PKCS12 truststore when it is password-protected.
##
truststorePassword: ""
## @param elasticsearch.security.tls.passwordsSecret Name of an existing secret containing the Truststore password
##
passwordsSecret: ""

@@ -19,7 +19,7 @@ global:
storageClass: ""
coordinating:
name: coordinating-only
-kibanaEnabled: true
+kibanaEnabled: false
## @section Common parameters
@@ -752,7 +752,7 @@ data:
fullnameOverride: ""
## @param data.replicas Desired number of Elasticsearch data nodes
##
-replicas: 2
+replicas: 1
## @param data.hostAliases Add deployment host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
@@ -953,7 +953,7 @@ data:
## then accept the value as an existing Persistent Volume Claim to which
## the container should be bound
##
existingClaim: ""
existingClaim: "data-elasticsearch-crm1-data-0"
## @param data.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set.
##
existingVolume: ""

@@ -0,0 +1,573 @@
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## @section Common parameters
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template with a string
##
fullnameOverride: ""
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @section Kibana parameters
## Bitnami Kibana image version
## ref: https://hub.docker.com/r/bitnami/kibana/tags/
## @param image.registry Kibana image registry
## @param image.repository Kibana image repository
## @param image.tag Kibana image tag (immutable tags are recommended)
## @param image.pullPolicy Kibana image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/kibana
tag: 7.16.2-debian-10-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param replicaCount Number of replicas of the Kibana Pod
##
replicaCount: 1
## @param updateStrategy.type Set up update strategy for Kibana installation.
## Set to Recreate if you use a persistent volume that cannot be mounted on more than one pod, to make sure the pods are destroyed first.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## Example:
## updateStrategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
updateStrategy:
type: RollingUpdate
## @param schedulerName Alternative scheduler
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param hostAliases Add deployment host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param plugins Array containing the Kibana plugins to be installed in deployment
## eg:
## plugins:
## - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
##
plugins: []
## Saved objects to import (NDJSON format)
##
savedObjects:
## @param savedObjects.urls Array containing links to NDJSON files to be imported during Kibana initialization
## e.g:
## urls:
## - www.example.com/dashboard.ndjson
##
urls: []
## @param savedObjects.configmap Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template)
##
configmap: ""
## @param extraConfiguration Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template
##
extraConfiguration: {}
## @param configurationCM ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml
##
configurationCM: ""
## @param extraEnvVars Array containing extra env vars to configure Kibana
## For example:
## extraEnvVars:
## - name: KIBANA_ELASTICSEARCH_URL
## value: test
##
extraEnvVars:
- name: TZ
value: "Asia/Shanghai"
# extraEnvVars: []
## @param extraEnvVarsCM ConfigMap containing extra env vars to configure Kibana
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Secret containing extra env vars to configure Kibana (in case of sensitive data)
##
extraEnvVarsSecret: ""
## @param extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts`
##
extraVolumes: []
## @param extraVolumeMounts Array to add extra mounts. Normally used with `extraVolumes`
##
extraVolumeMounts: []
## Init containers parameters:
## volumePermissions: Change the owner of the persistent volume mountpoint to runAsUser:fsGroup
##
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)
##
enabled: false
## @param volumePermissions.image.registry Init container volume-permissions image registry
## @param volumePermissions.image.repository Init container volume-permissions image name
## @param volumePermissions.image.tag Init container volume-permissions image tag
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
##
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r284
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param volumePermissions.resources Volume Permissions resources
## resources:
## requests:
## memory: 128Mi
## cpu: 100m
resources: {}
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## @param persistence.enabled Enable persistence
##
enabled: true
## @param persistence.storageClass Kibana data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim`
##
existingClaim: ""
## @param persistence.accessMode Access mode to the PV
##
accessMode: ReadWriteOnce
## @param persistence.size Size for the PV
##
size: 10Gi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param livenessProbe.enabled Enable/disable the Liveness probe
## @param livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param livenessProbe.periodSeconds How often to perform the probe
## @param livenessProbe.timeoutSeconds When the probe times out
## @param livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
## @param livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
##
livenessProbe:
enabled: true
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param readinessProbe.enabled Enable/disable the Readiness probe
## @param readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param readinessProbe.periodSeconds How often to perform the probe
## @param readinessProbe.timeoutSeconds When the probe times out
## @param readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
## @param readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
##
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param forceInitScripts Force execution of init scripts
##
forceInitScripts: false
## @param initScriptsCM Configmap with init scripts to execute
##
initScriptsCM: ""
## @param initScriptsSecret Secret with init scripts to execute (for sensitive data)
##
initScriptsSecret: ""
## Service configuration
##
service:
## @param service.port Kubernetes Service port
##
port: 5601
## @param service.type Kubernetes Service type
##
type: ClusterIP
## @param service.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## @param service.externalTrafficPolicy Enable client source IP preservation
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param service.annotations Annotations for Kibana service (evaluated as a template)
## This can be used to set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
## @param service.labels Extra labels for Kibana service
##
labels: {}
## @param service.loadBalancerIP loadBalancerIP if Kibana service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
loadBalancerIP: ""
## @param service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
##
extraPorts: []
## Configure the ingress resource that allows you to access the
## Kibana installation. Set up the URL
## ref: https://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## @param ingress.enabled Enable ingress controller resource
##
enabled: true
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
## certManager: false
##
## @param ingress.pathType Ingress Path type
##
pathType: ImplementationSpecific
## @param ingress.apiVersion Override API Version (automatically detected if not set)
##
apiVersion: ""
## @param ingress.hostname Default host for the ingress resource. If specified as "*" no host rule is configured
##
hostname: crm1.sino-assist.com
## @param ingress.path The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers.
##
path: /kibana
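## NOTE: serving Kibana under a sub-path like this typically also requires
## configuration.server.basePath: "/kibana" and configuration.server.rewriteBasePath: true
## (see the configuration section below), so that Kibana generates correct asset URLs.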
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
##
tls: false
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: kibana.local
## path: /
##
extraHosts: []
## @param ingress.extraPaths Additional arbitrary path/backend objects
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - kibana.local
## secretName: kibana.local-tls
##
extraTls: []
## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## - name: kibana.local-tls
## key:
## certificate:
##
secrets: []
## @param serviceAccount.create Enable creation of ServiceAccount for Kibana
## @param serviceAccount.name Name of serviceAccount
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
serviceAccount:
create: true
name: ""
annotations: {}
## @param containerPort Port to expose at container level
##
containerPort: 5601
## @param securityContext.enabled Enable securityContext for the Kibana deployment
## @param securityContext.fsGroup Group to configure permissions for volumes
## @param securityContext.runAsUser User for the security context
## @param securityContext.runAsNonRoot Set container's Security Context runAsNonRoot
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001
runAsNonRoot: true
## Kibana resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param resources.limits The resources limits for the container
## @param resources.requests The requested resources for the container
##
resources:
## Example:
## limits:
## cpu: 100m
## memory: 256Mi
limits: {}
## Examples:
## requests:
## cpu: 100m
## memory: 256Mi
requests: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## Allowed values: soft, hard
##
nodeAffinityPreset:
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podLabels Extra labels to add to Pod
##
podLabels: {}
## @param sidecars Attach additional containers to the pod
## e.g.
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param initContainers Add additional init containers to the pod
## e.g.
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
initContainers: []
## @param configuration [object] Kibana configuration
##
configuration:
server:
basePath: ""
rewriteBasePath: false
## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
##
metrics:
## @param metrics.enabled Start a side-car prometheus exporter
##
enabled: false
service:
## @param metrics.service.annotations [object] Prometheus annotations for the Kibana service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "80"
prometheus.io/path: "_prometheus/metrics"
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
##
namespace: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## interval: 10s
##
interval: ""
## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## scrapeTimeout: 10s
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.selector Prometheus instance selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
## e.g:
## selector:
## prometheus: my-prometheus
##
# selector:
# prometheus: my-prometheus
selector: {}
## @section Kibana server TLS configuration
##
tls:
## @param tls.enabled Enable SSL/TLS encryption for Kibana server (HTTPS)
##
enabled: false
## @param tls.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
##
autoGenerated: false
## @param tls.existingSecret Name of the existing secret containing Kibana server certificates
##
existingSecret: ""
## @param tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of PKCS12
## Note: Ignored when using autoGenerated certs.
##
usePemCerts: false
## @param tls.keyPassword Password to access the PEM key when it is password-protected.
##
keyPassword: ""
## @param tls.keystorePassword Password to access the PKCS12 keystore when it is password-protected.
##
keystorePassword: ""
## @param tls.passwordsSecret Name of an existing secret containing the Keystore or PEM key password
##
passwordsSecret: ""
## @section Elasticsearch parameters
##
elasticsearch:
## @param elasticsearch.hosts List of elasticsearch hosts to connect to.
## e.g:
## hosts:
## - elasticsearch-1
## - elasticsearch-2
##
hosts: []
## @param elasticsearch.port Elasticsearch port
##
port: ""
security:
auth:
## @param elasticsearch.security.auth.enabled Set to 'true' if Elasticsearch has authentication enabled
##
enabled: false
## @param elasticsearch.security.auth.kibanaUsername Kibana server user to authenticate with Elasticsearch
##
kibanaUsername: "elastic"
## @param elasticsearch.security.auth.kibanaPassword Kibana server password to authenticate with Elasticsearch
##
kibanaPassword: ""
## @param elasticsearch.security.auth.existingSecret Name of the existing secret containing the Password for the Kibana user
##
existingSecret: ""
tls:
## @param elasticsearch.security.tls.enabled Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS)
##
enabled: false
## @param elasticsearch.security.tls.verificationMode Verification mode for SSL communications.
## Supported values: full, certificate, none.
## Ref: https://www.elastic.co/guide/en/kibana/7.x/settings.html#elasticsearch-ssl-verificationmode
verificationMode: "full"
## @param elasticsearch.security.tls.existingSecret Name of the existing secret containing Elasticsearch Truststore or CA certificate. Required unless verificationMode=none
##
existingSecret: ""
## @param elasticsearch.security.tls.usePemCerts Set to 'true' to use PEM certificates instead of PKCS12.
##
usePemCerts: false
## @param elasticsearch.security.tls.truststorePassword Password to access the PKCS12 truststore when it is password-protected.
##
truststorePassword: ""
## @param elasticsearch.security.tls.passwordsSecret Name of an existing secret containing the Truststore password
##
passwordsSecret: ""

@@ -866,11 +866,11 @@ service:
ingress:
## @param ingress.enabled Enable ingress resource for Management console
##
-enabled: false
+enabled: true
## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers.
##
-path: /
+path: /mq
## @param ingress.pathType Ingress path type
##
@@ -878,8 +878,8 @@ ingress:
## @param ingress.hostname Default host for the ingress resource
##
-hostname: rabbitmq.local
+hostname: crm1.sino-assist.com
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md