diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
deleted file mode 100644
index 11f2ed8..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
+++ /dev/null
@@ -1,189 +0,0 @@
----
-kind: Service
-apiVersion: v1
-metadata:
- name: csi-cephfsplugin-provisioner
- labels:
- app: csi-metrics
-spec:
- selector:
- app: csi-cephfsplugin-provisioner
- ports:
- - name: http-metrics
- port: 8080
- protocol: TCP
- targetPort: 8681
-
----
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: csi-cephfsplugin-provisioner
-spec:
- selector:
- matchLabels:
- app: csi-cephfsplugin-provisioner
- replicas: 3
- template:
- metadata:
- labels:
- app: csi-cephfsplugin-provisioner
- spec:
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - csi-cephfsplugin-provisioner
- topologyKey: "kubernetes.io/hostname"
- serviceAccountName: cephfs-csi-provisioner
- priorityClassName: system-cluster-critical
- containers:
- - name: csi-provisioner
- image: opsdockerimage/sig-storage-csi-provisioner:v3.0.0
- args:
- - "--csi-address=$(ADDRESS)"
- - "--v=5"
- - "--timeout=150s"
- - "--leader-election=true"
- - "--retry-interval-start=500ms"
- - "--feature-gates=Topology=false"
- - "--extra-create-metadata=true"
- env:
- - name: ADDRESS
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-resizer
- image: opsdockerimage/sig-storage-csi-resizer:v1.3.0
- args:
- - "--csi-address=$(ADDRESS)"
- - "--v=5"
- - "--timeout=150s"
- - "--leader-election"
- - "--retry-interval-start=500ms"
- - "--handle-volume-inuse-error=false"
- env:
- - name: ADDRESS
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-snapshotter
- image: opsdockerimage/sig-storage-csi-snapshotter:v4.2.0
- args:
- - "--csi-address=$(ADDRESS)"
- - "--v=5"
- - "--timeout=150s"
- - "--leader-election=true"
- env:
- - name: ADDRESS
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-cephfsplugin-attacher
- image: opsdockerimage/sig-storage-csi-attacher:v3.3.0
- args:
- - "--v=5"
- - "--csi-address=$(ADDRESS)"
- - "--leader-election=true"
- - "--retry-interval-start=500ms"
- env:
- - name: ADDRESS
- value: /csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-cephfsplugin
- # for stable functionality replace canary with latest release version
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--nodeid=$(NODE_ID)"
- - "--type=cephfs"
- - "--controllerserver=true"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--v=5"
- - "--drivername=cephfs.csi.ceph.com"
- - "--pidlimit=-1"
- - "--enableprofiling=false"
- env:
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- - name: NODE_ID
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: CSI_ENDPOINT
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: host-sys
- mountPath: /sys
- - name: lib-modules
- mountPath: /lib/modules
- readOnly: true
- - name: host-dev
- mountPath: /dev
- - name: ceph-config
- mountPath: /etc/ceph/
- - name: ceph-csi-config
- mountPath: /etc/ceph-csi-config/
- - name: keys-tmp-dir
- mountPath: /tmp/csi/keys
- - name: liveness-prometheus
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--type=liveness"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--metricsport=8681"
- - "--metricspath=/metrics"
- - "--polltime=60s"
- - "--timeout=3s"
- env:
- - name: CSI_ENDPOINT
- value: unix:///csi/csi-provisioner.sock
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- imagePullPolicy: "IfNotPresent"
- volumes:
- - name: socket-dir
- emptyDir: {
- medium: "Memory"
- }
- - name: host-sys
- hostPath:
- path: /sys
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: host-dev
- hostPath:
- path: /dev
- - name: ceph-config
- configMap:
- name: ceph-config
- - name: ceph-csi-config
- configMap:
- name: ceph-csi-config
- - name: keys-tmp-dir
- emptyDir: {
- medium: "Memory"
- }
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-cephfsplugin.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-cephfsplugin.yaml
deleted file mode 100644
index 09f29bf..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-cephfsplugin.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
----
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
- name: csi-cephfsplugin
-spec:
- selector:
- matchLabels:
- app: csi-cephfsplugin
- template:
- metadata:
- labels:
- app: csi-cephfsplugin
- spec:
- serviceAccountName: cephfs-csi-nodeplugin
- priorityClassName: system-node-critical
- hostNetwork: true
- # to use e.g. Rook orchestrated cluster, and mons' FQDN is
- # resolved through k8s service, set dns policy to cluster first
- dnsPolicy: ClusterFirstWithHostNet
- containers:
- - name: driver-registrar
- # This is necessary only for systems with SELinux, where
- # non-privileged sidecar containers cannot access unix domain socket
- # created by privileged CSI driver container.
- securityContext:
- privileged: true
- image: opsdockerimage/sig-storage-csi-node-driver-registrar:v2.3.0
- args:
- - "--v=5"
- - "--csi-address=/csi/csi.sock"
- - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
- env:
- - name: KUBE_NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: registration-dir
- mountPath: /registration
- - name: csi-cephfsplugin
- securityContext:
- privileged: true
- capabilities:
- add: ["SYS_ADMIN"]
- allowPrivilegeEscalation: true
- # for stable functionality replace canary with latest release version
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--nodeid=$(NODE_ID)"
- - "--type=cephfs"
- - "--nodeserver=true"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--v=5"
- - "--drivername=cephfs.csi.ceph.com"
- - "--enableprofiling=false"
- # If topology based provisioning is desired, configure required
- # node labels representing the nodes topology domain
- # and pass the label names below, for CSI to consume and advertise
- # its equivalent topology domain
- # - "--domainlabels=failure-domain/region,failure-domain/zone"
- env:
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- - name: NODE_ID
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: CSI_ENDPOINT
- value: unix:///csi/csi.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: mountpoint-dir
- mountPath: /var/lib/kubelet/pods
- mountPropagation: Bidirectional
- - name: plugin-dir
- mountPath: /var/lib/kubelet/plugins
- mountPropagation: "Bidirectional"
- - name: host-sys
- mountPath: /sys
- - name: etc-selinux
- mountPath: /etc/selinux
- readOnly: true
- - name: lib-modules
- mountPath: /lib/modules
- readOnly: true
- - name: host-dev
- mountPath: /dev
- - name: host-mount
- mountPath: /run/mount
- - name: ceph-config
- mountPath: /etc/ceph/
- - name: ceph-csi-config
- mountPath: /etc/ceph-csi-config/
- - name: keys-tmp-dir
- mountPath: /tmp/csi/keys
- - name: liveness-prometheus
- securityContext:
- privileged: true
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--type=liveness"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--metricsport=8681"
- - "--metricspath=/metrics"
- - "--polltime=60s"
- - "--timeout=3s"
- env:
- - name: CSI_ENDPOINT
- value: unix:///csi/csi.sock
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- imagePullPolicy: "IfNotPresent"
- volumes:
- - name: socket-dir
- hostPath:
- path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
- type: DirectoryOrCreate
- - name: registration-dir
- hostPath:
- path: /var/lib/kubelet/plugins_registry/
- type: Directory
- - name: mountpoint-dir
- hostPath:
- path: /var/lib/kubelet/pods
- type: DirectoryOrCreate
- - name: plugin-dir
- hostPath:
- path: /var/lib/kubelet/plugins
- type: Directory
- - name: host-sys
- hostPath:
- path: /sys
- - name: etc-selinux
- hostPath:
- path: /etc/selinux
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: host-dev
- hostPath:
- path: /dev
- - name: host-mount
- hostPath:
- path: /run/mount
- - name: ceph-config
- configMap:
- name: ceph-config
- - name: ceph-csi-config
- configMap:
- name: ceph-csi-config
- - name: keys-tmp-dir
- emptyDir: {
- medium: "Memory"
- }
----
-# This is a service to expose the liveness metrics
-apiVersion: v1
-kind: Service
-metadata:
- name: csi-metrics-cephfsplugin
- labels:
- app: csi-metrics
-spec:
- ports:
- - name: http-metrics
- port: 8080
- protocol: TCP
- targetPort: 8681
- selector:
- app: csi-cephfsplugin
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-config-map.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-config-map.yaml
deleted file mode 100644
index 3efb0c1..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-config-map.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-data:
- config.json: |-
- []
-metadata:
- name: ceph-csi-config
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-nodeplugin-psp.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-nodeplugin-psp.yaml
deleted file mode 100644
index 5349b6d..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-nodeplugin-psp.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: cephfs-csi-nodeplugin-psp
-spec:
- allowPrivilegeEscalation: true
- allowedCapabilities:
- - 'SYS_ADMIN'
- fsGroup:
- rule: RunAsAny
- privileged: true
- hostNetwork: true
- hostPID: true
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- volumes:
- - 'configMap'
- - 'emptyDir'
- - 'projected'
- - 'secret'
- - 'hostPath'
- allowedHostPaths:
- - pathPrefix: '/dev'
- readOnly: false
- - pathPrefix: '/run/mount'
- readOnly: false
- - pathPrefix: '/sys'
- readOnly: false
- - pathPrefix: '/etc/selinux'
- readOnly: true
- - pathPrefix: '/lib/modules'
- readOnly: true
- - pathPrefix: '/var/lib/kubelet/pods'
- readOnly: false
- - pathPrefix: '/var/lib/kubelet/plugins/cephfs.csi.ceph.com'
- readOnly: false
- - pathPrefix: '/var/lib/kubelet/plugins_registry'
- readOnly: false
- - pathPrefix: '/var/lib/kubelet/plugins'
- readOnly: false
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-nodeplugin-psp
- # replace with non-default namespace name
- namespace: default
-rules:
- - apiGroups: ['policy']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['cephfs-csi-nodeplugin-psp']
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-nodeplugin-psp
- # replace with non-default namespace name
- namespace: default
-subjects:
- - kind: ServiceAccount
- name: cephfs-csi-nodeplugin
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: Role
- name: cephfs-csi-nodeplugin-psp
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-nodeplugin-rbac.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
deleted file mode 100644
index 1c1ccda..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: cephfs-csi-nodeplugin
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-nodeplugin
-rules:
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["get"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-nodeplugin
-subjects:
- - kind: ServiceAccount
- name: cephfs-csi-nodeplugin
- namespace: default
-roleRef:
- kind: ClusterRole
- name: cephfs-csi-nodeplugin
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-provisioner-psp.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-provisioner-psp.yaml
deleted file mode 100644
index 82ba308..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-provisioner-psp.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: cephfs-csi-provisioner-psp
-spec:
- fsGroup:
- rule: RunAsAny
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- volumes:
- - 'configMap'
- - 'emptyDir'
- - 'projected'
- - 'secret'
- - 'hostPath'
- allowedHostPaths:
- - pathPrefix: '/dev'
- readOnly: false
- - pathPrefix: '/sys'
- readOnly: false
- - pathPrefix: '/lib/modules'
- readOnly: true
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-provisioner-psp
- # replace with non-default namespace name
- namespace: default
-rules:
- - apiGroups: ['policy']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['cephfs-csi-provisioner-psp']
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-provisioner-psp
- # replace with non-default namespace name
- namespace: default
-subjects:
- - kind: ServiceAccount
- name: cephfs-csi-provisioner
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: Role
- name: cephfs-csi-provisioner-psp
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csi-provisioner-rbac.yaml b/ceph/ceph-csi/cephfs/kubernetes/csi-provisioner-rbac.yaml
deleted file mode 100644
index 227e0e8..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csi-provisioner-rbac.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: cephfs-csi-provisioner
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-external-provisioner-runner
-rules:
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get", "list"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["list", "watch", "create", "update", "patch"]
- - apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["get", "list", "watch", "create", "delete", "patch"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["get", "list", "watch", "update"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["storageclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshots"]
- verbs: ["get", "list"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotcontents"]
- verbs: ["create", "get", "list", "watch", "update", "delete"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["volumeattachments"]
- verbs: ["get", "list", "watch", "update", "patch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["volumeattachments/status"]
- verbs: ["patch"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims/status"]
- verbs: ["update", "patch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["csinodes"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotcontents/status"]
- verbs: ["update"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-provisioner-role
-subjects:
- - kind: ServiceAccount
- name: cephfs-csi-provisioner
- namespace: default
-roleRef:
- kind: ClusterRole
- name: cephfs-external-provisioner-runner
- apiGroup: rbac.authorization.k8s.io
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- # replace with non-default namespace name
- namespace: default
- name: cephfs-external-provisioner-cfg
-rules:
- # remove this once we stop supporting v1.0.0
- - apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["get", "list", "create", "delete"]
- - apiGroups: ["coordination.k8s.io"]
- resources: ["leases"]
- verbs: ["get", "watch", "list", "delete", "update", "create"]
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: cephfs-csi-provisioner-role-cfg
- # replace with non-default namespace name
- namespace: default
-subjects:
- - kind: ServiceAccount
- name: cephfs-csi-provisioner
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: Role
- name: cephfs-external-provisioner-cfg
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/cephfs/kubernetes/csidriver.yaml b/ceph/ceph-csi/cephfs/kubernetes/csidriver.yaml
deleted file mode 100644
index 49d0eaa..0000000
--- a/ceph/ceph-csi/cephfs/kubernetes/csidriver.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# if Kubernetes version is less than 1.18 change
-# apiVersion to storage.k8s.io/v1beta1
-apiVersion: storage.k8s.io/v1
-kind: CSIDriver
-metadata:
- name: cephfs.csi.ceph.com
-spec:
- attachRequired: true
- podInfoOnMount: false
diff --git a/ceph/ceph-csi/rbd/deploy/secret.yaml b/ceph/ceph-csi/rbd/deploy/secret.yaml
deleted file mode 100644
index 543cb29..0000000
--- a/ceph/ceph-csi/rbd/deploy/secret.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: csi-rbd-secret
- namespace: default
-stringData:
- userID: admin
- userKey: AQBg4llf+9CAGdsAds4tQzS+0O7dscB5ZTiTEQ==
- encryptionPassphrase: test_passphrase
\ No newline at end of file
diff --git a/ceph/ceph-csi/rbd/deploy/storageclass.yaml b/ceph/ceph-csi/rbd/deploy/storageclass.yaml
deleted file mode 100644
index 74bc541..0000000
--- a/ceph/ceph-csi/rbd/deploy/storageclass.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: csi-rbd-sc
-provisioner: rbd.csi.ceph.com
-parameters:
- clusterID: 837817cc-7148-11ec-8c46-c81f66de6d53
- pool: k8s
- imageFeatures: layering
- csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
- csi.storage.k8s.io/provisioner-secret-namespace: default
- csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
- csi.storage.k8s.io/controller-expand-secret-namespace: default
- csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
- csi.storage.k8s.io/node-stage-secret-namespace: default
- csi.storage.k8s.io/fstype: ext4
-reclaimPolicy: Delete
-allowVolumeExpansion: true
-mountOptions:
- - discard
-
\ No newline at end of file
diff --git a/ceph/ceph-csi/rbd/kubernetes/ceph-conf.yaml b/ceph/ceph-csi/rbd/kubernetes/ceph-conf.yaml
deleted file mode 100644
index 8ef1b96..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/ceph-conf.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# This is a sample configmap that helps define a Ceph configuration as required
-# by the CSI plugins.
-
-# Sample ceph.conf available at
-# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
-# documentation is available at
-# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
-apiVersion: v1
-kind: ConfigMap
-data:
- ceph.conf: |
- [global]
- auth_cluster_required = cephx
- auth_service_required = cephx
- auth_client_required = cephx
-
- # Workaround for http://tracker.ceph.com/issues/23446
- fuse_set_user_groups = false
-
- # ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
- # adding 'fuse_big_writes = true' option by default to override this limit
- # see https://github.com/ceph/ceph-csi/issues/1928
- fuse_big_writes = true
- # keyring is a required key and its value should be empty
- keyring: |
-metadata:
- name: ceph-config
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-config-map.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-config-map.yaml
deleted file mode 100644
index ce23459..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-config-map.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# /!\ DO NOT MODIFY THIS FILE
-#
-# This file has been automatically generated by Ceph-CSI yamlgen.
-# The source for the contents can be found in the api/deploy directory, make
-# your modifications there.
-#
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: "ceph-csi-config"
-data:
- config.json: |-
- [
- {
- "clusterID": "837817cc-7148-11ec-8c46-c81f66de6d53",
- "monitors": [
- "192.168.1.207:6789",
- "192.168.1.208:6789",
- "192.168.1.209:6789"
- ]
- }
- ]
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-nodeplugin-psp.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-nodeplugin-psp.yaml
deleted file mode 100644
index 5ca5b36..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-nodeplugin-psp.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: rbd-csi-nodeplugin-psp
-spec:
- allowPrivilegeEscalation: true
- allowedCapabilities:
- - 'SYS_ADMIN'
- fsGroup:
- rule: RunAsAny
- privileged: true
- hostNetwork: true
- hostPID: true
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- volumes:
- - 'configMap'
- - 'emptyDir'
- - 'projected'
- - 'hostPath'
- - 'secret'
- allowedHostPaths:
- - pathPrefix: '/dev'
- readOnly: false
- - pathPrefix: '/run/mount'
- readOnly: false
- - pathPrefix: '/sys'
- readOnly: false
- - pathPrefix: '/etc/selinux'
- readOnly: true
- - pathPrefix: '/lib/modules'
- readOnly: true
- - pathPrefix: '/var/lib/kubelet/pods'
- readOnly: false
- - pathPrefix: '/var/log/ceph'
- readOnly: false
- - pathPrefix: '/var/lib/kubelet/plugins/rbd.csi.ceph.com'
- readOnly: false
- - pathPrefix: '/var/lib/kubelet/plugins_registry'
- readOnly: false
- - pathPrefix: '/var/lib/kubelet/plugins'
- readOnly: false
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-nodeplugin-psp
- # replace with non-default namespace name
- namespace: default
-rules:
- - apiGroups: ['policy']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['rbd-csi-nodeplugin-psp']
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-nodeplugin-psp
- # replace with non-default namespace name
- namespace: default
-subjects:
- - kind: ServiceAccount
- name: rbd-csi-nodeplugin
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: Role
- name: rbd-csi-nodeplugin-psp
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-nodeplugin-rbac.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-nodeplugin-rbac.yaml
deleted file mode 100644
index 98ffbca..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-nodeplugin-rbac.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: rbd-csi-nodeplugin
- # replace with non-default namespace name
- namespace: default
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-nodeplugin
-rules:
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["get"]
- # allow to read Vault Token and connection options from the Tenants namespace
- - apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get"]
- - apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["get"]
- - apiGroups: [""]
- resources: ["serviceaccounts"]
- verbs: ["get"]
- - apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["get"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["volumeattachments"]
- verbs: ["list", "get"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-nodeplugin
-subjects:
- - kind: ServiceAccount
- name: rbd-csi-nodeplugin
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: ClusterRole
- name: rbd-csi-nodeplugin
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-provisioner-psp.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-provisioner-psp.yaml
deleted file mode 100644
index b6200b6..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-provisioner-psp.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: rbd-csi-provisioner-psp
-spec:
- fsGroup:
- rule: RunAsAny
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- volumes:
- - 'configMap'
- - 'emptyDir'
- - 'projected'
- - 'secret'
- - 'hostPath'
- allowedHostPaths:
- - pathPrefix: '/dev'
- readOnly: false
- - pathPrefix: '/sys'
- readOnly: false
- - pathPrefix: '/lib/modules'
- readOnly: true
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- # replace with non-default namespace name
- namespace: default
- name: rbd-csi-provisioner-psp
-rules:
- - apiGroups: ['policy']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames: ['rbd-csi-provisioner-psp']
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-provisioner-psp
- # replace with non-default namespace name
- namespace: default
-subjects:
- - kind: ServiceAccount
- name: rbd-csi-provisioner
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: Role
- name: rbd-csi-provisioner-psp
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-provisioner-rbac.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-provisioner-rbac.yaml
deleted file mode 100644
index 32b8973..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-provisioner-rbac.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: rbd-csi-provisioner
- # replace with non-default namespace name
- namespace: default
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-external-provisioner-runner
-rules:
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["list", "watch", "create", "update", "patch"]
- - apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["get", "list", "watch", "update"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims/status"]
- verbs: ["update", "patch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["storageclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshots"]
- verbs: ["get", "list"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotcontents"]
- verbs: ["create", "get", "list", "watch", "update", "delete"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["volumeattachments"]
- verbs: ["get", "list", "watch", "update", "patch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["volumeattachments/status"]
- verbs: ["patch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["csinodes"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotcontents/status"]
- verbs: ["update"]
- - apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["get"]
- - apiGroups: [""]
- resources: ["serviceaccounts"]
- verbs: ["get"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-provisioner-role
-subjects:
- - kind: ServiceAccount
- name: rbd-csi-provisioner
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: ClusterRole
- name: rbd-external-provisioner-runner
- apiGroup: rbac.authorization.k8s.io
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- # replace with non-default namespace name
- namespace: default
- name: rbd-external-provisioner-cfg
-rules:
- - apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["get", "list", "watch", "create", "update", "delete"]
- - apiGroups: ["coordination.k8s.io"]
- resources: ["leases"]
- verbs: ["get", "watch", "list", "delete", "update", "create"]
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: rbd-csi-provisioner-role-cfg
- # replace with non-default namespace name
- namespace: default
-subjects:
- - kind: ServiceAccount
- name: rbd-csi-provisioner
- # replace with non-default namespace name
- namespace: default
-roleRef:
- kind: Role
- name: rbd-external-provisioner-cfg
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-rbdplugin-provisioner.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
deleted file mode 100644
index 03595fa..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
----
-kind: Service
-apiVersion: v1
-metadata:
- name: csi-rbdplugin-provisioner
- # replace with non-default namespace name
- namespace: default
- labels:
- app: csi-metrics
-spec:
- selector:
- app: csi-rbdplugin-provisioner
- ports:
- - name: http-metrics
- port: 8080
- protocol: TCP
- targetPort: 8680
-
----
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: csi-rbdplugin-provisioner
- # replace with non-default namespace name
- namespace: default
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: csi-rbdplugin-provisioner
- template:
- metadata:
- labels:
- app: csi-rbdplugin-provisioner
- spec:
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - csi-rbdplugin-provisioner
- topologyKey: "kubernetes.io/hostname"
- serviceAccountName: rbd-csi-provisioner
- priorityClassName: system-cluster-critical
- containers:
- - name: csi-provisioner
- image: opsdockerimage/sig-storage-csi-provisioner:v3.0.0
- args:
- - "--csi-address=$(ADDRESS)"
- - "--v=5"
- - "--timeout=150s"
- - "--retry-interval-start=500ms"
- - "--leader-election=true"
- # set it to true to use topology based provisioning
- - "--feature-gates=Topology=false"
- # if fstype is not specified in storageclass, ext4 is default
- - "--default-fstype=ext4"
- - "--extra-create-metadata=true"
- env:
- - name: ADDRESS
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-snapshotter
- image: opsdockerimage/sig-storage-csi-snapshotter:v4.2.0
- args:
- - "--csi-address=$(ADDRESS)"
- - "--v=5"
- - "--timeout=150s"
- - "--leader-election=true"
- env:
- - name: ADDRESS
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-attacher
- image: opsdockerimage/sig-storage-csi-attacher:v3.3.0
- args:
- - "--v=5"
- - "--csi-address=$(ADDRESS)"
- - "--leader-election=true"
- - "--retry-interval-start=500ms"
- env:
- - name: ADDRESS
- value: /csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-resizer
- image: opsdockerimage/sig-storage-csi-resizer:v1.3.0
- args:
- - "--csi-address=$(ADDRESS)"
- - "--v=5"
- - "--timeout=150s"
- - "--leader-election"
- - "--retry-interval-start=500ms"
- - "--handle-volume-inuse-error=false"
- env:
- - name: ADDRESS
- value: unix:///csi/csi-provisioner.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: csi-rbdplugin
- # for stable functionality replace canary with latest release version
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--nodeid=$(NODE_ID)"
- - "--type=rbd"
- - "--controllerserver=true"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- - "--v=5"
- - "--drivername=rbd.csi.ceph.com"
- - "--pidlimit=-1"
- - "--rbdhardmaxclonedepth=8"
- - "--rbdsoftmaxclonedepth=4"
- - "--enableprofiling=false"
- env:
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- - name: NODE_ID
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- # - name: KMS_CONFIGMAP_NAME
- # value: encryptionConfig
- - name: CSI_ENDPOINT
- value: unix:///csi/csi-provisioner.sock
- - name: CSI_ADDONS_ENDPOINT
- value: unix:///csi/csi-addons.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - mountPath: /dev
- name: host-dev
- - mountPath: /sys
- name: host-sys
- - mountPath: /lib/modules
- name: lib-modules
- readOnly: true
- - name: ceph-csi-config
- mountPath: /etc/ceph-csi-config/
- # - name: ceph-csi-encryption-kms-config
- # mountPath: /etc/ceph-csi-encryption-kms-config/
- - name: keys-tmp-dir
- mountPath: /tmp/csi/keys
- - name: ceph-config
- mountPath: /etc/ceph/
- - name: csi-rbdplugin-controller
- # for stable functionality replace canary with latest release version
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--type=controller"
- - "--v=5"
- - "--drivername=rbd.csi.ceph.com"
- - "--drivernamespace=$(DRIVER_NAMESPACE)"
- env:
- - name: DRIVER_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: ceph-csi-config
- mountPath: /etc/ceph-csi-config/
- - name: keys-tmp-dir
- mountPath: /tmp/csi/keys
- - name: ceph-config
- mountPath: /etc/ceph/
- - name: liveness-prometheus
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--type=liveness"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--metricsport=8680"
- - "--metricspath=/metrics"
- - "--polltime=60s"
- - "--timeout=3s"
- env:
- - name: CSI_ENDPOINT
- value: unix:///csi/csi-provisioner.sock
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- imagePullPolicy: "IfNotPresent"
- volumes:
- - name: host-dev
- hostPath:
- path: /dev
- - name: host-sys
- hostPath:
- path: /sys
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: socket-dir
- emptyDir: {
- medium: "Memory"
- }
- - name: ceph-config
- configMap:
- name: ceph-config
- - name: ceph-csi-config
- configMap:
- name: ceph-csi-config
- # - name: ceph-csi-encryption-kms-config
- # configMap:
- # name: ceph-csi-encryption-kms-config
- - name: keys-tmp-dir
- emptyDir: {
- medium: "Memory"
- }
diff --git a/ceph/ceph-csi/rbd/kubernetes/csi-rbdplugin.yaml b/ceph/ceph-csi/rbd/kubernetes/csi-rbdplugin.yaml
deleted file mode 100644
index dc2ecbf..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csi-rbdplugin.yaml
+++ /dev/null
@@ -1,209 +0,0 @@
----
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
- name: csi-rbdplugin
- # replace with non-default namespace name
- namespace: default
-spec:
- selector:
- matchLabels:
- app: csi-rbdplugin
- template:
- metadata:
- labels:
- app: csi-rbdplugin
- spec:
- serviceAccountName: rbd-csi-nodeplugin
- hostNetwork: true
- hostPID: true
- priorityClassName: system-node-critical
- # to use e.g. Rook orchestrated cluster, and mons' FQDN is
- # resolved through k8s service, set dns policy to cluster first
- dnsPolicy: ClusterFirstWithHostNet
- containers:
- - name: driver-registrar
- # This is necessary only for systems with SELinux, where
- # non-privileged sidecar containers cannot access unix domain socket
- # created by privileged CSI driver container.
- securityContext:
- privileged: true
- image: opsdockerimage/sig-storage-csi-node-driver-registrar:v2.3.0
- args:
- - "--v=5"
- - "--csi-address=/csi/csi.sock"
- - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
- env:
- - name: KUBE_NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - name: registration-dir
- mountPath: /registration
- - name: csi-rbdplugin
- securityContext:
- privileged: true
- capabilities:
- add: ["SYS_ADMIN"]
- allowPrivilegeEscalation: true
- # for stable functionality replace canary with latest release version
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--nodeid=$(NODE_ID)"
- - "--pluginpath=/var/lib/kubelet/plugins"
- - "--stagingpath=/var/lib/kubelet/plugins/kubernetes.io/csi/pv/"
- - "--type=rbd"
- - "--nodeserver=true"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- - "--v=5"
- - "--drivername=rbd.csi.ceph.com"
- - "--enableprofiling=false"
- # If topology based provisioning is desired, configure required
- # node labels representing the nodes topology domain
- # and pass the label names below, for CSI to consume and advertise
- # its equivalent topology domain
- # - "--domainlabels=failure-domain/region,failure-domain/zone"
- env:
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- - name: NODE_ID
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- # - name: KMS_CONFIGMAP_NAME
- # value: encryptionConfig
- - name: CSI_ENDPOINT
- value: unix:///csi/csi.sock
- - name: CSI_ADDONS_ENDPOINT
- value: unix:///csi/csi-addons.sock
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- - mountPath: /dev
- name: host-dev
- - mountPath: /sys
- name: host-sys
- - mountPath: /run/mount
- name: host-mount
- - mountPath: /etc/selinux
- name: etc-selinux
- readOnly: true
- - mountPath: /lib/modules
- name: lib-modules
- readOnly: true
- - name: ceph-csi-config
- mountPath: /etc/ceph-csi-config/
- # - name: ceph-csi-encryption-kms-config
- # mountPath: /etc/ceph-csi-encryption-kms-config/
- - name: plugin-dir
- mountPath: /var/lib/kubelet/plugins
- mountPropagation: "Bidirectional"
- - name: mountpoint-dir
- mountPath: /var/lib/kubelet/pods
- mountPropagation: "Bidirectional"
- - name: keys-tmp-dir
- mountPath: /tmp/csi/keys
- - name: ceph-logdir
- mountPath: /var/log/ceph
- - name: ceph-config
- mountPath: /etc/ceph/
- - name: liveness-prometheus
- securityContext:
- privileged: true
- image: quay.io/cephcsi/cephcsi:canary
- args:
- - "--type=liveness"
- - "--endpoint=$(CSI_ENDPOINT)"
- - "--metricsport=8680"
- - "--metricspath=/metrics"
- - "--polltime=60s"
- - "--timeout=3s"
- env:
- - name: CSI_ENDPOINT
- value: unix:///csi/csi.sock
- - name: POD_IP
- valueFrom:
- fieldRef:
- fieldPath: status.podIP
- volumeMounts:
- - name: socket-dir
- mountPath: /csi
- imagePullPolicy: "IfNotPresent"
- volumes:
- - name: socket-dir
- hostPath:
- path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
- type: DirectoryOrCreate
- - name: plugin-dir
- hostPath:
- path: /var/lib/kubelet/plugins
- type: Directory
- - name: mountpoint-dir
- hostPath:
- path: /var/lib/kubelet/pods
- type: DirectoryOrCreate
- - name: ceph-logdir
- hostPath:
- path: /var/log/ceph
- type: DirectoryOrCreate
- - name: registration-dir
- hostPath:
- path: /var/lib/kubelet/plugins_registry/
- type: Directory
- - name: host-dev
- hostPath:
- path: /dev
- - name: host-sys
- hostPath:
- path: /sys
- - name: etc-selinux
- hostPath:
- path: /etc/selinux
- - name: host-mount
- hostPath:
- path: /run/mount
- - name: lib-modules
- hostPath:
- path: /lib/modules
- - name: ceph-config
- configMap:
- name: ceph-config
- - name: ceph-csi-config
- configMap:
- name: ceph-csi-config
- # - name: ceph-csi-encryption-kms-config
- # configMap:
- # name: ceph-csi-encryption-kms-config
- - name: keys-tmp-dir
- emptyDir: {
- medium: "Memory"
- }
----
-# This is a service to expose the liveness metrics
-apiVersion: v1
-kind: Service
-metadata:
- name: csi-metrics-rbdplugin
- # replace with non-default namespace name
- namespace: default
- labels:
- app: csi-metrics
-spec:
- ports:
- - name: http-metrics
- port: 8080
- protocol: TCP
- targetPort: 8680
- selector:
- app: csi-rbdplugin
diff --git a/ceph/ceph-csi/rbd/kubernetes/csidriver.yaml b/ceph/ceph-csi/rbd/kubernetes/csidriver.yaml
deleted file mode 100644
index 1ece4e9..0000000
--- a/ceph/ceph-csi/rbd/kubernetes/csidriver.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# /!\ DO NOT MODIFY THIS FILE
-#
-# This file has been automatically generated by Ceph-CSI yamlgen.
-# The source for the contents can be found in the api/deploy directory, make
-# your modifications there.
-#
----
-# if Kubernetes version is less than 1.18 change
-# apiVersion to storage.k8s.io/v1beta1
-apiVersion: storage.k8s.io/v1
-kind: CSIDriver
-metadata:
- name: "rbd.csi.ceph.com"
-spec:
- attachRequired: true
- podInfoOnMount: false
diff --git a/ceph/external-snapshotter/crd/kustomization.yaml b/ceph/external-snapshotter/crd/kustomization.yaml
deleted file mode 100644
index 34af2b7..0000000
--- a/ceph/external-snapshotter/crd/kustomization.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
- - snapshot.storage.k8s.io_volumesnapshotclasses.yaml
- - snapshot.storage.k8s.io_volumesnapshotcontents.yaml
- - snapshot.storage.k8s.io_volumesnapshots.yaml
diff --git a/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
deleted file mode 100644
index c6f0a3c..0000000
--- a/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.4.0
- api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
- creationTimestamp: null
- name: volumesnapshotclasses.snapshot.storage.k8s.io
-spec:
- group: snapshot.storage.k8s.io
- names:
- kind: VolumeSnapshotClass
- listKind: VolumeSnapshotClassList
- plural: volumesnapshotclasses
- shortNames:
- - vsclass
- - vsclasses
- singular: volumesnapshotclass
- scope: Cluster
- versions:
- - additionalPrinterColumns:
- - jsonPath: .driver
- name: Driver
- type: string
- - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
- jsonPath: .deletionPolicy
- name: DeletionPolicy
- type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1
- schema:
- openAPIV3Schema:
- description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- deletionPolicy:
- description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
- enum:
- - Delete
- - Retain
- type: string
- driver:
- description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- parameters:
- additionalProperties:
- type: string
- description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
- type: object
- required:
- - deletionPolicy
- - driver
- type: object
- served: true
- storage: true
- subresources: {}
- - additionalPrinterColumns:
- - jsonPath: .driver
- name: Driver
- type: string
- - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
- jsonPath: .deletionPolicy
- name: DeletionPolicy
- type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
- # This indicates the v1beta1 version of the custom resource is deprecated.
- # API requests to this version receive a warning in the server response.
- deprecated: true
- # This overrides the default warning returned to clients making v1beta1 API requests.
- deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass"
- schema:
- openAPIV3Schema:
- description: VolumeSnapshotClass specifies parameters that a underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- deletionPolicy:
- description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
- enum:
- - Delete
- - Retain
- type: string
- driver:
- description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- parameters:
- additionalProperties:
- type: string
- description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
- type: object
- required:
- - deletionPolicy
- - driver
- type: object
- served: true
- storage: false
- subresources: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
deleted file mode 100644
index ad133fb..0000000
--- a/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
+++ /dev/null
@@ -1,308 +0,0 @@
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.4.0
- api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
- creationTimestamp: null
- name: volumesnapshotcontents.snapshot.storage.k8s.io
-spec:
- group: snapshot.storage.k8s.io
- names:
- kind: VolumeSnapshotContent
- listKind: VolumeSnapshotContentList
- plural: volumesnapshotcontents
- shortNames:
- - vsc
- - vscs
- singular: volumesnapshotcontent
- scope: Cluster
- versions:
- - additionalPrinterColumns:
- - description: Indicates if the snapshot is ready to be used to restore a volume.
- jsonPath: .status.readyToUse
- name: ReadyToUse
- type: boolean
- - description: Represents the complete size of the snapshot in bytes
- jsonPath: .status.restoreSize
- name: RestoreSize
- type: integer
- - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
- jsonPath: .spec.deletionPolicy
- name: DeletionPolicy
- type: string
- - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
- jsonPath: .spec.driver
- name: Driver
- type: string
- - description: Name of the VolumeSnapshotClass to which this snapshot belongs.
- jsonPath: .spec.volumeSnapshotClassName
- name: VolumeSnapshotClass
- type: string
- - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
- jsonPath: .spec.volumeSnapshotRef.name
- name: VolumeSnapshot
- type: string
- - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
- jsonPath: .spec.volumeSnapshotRef.namespace
- name: VolumeSnapshotNamespace
- type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1
- schema:
- openAPIV3Schema:
- description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- spec:
- description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
- properties:
- deletionPolicy:
- description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
- enum:
- - Delete
- - Retain
- type: string
- driver:
- description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
- type: string
- source:
- description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
- properties:
- snapshotHandle:
- description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
- type: string
- volumeHandle:
- description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable.
- type: string
- type: object
- oneOf:
- - required: ["snapshotHandle"]
- - required: ["volumeHandle"]
- volumeSnapshotClassName:
- description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation.
- type: string
- volumeSnapshotRef:
- description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
- type: string
- kind:
- description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
- type: string
- namespace:
- description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
- type: string
- resourceVersion:
- description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
- type: string
- uid:
- description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
- type: string
- type: object
- required:
- - deletionPolicy
- - driver
- - source
- - volumeSnapshotRef
- type: object
- status:
- description: status represents the current information of a snapshot.
- properties:
- creationTime:
- description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
- format: int64
- type: integer
- error:
- description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
- properties:
- message:
- description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
- type: string
- time:
- description: time is the timestamp when the error was encountered.
- format: date-time
- type: string
- type: object
- readyToUse:
- description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
- type: boolean
- restoreSize:
- description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
- format: int64
- minimum: 0
- type: integer
- snapshotHandle:
- description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
- type: string
- type: object
- required:
- - spec
- type: object
- served: true
- storage: true
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Indicates if the snapshot is ready to be used to restore a volume.
- jsonPath: .status.readyToUse
- name: ReadyToUse
- type: boolean
- - description: Represents the complete size of the snapshot in bytes
- jsonPath: .status.restoreSize
- name: RestoreSize
- type: integer
- - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
- jsonPath: .spec.deletionPolicy
- name: DeletionPolicy
- type: string
- - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
- jsonPath: .spec.driver
- name: Driver
- type: string
- - description: Name of the VolumeSnapshotClass to which this snapshot belongs.
- jsonPath: .spec.volumeSnapshotClassName
- name: VolumeSnapshotClass
- type: string
- - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
- jsonPath: .spec.volumeSnapshotRef.name
- name: VolumeSnapshot
- type: string
- - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
- jsonPath: .spec.volumeSnapshotRef.namespace
- name: VolumeSnapshotNamespace
- type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
- # This indicates the v1beta1 version of the custom resource is deprecated.
- # API requests to this version receive a warning in the server response.
- deprecated: true
- # This overrides the default warning returned to clients making v1beta1 API requests.
- deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent"
- schema:
- openAPIV3Schema:
- description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- spec:
- description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
- properties:
- deletionPolicy:
- description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
- enum:
- - Delete
- - Retain
- type: string
- driver:
- description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
- type: string
- source:
- description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
- properties:
- snapshotHandle:
- description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
- type: string
- volumeHandle:
- description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken from. This field is immutable.
- type: string
- type: object
- volumeSnapshotClassName:
- description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with different set of values, and as such, should not be referenced post-snapshot creation.
- type: string
- volumeSnapshotRef:
- description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference to this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
- properties:
- apiVersion:
- description: API version of the referent.
- type: string
- fieldPath:
- description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
- type: string
- kind:
- description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
- type: string
- namespace:
- description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
- type: string
- resourceVersion:
- description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
- type: string
- uid:
- description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
- type: string
- type: object
- required:
- - deletionPolicy
- - driver
- - source
- - volumeSnapshotRef
- type: object
- status:
- description: status represents the current information of a snapshot.
- properties:
- creationTime:
- description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
- format: int64
- type: integer
- error:
- description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
- properties:
- message:
- description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
- type: string
- time:
- description: time is the timestamp when the error was encountered.
- format: date-time
- type: string
- type: object
- readyToUse:
- description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
- type: boolean
- restoreSize:
- description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
- format: int64
- minimum: 0
- type: integer
- snapshotHandle:
- description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
- type: string
- type: object
- required:
- - spec
- type: object
- served: true
- storage: false
- subresources:
- status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml b/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml
deleted file mode 100644
index a1871ae..0000000
--- a/ceph/external-snapshotter/crd/snapshot.storage.k8s.io_volumesnapshots.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.4.0
- api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
- creationTimestamp: null
- name: volumesnapshots.snapshot.storage.k8s.io
-spec:
- group: snapshot.storage.k8s.io
- names:
- kind: VolumeSnapshot
- listKind: VolumeSnapshotList
- plural: volumesnapshots
- shortNames:
- - vs
- singular: volumesnapshot
- scope: Namespaced
- versions:
- - additionalPrinterColumns:
- - description: Indicates if the snapshot is ready to be used to restore a volume.
- jsonPath: .status.readyToUse
- name: ReadyToUse
- type: boolean
- - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
- jsonPath: .spec.source.persistentVolumeClaimName
- name: SourcePVC
- type: string
- - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
- jsonPath: .spec.source.volumeSnapshotContentName
- name: SourceSnapshotContent
- type: string
- - description: Represents the minimum size of volume required to rehydrate from this snapshot.
- jsonPath: .status.restoreSize
- name: RestoreSize
- type: string
- - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
- jsonPath: .spec.volumeSnapshotClassName
- name: SnapshotClass
- type: string
- - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
- jsonPath: .status.boundVolumeSnapshotContentName
- name: SnapshotContent
- type: string
- - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
- jsonPath: .status.creationTime
- name: CreationTime
- type: date
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1
- schema:
- openAPIV3Schema:
- description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- spec:
- description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
- properties:
- source:
- description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
- properties:
- persistentVolumeClaimName:
- description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable.
- type: string
- volumeSnapshotContentName:
- description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
- type: string
- type: object
- oneOf:
- - required: ["persistentVolumeClaimName"]
- - required: ["volumeSnapshotContentName"]
- volumeSnapshotClassName:
- description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
- type: string
- required:
- - source
- type: object
- status:
- description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
- properties:
- boundVolumeSnapshotContentName:
- description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
- type: string
- creationTime:
- description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
- format: date-time
- type: string
- error:
- description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared.
- properties:
- message:
- description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
- type: string
- time:
- description: time is the timestamp when the error was encountered.
- format: date-time
- type: string
- type: object
- readyToUse:
- description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
- type: boolean
- restoreSize:
- type: string
- description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- required:
- - spec
- type: object
- served: true
- storage: true
- subresources:
- status: {}
- - additionalPrinterColumns:
- - description: Indicates if the snapshot is ready to be used to restore a volume.
- jsonPath: .status.readyToUse
- name: ReadyToUse
- type: boolean
- - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
- jsonPath: .spec.source.persistentVolumeClaimName
- name: SourcePVC
- type: string
- - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
- jsonPath: .spec.source.volumeSnapshotContentName
- name: SourceSnapshotContent
- type: string
- - description: Represents the minimum size of volume required to rehydrate from this snapshot.
- jsonPath: .status.restoreSize
- name: RestoreSize
- type: string
- - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
- jsonPath: .spec.volumeSnapshotClassName
- name: SnapshotClass
- type: string
- - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
- jsonPath: .status.boundVolumeSnapshotContentName
- name: SnapshotContent
- type: string
- - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
- jsonPath: .status.creationTime
- name: CreationTime
- type: date
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
- # This indicates the v1beta1 version of the custom resource is deprecated.
- # API requests to this version receive a warning in the server response.
- deprecated: true
- # This overrides the default warning returned to clients making v1beta1 API requests.
- deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot"
- schema:
- openAPIV3Schema:
- description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- spec:
- description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
- properties:
- source:
- description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
- properties:
- persistentVolumeClaimName:
- description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exists, and needs to be created. This field is immutable.
- type: string
- volumeSnapshotContentName:
- description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
- type: string
- type: object
- volumeSnapshotClassName:
- description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
- type: string
- required:
- - source
- type: object
- status:
- description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
- properties:
- boundVolumeSnapshotContentName:
- description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
- type: string
- creationTime:
- description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
- format: date-time
- type: string
- error:
- description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers(i.e., application controller) to decide whether they should continue on waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurrs during the snapshot creation. Upon success, this error field will be cleared.
- properties:
- message:
- description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
- type: string
- time:
- description: time is the timestamp when the error was encountered.
- format: date-time
- type: string
- type: object
- readyToUse:
- description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
- type: boolean
- restoreSize:
- type: string
- description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
- x-kubernetes-int-or-string: true
- type: object
- required:
- - spec
- type: object
- served: true
- storage: false
- subresources:
- status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/ceph/external-snapshotter/snapshot-controller/kustomization.yaml b/ceph/external-snapshotter/snapshot-controller/kustomization.yaml
deleted file mode 100644
index 883ea15..0000000
--- a/ceph/external-snapshotter/snapshot-controller/kustomization.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
- - rbac-snapshot-controller.yaml
- - setup-snapshot-controller.yaml
diff --git a/ceph/external-snapshotter/snapshot-controller/rbac-snapshot-controller.yaml b/ceph/external-snapshotter/snapshot-controller/rbac-snapshot-controller.yaml
deleted file mode 100644
index abdbdd8..0000000
--- a/ceph/external-snapshotter/snapshot-controller/rbac-snapshot-controller.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-# RBAC file for the snapshot controller.
-#
-# The snapshot controller implements the control loop for CSI snapshot functionality.
-# It should be installed as part of the base Kubernetes distribution in an appropriate
-# namespace for components implementing base system functionality. For installing with
-# Vanilla Kubernetes, kube-system makes sense for the namespace.
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: snapshot-controller
- namespace: kube-system
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: snapshot-controller-runner
-rules:
- - apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["get", "list", "watch", "update"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["storageclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["list", "watch", "create", "update", "patch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotcontents"]
- verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshotcontents/status"]
- verbs: ["patch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshots"]
- verbs: ["get", "list", "watch", "update", "patch"]
- - apiGroups: ["snapshot.storage.k8s.io"]
- resources: ["volumesnapshots/status"]
- verbs: ["update", "patch"]
- # Enable this RBAC rule only when using distributed snapshotting, i.e. when the enable-distributed-snapshotting flag is set to true
- # - apiGroups: [""]
- # resources: ["nodes"]
- # verbs: ["get", "list", "watch"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: snapshot-controller-role
-subjects:
- - kind: ServiceAccount
- name: snapshot-controller
- namespace: kube-system
-roleRef:
- kind: ClusterRole
- name: snapshot-controller-runner
- apiGroup: rbac.authorization.k8s.io
-
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: snapshot-controller-leaderelection
- namespace: kube-system
-rules:
-- apiGroups: ["coordination.k8s.io"]
- resources: ["leases"]
- verbs: ["get", "watch", "list", "delete", "update", "create"]
-
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: snapshot-controller-leaderelection
- namespace: kube-system
-subjects:
- - kind: ServiceAccount
- name: snapshot-controller
-roleRef:
- kind: Role
- name: snapshot-controller-leaderelection
- apiGroup: rbac.authorization.k8s.io
diff --git a/ceph/external-snapshotter/snapshot-controller/setup-snapshot-controller.yaml b/ceph/external-snapshotter/snapshot-controller/setup-snapshot-controller.yaml
deleted file mode 100644
index 30da482..0000000
--- a/ceph/external-snapshotter/snapshot-controller/setup-snapshot-controller.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-# This YAML file shows how to deploy the snapshot controller
-
-# The snapshot controller implements the control loop for CSI snapshot functionality.
-# It should be installed as part of the base Kubernetes distribution in an appropriate
-# namespace for components implementing base system functionality. For installing with
-# Vanilla Kubernetes, kube-system makes sense for the namespace.
-
----
-kind: Deployment
-apiVersion: apps/v1
-metadata:
- name: snapshot-controller
- namespace: kube-system
-spec:
- replicas: 2
- selector:
- matchLabels:
- app: snapshot-controller
- # the snapshot controller won't be marked as ready if the v1 CRDs are unavailable
- # in #504 the snapshot-controller will exit after around 7.5 seconds if it
- # can't find the v1 CRDs so this value should be greater than that
- minReadySeconds: 15
- strategy:
- rollingUpdate:
- maxSurge: 0
- maxUnavailable: 1
- type: RollingUpdate
- template:
- metadata:
- labels:
- app: snapshot-controller
- spec:
- serviceAccount: snapshot-controller
- containers:
- - name: snapshot-controller
- image: opsdockerimage/sig-storage-snapshot-controller:v5.0.0
- args:
- - "--v=5"
- - "--leader-election=true"
- imagePullPolicy: IfNotPresent
diff --git a/local-values/es/crm1.yaml b/local-values/es/crm1.yaml
index f5a8546..5e67bbe 100644
--- a/local-values/es/crm1.yaml
+++ b/local-values/es/crm1.yaml
@@ -771,7 +771,7 @@ data:
rollingUpdatePartition: ""
## @param data.heapSize Data node heap size
##
- heapSize: 1024m
+ heapSize: 8192m
## @param data.podAnnotations Annotations for data pods.
##
podAnnotations: {}
diff --git a/local-values/fluentd/values.yaml b/local-values/fluentd/values.yaml
new file mode 100644
index 0000000..ada4704
--- /dev/null
+++ b/local-values/fluentd/values.yaml
@@ -0,0 +1,1119 @@
+## @section Global parameters
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
+
+## @param global.imageRegistry Global Docker image registry
+## @param global.imagePullSecrets Global Docker registry secret names as an array
+## @param global.storageClass Global StorageClass for Persistent Volume(s)
+##
+global:
+ imageRegistry: ""
+ ## E.g.
+ ## imagePullSecrets:
+ ## - myRegistryKeySecretName
+ ##
+ imagePullSecrets: []
+ storageClass: ""
+
+## @section Common parameters
+
+## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname template
+##
+fullnameOverride: ""
+## @param clusterDomain Cluster Domain
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+
+## Enable diagnostic mode in the deployment
+##
+diagnosticMode:
+ ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+ ##
+ enabled: false
+ ## @param diagnosticMode.command Command to override all containers in the deployment
+ ##
+ command:
+ - sleep
+ ## @param diagnosticMode.args Args to override all containers in the deployment
+ ##
+ args:
+ - infinity
+
+## @section Fluentd parameters
+
+## Bitnami Fluentd image version
+## ref: https://hub.docker.com/r/bitnami/fluentd/tags/
+## @param image.registry Fluentd image registry
+## @param image.repository Fluentd image repository
+## @param image.tag Fluentd image tag (immutable tags are recommended)
+## @param image.pullPolicy Fluentd image pull policy
+## @param image.pullSecrets Fluentd image pull secrets
+## @param image.debug Enable image debug mode
+##
+image:
+ registry: docker.io
+ repository: bitnami/fluentd
+ tag: 1.14.4-debian-10-r0
+ ## Specify a imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ ## pullSecrets:
+ ## - myRegistryKeySecretName
+ pullSecrets: []
+ ## Enable debug mode
+ ##
+ debug: false
+## Forwarder parameters
+##
+forwarder:
+ ## @param forwarder.enabled Enable forwarder daemonset
+ ##
+ enabled: true
+ ## @param forwarder.image.registry Fluentd forwarder image registry override
+ ## @param forwarder.image.repository Fluentd forwarder image repository override
+ ## @param forwarder.image.tag Fluentd forwarder image tag override (immutable tags are recommended)
+ image:
+ registry: ""
+ repository: ""
+ tag: ""
+ ## @param forwarder.daemonUser Forwarder daemon user and group (set to root by default because it reads from host paths)
+ ##
+ daemonUser: root
+ ## @param forwarder.daemonGroup Fluentd forwarder daemon system group
+ ##
+ daemonGroup: root
+ ## @param forwarder.hostAliases Add deployment host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## K8s Security Context for forwarder pods
+ ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param forwarder.securityContext.enabled Enable security context for forwarder pods
+ ## @param forwarder.securityContext.runAsUser User ID for forwarder's containers
+ ## @param forwarder.securityContext.runAsGroup Group ID for forwarder's containers
+ ## @param forwarder.securityContext.fsGroup Group ID for forwarder's containers filesystem
+ ##
+ securityContext:
+ enabled: true
+ runAsUser: 0
+ runAsGroup: 0
+ fsGroup: 0
+ ## K8s Security Context for forwarder container
+ ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param forwarder.containerSecurityContext.enabled Enable security context for the forwarder container
+ ## @param forwarder.containerSecurityContext.privileged Run as privileged
+ ## @param forwarder.containerSecurityContext.allowPrivilegeEscalation Allow Privilege Escalation
+ ## @param forwarder.containerSecurityContext.readOnlyRootFilesystem Require the use of a read only root file system
+ ## @param forwarder.containerSecurityContext.capabilities.drop [array] Drop capabilities for the securityContext
+ ##
+ containerSecurityContext:
+ enabled: true
+ privileged: false
+ allowPrivilegeEscalation: false
+ ## Requires mounting an `extraVolume` of type `emptyDir` into /tmp
+ ##
+ readOnlyRootFilesystem: false
+ capabilities:
+ drop:
+ - ALL
+ ## @param forwarder.terminationGracePeriodSeconds Duration in seconds the pod needs to terminate gracefully
+ ## https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
+ ##
+ terminationGracePeriodSeconds: 30
+ ## @param forwarder.configFile Name of the config file that will be used by Fluentd at launch under the `/opt/bitnami/fluentd/conf` directory
+ ##
+ configFile: fluentd.conf
+ ## @param forwarder.configMap Name of the config map that contains the Fluentd configuration files
+ ## If not specified, one will be created by default
+ ##
+ configMap: ""
+ ## @param forwarder.configMapFiles [object] Files to be added to be config map. Ignored if `forwarder.configMap` is set
+ ##
+ configMapFiles:
+ fluentd.conf: |
+ # Ignore fluentd own events
+
+ @type null
+
+
+ @include fluentd-inputs.conf
+ @include fluentd-output.conf
+ {{- if .Values.metrics.enabled }}
+ @include metrics.conf
+ {{- end }}
+ fluentd-inputs.conf: |
+ # HTTP input for the liveness and readiness probes
+
+ @type http
+ port 9880
+
+ # Get the logs from the containers running in the node
+
+ @type tail
+ path /var/log/containers/*.log
+ # exclude Fluentd logs
+ exclude_path /var/log/containers/*fluentd*.log
+ pos_file /opt/bitnami/fluentd/logs/buffers/fluentd-docker.pos
+ tag kubernetes.*
+ read_from_head true
+
+ @type json
+ time_key time
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+
+
+ # enrich with kubernetes metadata
+
+ @type kubernetes_metadata
+
+ fluentd-output.conf: |
+ # Throw the healthcheck to the standard output instead of forwarding it
+
+ @type stdout
+
+ {{- if .Values.aggregator.enabled }}
+ # Forward all logs to the aggregators
+
+ @type forward
+ {{- if .Values.tls.enabled }}
+ transport tls
+ tls_cert_path /opt/bitnami/fluentd/certs/out_forward/ca.crt
+ tls_client_cert_path /opt/bitnami/fluentd/certs/out_forward/tls.crt
+ tls_client_private_key_path /opt/bitnami/fluentd/certs/out_forward/tls.key
+ {{- end }}
+
+ {{- $fullName := (include "common.names.fullname" .) }}
+ {{- $global := . }}
+ {{- $domain := default "cluster.local" .Values.clusterDomain }}
+ {{- $port := .Values.aggregator.port | int }}
+ {{- range $i, $e := until (.Values.aggregator.replicaCount | int) }}
+
+ {{ printf "host %s-%d.%s-headless.%s.svc.%s" $fullName $i $fullName $global.Release.Namespace $domain }}
+ {{ printf "port %d" $port }}
+ {{- if ne $i 0 }}
+ standby
+ {{- end }}
+
+ {{- end }}
+
+ @type file
+ path /opt/bitnami/fluentd/logs/buffers/logs.buffer
+ flush_thread_count 2
+ flush_interval 2s
+
+
+ {{- else }}
+ # Send the logs to the standard output
+
+ @type stdout
+
+ {{- end }}
+ metrics.conf: |
+ # Prometheus Exporter Plugin
+ # input plugin that exports metrics
+
+ @type prometheus
+ port {{ .Values.metrics.service.port }}
+
+ # input plugin that collects metrics from MonitorAgent
+
+ @type prometheus_monitor
+
+ host ${hostname}
+
+
+ # input plugin that collects metrics for output plugin
+
+ @type prometheus_output_monitor
+
+ host ${hostname}
+
+
+ # input plugin that collects metrics for in_tail plugin
+
+ @type prometheus_tail_monitor
+
+ host ${hostname}
+
+
+ ## @param forwarder.extraArgs Extra arguments for the Fluentd command line
+ ## ref: https://docs.fluentd.org/deployment/command-line-option
+ ##
+ extraArgs: ""
+ ## @param forwarder.extraEnv Extra environment variables to pass to the container
+ ## extraEnv:
+ ## - name: MY_ENV_VAR
+ ## value: my_value
+ ##
+ extraEnv: []
+ ## @param forwarder.containerPorts [array] Ports the forwarder containers will listen on
+ ##
+ containerPorts:
+ ## - name: syslog-tcp
+ ## containerPort: 5140
+ ## protocol: TCP
+ ## - name: syslog-udp
+ ## containerPort: 5140
+ ## protocol: UDP
+ ## - name: tcp
+ ## containerPort: 24224
+ ## protocol: TCP
+ - name: http
+ containerPort: 9880
+ protocol: TCP
+ ## Service parameters
+ ##
+ service:
+ ## @param forwarder.service.type Kubernetes service type (`ClusterIP`, `NodePort`, or `LoadBalancer`) for the forwarders
+ ##
+ type: ClusterIP
+ ## @param forwarder.service.ports [object] Array containing the forwarder service ports
+ ##
+ ports:
+ ## syslog-udp:
+ ## port: 5140
+ ## targetPort: syslog-udp
+ ## protocol: UDP
+ ## nodePort: 31514
+ ## syslog-tcp:
+ ## port: 5140
+ ## targetPort: syslog-tcp
+ ## protocol: TCP
+ ## nodePort: 31514
+ ## tcp:
+ ## port: 24224
+ ## targetPort: tcp
+ ## protocol: TCP
+ http:
+ port: 9880
+ targetPort: http
+ protocol: TCP
+ ## @param forwarder.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific)
+ ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+ ##
+ loadBalancerIP: ""
+ ## @param forwarder.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ ##
+ loadBalancerSourceRanges: []
+ ## @param forwarder.service.clusterIP Static clusterIP or None for headless services
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param forwarder.service.annotations Provide any additional annotations which may be required
+ ##
+ annotations: {}
+ ## Configure extra options for liveness probe
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param forwarder.livenessProbe.enabled Enable livenessProbe
+ ## @param forwarder.livenessProbe.httpGet.path Request path for livenessProbe
+ ## @param forwarder.livenessProbe.httpGet.port Port for livenessProbe
+ ## @param forwarder.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param forwarder.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param forwarder.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param forwarder.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param forwarder.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ httpGet:
+ path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
+ port: http
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## Configure extra options for readiness probe
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param forwarder.readinessProbe.enabled Enable readinessProbe
+ ## @param forwarder.readinessProbe.httpGet.path Request path for readinessProbe
+ ## @param forwarder.readinessProbe.httpGet.port Port for readinessProbe
+ ## @param forwarder.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param forwarder.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param forwarder.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param forwarder.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param forwarder.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ httpGet:
+ path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
+ port: http
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param forwarder.updateStrategy.type Set up update strategy.
+ ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy
+ ## Example:
+ ## updateStrategy:
+ ## type: RollingUpdate
+ ## rollingUpdate:
+ ## maxSurge: 25%
+ ## maxUnavailable: 25%
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ ## Forwarder containers' resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## We usually recommend not to specify default resources and to leave this as a conscious
+ ## choice for the user. This also increases chances charts run on environments with little
+ ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+ ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ ## @param forwarder.resources.limits The resources limits for the container
+ ## @param forwarder.resources.requests The requested resources for the container
+ ##
+ resources:
+ ## Example:
+ ## limits:
+ ## cpu: 500m
+ ## memory: 1Gi
+ limits: {}
+ ## Examples:
+ ## requests:
+ ## cpu: 300m
+ ## memory: 512Mi
+ requests: {}
+ ## @param forwarder.priorityClassName Set Priority Class Name to allow priority control over other pods
+ ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+ ##
+ priorityClassName: ""
+ ## @param forwarder.podAffinityPreset Forwarder Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param forwarder.podAntiAffinityPreset Forwarder Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: ""
+ ## Node affinity preset
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param forwarder.nodeAffinityPreset.type Forwarder Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param forwarder.nodeAffinityPreset.key Forwarder Node label key to match. Ignored if `affinity` is set.
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## @param forwarder.nodeAffinityPreset.values Forwarder Node label values to match. Ignored if `affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param forwarder.affinity Forwarder Affinity for pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param forwarder.nodeSelector Forwarder Node labels for pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param forwarder.tolerations Forwarder Tolerations for pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param forwarder.podAnnotations Pod annotations
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations: {}
+ ## @param forwarder.podLabels Extra labels to add to Pod
+ ##
+ podLabels: {}
+ ## Pods Service Account
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ ## @param forwarder.serviceAccount.create Specify whether a ServiceAccount should be created.
+ ##
+ create: true
+ ## @param forwarder.serviceAccount.name The name of the ServiceAccount to create
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ name: ""
+ ## @param forwarder.serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
+ ##
+ annotations: {}
+ ## Role Based Access
+ ## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+ ## @param forwarder.rbac.create Specify whether RBAC resources should be created and used, allowing the get, watch and list of pods/namespaces
+ ## @param forwarder.rbac.pspEnabled Whether to create a PodSecurityPolicy and bound it with RBAC. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later
+ ##
+ rbac:
+ create: true
+ pspEnabled: false
+ ## Persist data to a persistent volume
+ ##
+ persistence:
+ ## @param forwarder.persistence.enabled Enable persistence volume for the forwarder
+ ##
+ enabled: false
+ ## @param forwarder.persistence.hostPath.path Directory from the host node's filesystem to mount as hostPath volume for persistence.
+ ## The host directory you chose is mounted into /opt/bitnami/fluentd/logs/buffers in your Pod
+ ## Example use case: mount host directory /tmp/buffer (if the directory doesn't exist, it creates it) into forwarder pod.
+ ## persistence:
+ ## enabled: true
+ ## hostPath:
+ ## path: /tmp/buffer
+ ##
+ hostPath:
+ path: /opt/bitnami/fluentd/logs/buffers
+ ## @param forwarder.lifecycle Additional lifecycles to add to the pods
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
+ ## e.g:
+ ## postStart:
+ ## exec:
+ ## command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+ ## preStop:
+ ## exec:
+ ## command: ["/bin/sh","-c","nginx -s quit; while killall -0 nginx; do sleep 1; done"]
+ lifecycle: {}
+ ## @param forwarder.initContainers Additional init containers to add to the pods
+ ## For example:
+ ## initContainers:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ##
+ initContainers: []
+ ## @param forwarder.sidecars Add sidecars to forwarder pods
+ ##
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param forwarder.extraVolumes Extra volumes
+ ## Example Use Case: mount systemd journal volume
+ ## - name: systemd
+ ## hostPath:
+ ## path: /run/log/journal/
+ ##
+ extraVolumes: []
+ ## @param forwarder.extraVolumeMounts Mount extra volume(s)
+ ## - name: systemd
+ ## mountPath: /run/log/journal/
+ ##
+ extraVolumeMounts: []
+## Aggregator parameters
+##
+aggregator:
+ ## @param aggregator.enabled Enable Fluentd aggregator statefulset
+ ##
+ enabled: true
+ ## @param aggregator.image.registry Fluentd aggregator image registry override
+ ## @param aggregator.image.repository Fluentd aggregator image repository override
+ ## @param aggregator.image.tag Fluentd aggregator image tag override (immutable tags are recommended)
+ image:
+ registry: ""
+ repository: ""
+ tag: ""
+ ## @param aggregator.replicaCount Number of aggregator pods to deploy in the Stateful Set
+ ##
+ replicaCount: 1
+ ## K8s Security Context for Aggregator pods
+ ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param aggregator.securityContext.enabled Enable security context for aggregator pods
+ ## @param aggregator.securityContext.runAsUser User ID for aggregator's containers
+ ## @param aggregator.securityContext.runAsGroup Group ID for aggregator's containers
+ ## @param aggregator.securityContext.fsGroup Group ID for aggregator's containers filesystem
+ ##
+ securityContext:
+ enabled: true
+ runAsUser: 1001
+ runAsGroup: 1001
+ fsGroup: 1001
+ ## @param aggregator.hostAliases Add deployment host aliases
+ ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+ ##
+ hostAliases: []
+ ## K8s Security Context for Aggregator containers
+ ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ## @param aggregator.containerSecurityContext.enabled Enable security context for the aggregator container
+ ## @param aggregator.containerSecurityContext.privileged Run as privileged
+ ## @param aggregator.containerSecurityContext.allowPrivilegeEscalation Allow Privilege Escalation
+ ## @param aggregator.containerSecurityContext.readOnlyRootFilesystem Require the use of a read only root file system
+ ## @param aggregator.containerSecurityContext.capabilities.drop [array] Drop capabilities for the securityContext
+ ##
+ containerSecurityContext:
+ enabled: true
+ privileged: false
+ allowPrivilegeEscalation: false
+ ## Requires mounting an `extraVolume` of type `emptyDir` into /tmp
+ ##
+ readOnlyRootFilesystem: false
+ capabilities:
+ drop:
+ - ALL
+ ## @param aggregator.terminationGracePeriodSeconds Duration in seconds the pod needs to terminate gracefully
+ ## https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
+ ##
+ terminationGracePeriodSeconds: 30
+ ## @param aggregator.configFile Name of the config file that will be used by Fluentd at launch under the `/opt/bitnami/fluentd/conf` directory
+ ##
+ configFile: fluentd.conf
+ ## @param aggregator.configMap Name of the config map that contains the Fluentd configuration files
+ ##
+ configMap: ""
+ ## @param aggregator.configMapFiles [object] Files to be added to be config map. Ignored if `aggregator.configMap` is set
+ ##
+ configMapFiles:
+ fluentd.conf: |
+ # Ignore fluentd own events
+ <match fluent.**>
+ @type null
+ </match>
+
+ @include fluentd-inputs.conf
+ @include fluentd-output.conf
+ {{- if .Values.metrics.enabled }}
+ @include metrics.conf
+ {{- end }}
+ fluentd-inputs.conf: |
+ # TCP input to receive logs from
+ {{- if .Values.aggregator.port }}
+ <source>
+ @type forward
+ bind 0.0.0.0
+ port {{ .Values.aggregator.port }}
+ {{- if .Values.tls.enabled }}
+ <transport tls>
+ ca_path /opt/bitnami/fluentd/certs/in_forward/ca.crt
+ cert_path /opt/bitnami/fluentd/certs/in_forward/tls.crt
+ private_key_path /opt/bitnami/fluentd/certs/in_forward/tls.key
+ client_cert_auth true
+ </transport>
+ {{- end }}
+ </source>
+ {{- end }}
+
+ # HTTP input for the liveness and readiness probes
+ <source>
+ @type http
+ bind 0.0.0.0
+ port 9880
+ </source>
+ fluentd-output.conf: |
+ # Throw the healthcheck to the standard output
+ <match fluentd.healthcheck>
+ @type stdout
+ </match>
+
+ # Send the logs to the standard output
+ <match **>
+ @type elasticsearch
+ include_tag_key true
+ hosts "#{ENV['ELASTICSEARCH_HOSTS']}"
+ logstash_format true
+
+ <buffer>
+ @type file
+ path /opt/bitnami/fluentd/logs/buffers/logs.buffer
+ flush_thread_count 2
+ flush_interval 2s
+ </buffer>
+ </match>
+ metrics.conf: |
+ # Prometheus Exporter Plugin
+ # input plugin that exports metrics
+ <source>
+ @type prometheus
+ port {{ .Values.metrics.service.port }}
+ </source>
+
+ # input plugin that collects metrics from MonitorAgent
+ <source>
+ @type prometheus_monitor
+ <labels>
+ host ${hostname}
+ </labels>
+ </source>
+
+ # input plugin that collects metrics for output plugin
+ <source>
+ @type prometheus_output_monitor
+ <labels>
+ host ${hostname}
+ </labels>
+ </source>
+
+ ## @param aggregator.port Port the Aggregator container will listen for logs. Leave it blank to ignore.
+ ## You can specify other ports in the aggregator.containerPorts parameter
+ ##
+ port: 24224
+ ## @param aggregator.extraArgs Extra arguments for the Fluentd command line
+ ## ref: https://docs.fluentd.org/deployment/command-line-option
+ ##
+ extraArgs: ""
+ ## @param aggregator.extraEnv Extra environment variables to pass to the container
+ ## extraEnv:
+ ## - name: MY_ENV_VAR
+ ## value: my_value
+ ##
+ extraEnv:
+ - name: ELASTICSEARCH_HOSTS
+ value: elasticsearch-data:9200
+ ## @param aggregator.containerPorts [array] Ports the aggregator containers will listen on
+ ##
+ containerPorts:
+ # - name: my-port
+ # containerPort: 24222
+ # protocol: TCP
+ - name: http
+ containerPort: 9880
+ protocol: TCP
+ ## Service parameters
+ ##
+ service:
+ ## @param aggregator.service.type Kubernetes service type (`ClusterIP`, `NodePort`, or `LoadBalancer`) for the aggregators
+ ##
+ type: ClusterIP
+ ## @param aggregator.service.ports [object] Array containing the aggregator service ports
+ ##
+ ports:
+ http:
+ port: 9880
+ targetPort: http
+ protocol: TCP
+ tcp:
+ port: 24224
+ targetPort: tcp
+ protocol: TCP
+ ## @param aggregator.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific)
+ ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+ ##
+ loadBalancerIP: ""
+ ## @param aggregator.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
+ ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+ ##
+ ## loadBalancerSourceRanges:
+ ## - 10.10.10.0/24
+ loadBalancerSourceRanges: []
+ ## @param aggregator.service.clusterIP Static clusterIP or None for headless services
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+ ## e.g:
+ ## clusterIP: None
+ ##
+ clusterIP: ""
+ ## @param aggregator.service.annotations Provide any additional annotations which may be required
+ ##
+ annotations: {}
+ ## @param aggregator.service.annotationsHeadless Provide any additional annotations which may be required on headless service
+ ##
+ annotationsHeadless: {}
+ ## Configure the ingress resource that allows you to access the
+ ## Fluentd aggregator. Set up the URL
+ ## ref: https://kubernetes.io/docs/user-guide/ingress/
+ ##
+ ingress:
+ ## @param aggregator.ingress.enabled Set to true to enable ingress record generation
+ ##
+ enabled: false
+ ## DEPRECATED: Use ingress.annotations instead of ingress.certManager
+ ## certManager: false
+ ##
+
+ ## @param aggregator.ingress.pathType Ingress Path type. How the path matching is interpreted
+ ##
+ pathType: ImplementationSpecific
+ ## @param aggregator.ingress.apiVersion Override API Version (automatically detected if not set)
+ ##
+ apiVersion: ""
+ ## @param aggregator.ingress.hostname Default host for the ingress resource
+ ##
+ hostname: fluentd.local
+ ## @param aggregator.ingress.path Default path for the ingress resource
+ ## You may need to set this to '/*' in order to use this with ALB ingress controllers.
+ ##
+ path: /
+ ## @param aggregator.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+ ## Use this parameter to set the required annotations for cert-manager, see
+ ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+ ##
+ ## e.g:
+ ## annotations:
+ ## kubernetes.io/ingress.class: nginx
+ ## cert-manager.io/cluster-issuer: cluster-issuer-name
+ ##
+ annotations: {}
+ ## @param aggregator.ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
+ ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.aggregator.ingress.hostname }}
+ ## You can use the ingress.secrets parameter to create this TLS secret or relay on cert-manager to create it
+ ##
+ tls: false
+ ## @param aggregator.ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
+ ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
+ ## extraHosts:
+ ## - name: fluentd.local
+ ## path: /
+ ##
+ extraHosts: []
+ ## @param aggregator.ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host.
+ ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
+ ## extraPaths:
+ ## - path: /*
+ ## backend:
+ ## serviceName: ssl-redirect
+ ## servicePort: use-annotation
+ ##
+ extraPaths: []
+ ## @param aggregator.ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+ ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+ ## extraTls:
+ ## - hosts:
+ ## - fluentd.local
+ ## secretName: fluentd.local-tls
+ ##
+ extraTls: []
+ ## @param aggregator.ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
+ ## key and certificate should start with -----BEGIN CERTIFICATE----- or
+ ## -----BEGIN RSA PRIVATE KEY-----
+ ##
+ ## name should line up with a tlsSecret set further up
+ ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
+ ##
+ ## It is also possible to create and manage the certificates outside of this helm chart
+ ## Please see README.md for more information
+ ## e.g:
+ ## - name: fluentd.local-tls
+ ## key:
+ ## certificate:
+ ##
+ secrets: []
+ ## Configure extra options for liveness probe
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param aggregator.livenessProbe.enabled Enable livenessProbe
+ ## @param aggregator.livenessProbe.httpGet.path Request path for livenessProbe
+ ## @param aggregator.livenessProbe.httpGet.port Port for livenessProbe
+ ## @param aggregator.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+ ## @param aggregator.livenessProbe.periodSeconds Period seconds for livenessProbe
+ ## @param aggregator.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+ ## @param aggregator.livenessProbe.failureThreshold Failure threshold for livenessProbe
+ ## @param aggregator.livenessProbe.successThreshold Success threshold for livenessProbe
+ ##
+ livenessProbe:
+ enabled: true
+ httpGet:
+ path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
+ port: http
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## Configure extra options for readiness probe
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+ ## @param aggregator.readinessProbe.enabled Enable readinessProbe
+ ## @param aggregator.readinessProbe.httpGet.path Request path for readinessProbe
+ ## @param aggregator.readinessProbe.httpGet.port Port for readinessProbe
+ ## @param aggregator.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+ ## @param aggregator.readinessProbe.periodSeconds Period seconds for readinessProbe
+ ## @param aggregator.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+ ## @param aggregator.readinessProbe.failureThreshold Failure threshold for readinessProbe
+ ## @param aggregator.readinessProbe.successThreshold Success threshold for readinessProbe
+ ##
+ readinessProbe:
+ enabled: true
+ httpGet:
+ path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
+ port: http
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ ## @param aggregator.updateStrategy.type Set up update strategy.
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+ ## Example:
+ ## updateStrategy:
+ ## type: RollingUpdate
+ ## rollingUpdate:
+ ## maxSurge: 25%
+ ## maxUnavailable: 25%
+ ##
+ updateStrategy:
+ type: RollingUpdate
+ ## Aggregator containers' resource requests and limits
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ## We usually recommend not to specify default resources and to leave this as a conscious
+ ## choice for the user. This also increases chances charts run on environments with little
+ ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+ ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ ## @param aggregator.resources.limits The resources limits for the container
+ ## @param aggregator.resources.requests The requested resources for the container
+ ##
+ resources:
+ ## Example:
+ ## limits:
+ ## cpu: 500m
+ ## memory: 1Gi
+ limits: {}
+ ## Examples:
+ ## requests:
+ ## cpu: 300m
+ ## memory: 512Mi
+ requests: {}
+ ## @param aggregator.podAffinityPreset Aggregator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAffinityPreset: ""
+ ## @param aggregator.podAntiAffinityPreset Aggregator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ ##
+ podAntiAffinityPreset: soft
+ ## Node affinity preset
+ ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ ##
+ nodeAffinityPreset:
+ ## @param aggregator.nodeAffinityPreset.type Aggregator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+ ##
+ type: ""
+ ## @param aggregator.nodeAffinityPreset.key Aggregator Node label key to match. Ignored if `affinity` is set.
+ ##
+ key: ""
+ ## @param aggregator.nodeAffinityPreset.values Aggregator Node label values to match. Ignored if `affinity` is set.
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+ ## @param aggregator.affinity Aggregator Affinity for pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+ ##
+ affinity: {}
+ ## @param aggregator.nodeSelector Aggregator Node labels for pod assignment
+ ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+ ## @param aggregator.tolerations Aggregator Tolerations for pod assignment
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ ## @param aggregator.podAnnotations Pod annotations
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations: {}
+ ## @param aggregator.podLabels Extra labels to add to Pod
+ ##
+ podLabels: {}
+ ## Pods Service Account
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ ## @param aggregator.serviceAccount.create Specify whether a ServiceAccount should be created
+ ##
+ create: false
+ ## @param aggregator.serviceAccount.name The name of the ServiceAccount to create
+ ## If not set and create is true, a name is generated using the common.names.fullname template
+ name: ""
+ ## @param aggregator.serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
+ ##
+ annotations: {}
+ ## Autoscaling parameters
+ ## This is not recommended in a forwarder+aggregator architecture
+ ## @param aggregator.autoscaling.enabled Create an Horizontal Pod Autoscaler
+ ## @param aggregator.autoscaling.minReplicas Minimum number of replicas for the HPA
+ ## @param aggregator.autoscaling.maxReplicas Maximum number of replicas for the HPA
+ ## @param aggregator.autoscaling.metrics [array] Metrics for the HPA to manage the scaling
+ ##
+ autoscaling:
+ enabled: false
+ minReplicas: 2
+ maxReplicas: 5
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 60
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: 60
+ ## Persist data to a persistent volume
+ ## @param aggregator.persistence.enabled Enable persistence volume for the aggregator
+ ## @param aggregator.persistence.storageClass Persistent Volume storage class
+ ## @param aggregator.persistence.accessMode Persistent Volume access mode
+ ## @param aggregator.persistence.size Persistent Volume size
+ ##
+ persistence:
+ enabled: false
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack)
+ ##
+ storageClass: ""
+ accessMode: ReadWriteOnce
+ size: 10Gi
+ ## @param aggregator.lifecycle Additional lifecycles to add to the pods
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
+ ## e.g:
+ ## postStart:
+ ## exec:
+ ## command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+ ## preStop:
+ ## exec:
+ ## command: ["/bin/sh","-c","nginx -s quit; while killall -0 nginx; do sleep 1; done"]
+ lifecycle: {}
+ ## @param aggregator.initContainers Add init containers to aggregator pods
+ ## Example
+ ##
+ ## initContainers:
+ ## - name: do-something
+ ## image: busybox
+ ## command: ['do', 'something']
+ ##
+ initContainers: []
+ ## @param aggregator.sidecars Add sidecars to aggregator pods
+ ##
+ ## For example:
+ ## sidecars:
+ ## - name: your-image-name
+ ## image: your-image
+ ## imagePullPolicy: Always
+ ## ports:
+ ## - name: portname
+ ## containerPort: 1234
+ ##
+ sidecars: []
+ ## @param aggregator.extraVolumes Extra volumes
+ ## Example Use Case: mount an emptyDir into /tmp to support running with readOnlyRootFileSystem
+ ## - name: tmpDir
+ ## emptyDir: {}
+ ##
+ extraVolumes: []
+ ## @param aggregator.extraVolumeMounts Mount extra volume(s)
+ ## - name: tmpDir
+ ## mountPath: /tmp
+ ##
+ extraVolumeMounts: []
+ ## @param aggregator.extraVolumeClaimTemplates Optionally specify extra list of additional volume claim templates for the Fluentd Aggregator pods in StatefulSet
+ ##
+ extraVolumeClaimTemplates: []
+## @param serviceAccount Pods Service Account. This top-level global entry is DEPRECATED. Please use "forwarder.serviceAccount" instead.
+## Only the forwarder was affected by the historical usage here.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount: {}
+## @param rbac Role Based Access. This top-level global entry is DEPRECATED. Please use "forwarder.rbac" instead.
+## Only the forwarder was affected by the historical usage here.
+## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac: {}
+## Prometheus Exporter / Metrics
+##
+metrics:
+ ## @param metrics.enabled Enable the export of Prometheus metrics
+ ##
+ enabled: false
+ ## Prometheus Exporter service parameters
+ ##
+ service:
+ ## @param metrics.service.type Prometheus metrics service type
+ ##
+ type: ClusterIP
+ ## @param metrics.service.port Prometheus metrics service port
+ ##
+ port: 24231
+ ## @param metrics.service.loadBalancerIP Load Balancer IP if the Prometheus metrics server type is `LoadBalancer`
+ ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+ ##
+ loadBalancerIP: ""
+ ## @param metrics.service.annotations [object] Annotations for the Prometheus Exporter service service
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "24231"
+ prometheus.io/path: "/metrics"
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
+ ##
+ enabled: false
+ ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
+ ##
+ namespace: ""
+ ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ## e.g:
+ ## interval: 10s
+ ##
+ interval: ""
+ ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ## e.g:
+ ## scrapeTimeout: 10s
+ ##
+ scrapeTimeout: ""
+ ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+ ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+ ## e.g:
+ ## selector:
+ ## prometheus: my-prometheus
+ ##
+ selector: {}
+ ## @param metrics.serviceMonitor.labels ServiceMonitor extra labels
+ ##
+ labels: {}
+ ## @param metrics.serviceMonitor.annotations ServiceMonitor annotations
+ ##
+ annotations: {}
+## Enable internal SSL/TLS encryption
+##
+tls:
+ ## @param tls.enabled Enable TLS/SSL encryption for internal communications
+ ##
+ enabled: false
+ ## @param tls.autoGenerated Generate automatically self-signed TLS certificates.
+ ##
+ autoGenerated: false
+ ## @param tls.forwarder.existingSecret Name of the existing secret containing the TLS certificates for the Fluentd forwarder
+ ##
+ forwarder:
+ existingSecret: ""
+ ## @param tls.aggregator.existingSecret Name of the existing secret containing the TLS certificates for the Fluentd aggregator
+ ##
+ aggregator:
+ existingSecret: ""