marsal wang
2023-07-26 10:07:34 +08:00
parent f884cb1020
commit 1e5a703cce
5384 changed files with 618283 additions and 4002 deletions

View File

@@ -0,0 +1,116 @@
# Default values for canal-server.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: canal/canal-server
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v1.1.5"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 11110
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
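# The entries below are passed to the canal-server container as environment
# variables; the stock canal/canal-server image is expected to map keys such as
# "canal.instance.*" and "canal.mq.*" into canal.properties / instance.properties
# at startup. As an illustrative (not deployed) sketch, canal.instance.filter.regex
# takes comma-separated Perl-style "schema\\.table" patterns, e.g. ".*\\..*" to
# subscribe to every table.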
extraEnvVars:
- name: "canal.instance.master.address"
value: "mysql:3306"
- name: "canal.instance.dbUsername"
value: "root"
- name: "canal.instance.dbPassword"
value: "gkxl650"
- name: "canal.instance.gtidon"
value: "false"
- name: "canal.instance.connectionCharset"
value: "UTF-8"
- name: "canal.instance.tsdb.enable"
value: "true"
- name: "canal.instance.enableDruid"
value: "false"
- name: "canal.instance.filter.regex"
value: "zd_rescue\\.user_order_his.*,zd_rescue\\.task_order_his.*,zd_rescue\\.task_order_cost_his.*,zd_rescue.supplier_account_record,zd_rescue.customer_order_account"
- name: "canal.mq.topic"
value: "canal_example"
- name: "canal.serverMode"
value: "rabbitMQ"
- name: "canal.instance.parser.parallel"
value: "true"
- name: "rabbitmq.host"
value: "rabbitmq:5672"
- name: "rabbitmq.virtual.host"
value: "canal"
- name: "rabbitmq.exchange"
value: "canal_exchange"
- name: "rabbitmq.username"
value: "root"
- name: "rabbitmq.password"
value: "gkxl650"
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,116 @@
# Default values for canal-server.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: canal/canal-server
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v1.1.5"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 11110
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
extraEnvVars:
- name: "canal.instance.master.address"
value: "192.168.10.10:3306"
- name: "canal.instance.dbUsername"
value: "repl"
- name: "canal.instance.dbPassword"
value: "nczl@sino_db"
- name: "canal.instance.gtidon"
value: "false"
- name: "canal.instance.connectionCharset"
value: "UTF-8"
- name: "canal.instance.tsdb.enable"
value: "true"
- name: "canal.instance.enableDruid"
value: "false"
- name: "canal.instance.filter.regex"
value: "zd_rescue\\.user_order_20.*,zd_rescue\\.task_order_20.*,zd_rescue\\.task_order_cost_20.*,zd_rescue\\.supplier_account_record_20.*,zd_rescue\\.customer_order_account_20.*,zd_rescue\\.customer_order_relation_20.*"
- name: "canal.mq.topic"
value: "canal_example"
- name: "canal.serverMode"
value: "rabbitMQ"
- name: "canal.instance.parser.parallel"
value: "true"
- name: "rabbitmq.host"
value: "rabbitmq:5672"
- name: "rabbitmq.virtual.host"
value: "canal"
- name: "rabbitmq.exchange"
value: "canal_exchange"
- name: "rabbitmq.username"
value: "root"
- name: "rabbitmq.password"
value: "gkxl650"
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,23 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-cephfs-sc
namespace: ceph-csi
labels:
app: ceph-csi-rbd
chart: ceph-csi-rbd-3-canary
release: rbd.csi.ceph.com
heritage: Helm
provisioner: rbd.csi.ceph.com
parameters:
clusterID: 837817cc-7148-11ec-8c46-c81f66de6d53
fsName: k8s_fs
pool: cephfs_data
csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi
csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
reclaimPolicy: Delete
allowVolumeExpansion: true
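# A minimal PersistentVolumeClaim consuming this StorageClass might look like the
# following sketch (claim name and size are illustrative):
# ---
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: cephfs-pvc
# spec:
#   accessModes:
#     - ReadWriteMany
#   resources:
#     requests:
#       storage: 1Gi
#   storageClassName: csi-cephfs-sc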

View File

@@ -0,0 +1,319 @@
---
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccounts:
nodeplugin:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
provisioner:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Configuration for the CSI to connect to the cluster
# Ref: https://github.com/ceph/ceph-csi/blob/devel/examples/README.md
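# Example (placeholder form, as used in the rbd chart values later in this commit):
# csiConfig:
#   - clusterID: "<cluster-id>"
#     monitors:
#       - "<MONValue1>"
#       - "<MONValue2>"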
csiConfig:
- clusterID: "837817cc-7148-11ec-8c46-c81f66de6d53"
monitors:
- "192.168.1.207:6789"
- "192.168.1.208:6789"
- "192.168.1.209:6789"
# csiConfig: []
# Set logging level for csi containers.
# Supported values from 0 to 5. 0 for general useful logs,
# 5 for trace level verbosity.
logLevel: 5
nodeplugin:
name: nodeplugin
# if you are using ceph-fuse client set this value to OnDelete
updateStrategy: RollingUpdate
# set user created priorityclassName for csi plugin pods. default is
# system-node-critical which is highest priority
priorityClassName: system-node-critical
httpMetrics:
# Metrics only available for cephcsi/cephcsi >= 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8083
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "9080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
enabled: false
registrar:
image:
repository: opsdockerimage/sig-storage-csi-node-driver-registrar
tag: v2.3.0
pullPolicy: IfNotPresent
resources: {}
plugin:
image:
repository: quay.io/cephcsi/cephcsi
tag: canary
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Set to true to enable Ceph Kernel clients
# on kernel < 4.17 which support quotas
# forcecephkernelclient: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
provisioner:
name: provisioner
replicaCount: 1
strategy:
# RollingUpdate strategy replaces old pods with new ones gradually,
# without incurring downtime.
type: RollingUpdate
rollingUpdate:
# maxUnavailable is the maximum number of pods that can be
# unavailable during the update process.
maxUnavailable: 50%
# Timeout for waiting for creation or deletion of a volume
timeout: 60s
# set user created priorityclassName for csi provisioner pods. default is
# system-cluster-critical which is lower priority than system-node-critical
priorityClassName: system-cluster-critical
httpMetrics:
# Metrics only available for cephcsi/cephcsi >= 1.2.0
# Specifies whether http metrics should be exposed
enabled: false
# The port of the container to expose the metrics
containerPort: 8083
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "9080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
enabled: false
provisioner:
image:
repository: opsdockerimage/sig-storage-csi-provisioner
tag: v3.0.0
pullPolicy: IfNotPresent
resources: {}
attacher:
name: attacher
enabled: true
image:
repository: opsdockerimage/sig-storage-csi-attacher
tag: v3.3.0
pullPolicy: IfNotPresent
resources: {}
resizer:
name: resizer
enabled: true
image:
repository: opsdockerimage/sig-storage-csi-resizer
tag: v1.3.0
pullPolicy: IfNotPresent
resources: {}
snapshotter:
image:
repository: opsdockerimage/sig-storage-csi-snapshotter
tag: v4.2.0
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
topology:
# Specifies whether topology based provisioning support should
# be exposed by CSI
enabled: false
# domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
domainLabels:
- failure-domain/region
- failure-domain/zone
storageClass:
# Specifies whether the Storage class should be created
create: true
name: csi-cephfs-sc
# Annotations for the storage class
# Example:
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
annotations: {}
# String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use.
clusterID: 837817cc-7148-11ec-8c46-c81f66de6d53
# (required) CephFS filesystem name into which the volume shall be created
# eg: fsName: myfs
fsName: k8s_fs
# (optional) Ceph pool into which volume data shall be stored
# pool: <cephfs-data-pool>
# For eg:
# pool: "replicapool"
pool: "cephfs_data"
# (optional) Comma separated string of Ceph-fuse mount options.
# For eg:
# fuseMountOptions: debug
fuseMountOptions: ""
# (optional) Comma separated string of Cephfs kernel mount options.
# Check man mount.ceph for mount options. For eg:
# kernelMountOptions: readdir_max_bytes=1048576,norbytes
kernelMountOptions: ""
# (optional) The driver can use either ceph-fuse (fuse) or
# ceph kernelclient (kernel).
# If omitted, default volume mounter will be used - this is
# determined by probing for ceph-fuse and mount.ceph
# mounter: kernel
mounter: ""
# (optional) Prefix to use for naming subvolumes.
# If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-"
volumeNamePrefix: ""
# The secrets have to contain user and/or Ceph admin credentials.
provisionerSecret: csi-cephfs-secret
# If the Namespaces are not specified, the secrets are assumed to
# be in the Release namespace.
provisionerSecretNamespace: ""
controllerExpandSecret: csi-cephfs-secret
controllerExpandSecretNamespace: ""
nodeStageSecret: csi-cephfs-secret
nodeStageSecretNamespace: ""
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: []
# Mount Options
# Example:
# mountOptions:
# - discard
secret:
# Specifies whether the secret should be created
create: true
name: csi-cephfs-secret
# Key values correspond to a user name and its key, as defined in the
# ceph cluster. User ID should have required access to the 'pool'
# specified in the storage class
adminID: admin
adminKey: AQBj1tphduCvMRAApOGBx8WryG847dQ8Gi5LHg==
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
cephconf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
# ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
# adding 'fuse_big_writes = true' option by default to override this limit
# see https://github.com/ceph/ceph-csi/issues/1928
fuse_big_writes = true
#########################################################
# Variables for 'internal' use please use with caution! #
#########################################################
# The filename of the provisioner socket
provisionerSocketFile: csi-provisioner.sock
# The filename of the plugin socket
pluginSocketFile: csi.sock
# kubelet working directory, can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet
# Name of the csi-driver
driverName: cephfs.csi.ceph.com
# Name of the configmap used for state
configMapName: ceph-csi-config
# Key to use in the Configmap if not config.json
# configMapKey:
# Use an externally provided configmap
externallyManagedConfigmap: false

View File

@@ -0,0 +1,465 @@
---
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccounts:
nodeplugin:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
provisioner:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Configuration for the CSI to connect to the cluster
# Ref: https://github.com/ceph/ceph-csi/blob/devel/examples/README.md
# Example:
# csiConfig:
# - clusterID: "<cluster-id>"
# monitors:
# - "<MONValue1>"
# - "<MONValue2>"
csiConfig:
- clusterID: "837817cc-7148-11ec-8c46-c81f66de6d53"
monitors:
- "192.168.1.207:6789"
- "192.168.1.208:6789"
- "192.168.1.209:6789"
# csiConfig: []
# Configuration details of clusterID, PoolID and FscID mapping
# csiMapping:
# - clusterIDMapping:
# clusterID on site1: clusterID on site2
# RBDPoolIDMapping:
# - poolID on site1: poolID on site2
# CephFSFscIDMapping:
# - CephFS FscID on site1: CephFS FscID on site2
csiMapping: []
# Configuration for the encryption KMS
# Ref: https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md
# Example:
# encryptionKMSConfig:
# vault-unique-id-1:
# encryptionKMSType: vault
# vaultAddress: https://vault.example.com
# vaultAuthPath: /v1/auth/kubernetes/login
# vaultRole: csi-kubernetes
# vaultPassphraseRoot: /v1/secret
# vaultPassphrasePath: ceph-csi/
# vaultCAVerify: "false"
encryptionKMSConfig: {}
# Set logging level for csi containers.
# Supported values from 0 to 5. 0 for general useful logs,
# 5 for trace level verbosity.
logLevel: 5
nodeplugin:
name: nodeplugin
# set user created priorityclassName for csi plugin pods. default is
# system-node-critical which is high priority
priorityClassName: system-node-critical
# if you are using rbd-nbd client set this value to OnDelete
updateStrategy: RollingUpdate
httpMetrics:
# Metrics only available for cephcsi/cephcsi >= 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8080
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "8080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
# enable profiling to check for memory leaks
enabled: false
registrar:
image:
repository: opsdockerimage/sig-storage-csi-node-driver-registrar
tag: v2.3.0
pullPolicy: IfNotPresent
resources: {}
plugin:
image:
repository: quay.io/cephcsi/cephcsi
tag: canary
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
provisioner:
name: provisioner
replicaCount: 3
strategy:
# RollingUpdate strategy replaces old pods with new ones gradually,
# without incurring downtime.
type: RollingUpdate
rollingUpdate:
# maxUnavailable is the maximum number of pods that can be
# unavailable during the update process.
maxUnavailable: 50%
# if fstype is not specified in storageclass, ext4 is default
defaultFSType: ext4
# deployController to enable or disable the deployment of controller which
# generates the OMAP data if it's not present.
deployController: true
# Timeout for waiting for creation or deletion of a volume
timeout: 60s
# Hard limit for maximum number of nested volume clones that are taken before
# a flatten occurs
hardMaxCloneDepth: 8
# Soft limit for maximum number of nested volume clones that are taken before
# a flatten occurs
softMaxCloneDepth: 4
# Maximum number of snapshots allowed on rbd image without flattening
maxSnapshotsOnImage: 450
# Minimum number of snapshots allowed on rbd image to trigger flattening
minSnapshotsOnImage: 250
# skip image flattening if the kernel supports mapping of rbd images
# that have the deep-flatten feature
# skipForceFlatten: false
# set user created priorityclassName for csi provisioner pods. default is
# system-cluster-critical which is lower priority than system-node-critical
priorityClassName: system-cluster-critical
httpMetrics:
# Metrics only available for cephcsi/cephcsi >= 1.2.0
# Specifies whether http metrics should be exposed
enabled: true
# The port of the container to expose the metrics
containerPort: 8080
service:
# Specifies whether a service should be created for the metrics
enabled: true
# The port to use for the service
servicePort: 8080
type: ClusterIP
# Annotations for the service
# Example:
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "8080"
annotations: {}
clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
profiling:
# enable profiling to check for memory leaks
enabled: false
provisioner:
image:
repository: opsdockerimage/sig-storage-csi-provisioner
tag: v3.0.0
pullPolicy: IfNotPresent
resources: {}
attacher:
name: attacher
enabled: true
image:
repository: opsdockerimage/sig-storage-csi-attacher
tag: v3.3.0
pullPolicy: IfNotPresent
resources: {}
resizer:
name: resizer
enabled: true
image:
repository: opsdockerimage/sig-storage-csi-resizer
tag: v1.3.0
pullPolicy: IfNotPresent
resources: {}
snapshotter:
image:
repository: opsdockerimage/sig-storage-csi-snapshotter
tag: v4.2.0
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
topology:
# Specifies whether topology based provisioning support should
# be exposed by CSI
enabled: false
# domainLabels define which node labels to use as domains
# for CSI nodeplugins to advertise their domains
# NOTE: the value here serves as an example and needs to be
# updated with node labels that define domains of interest
domainLabels:
- failure-domain/region
- failure-domain/zone
storageClass:
# Specifies whether the storageclass should be created
create: true
name: csi-rbd-sc
# Annotations for the storage class
# Example:
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
annotations: {}
# (required) String representing a Ceph cluster to provision storage from.
# Should be unique across all Ceph clusters in use for provisioning,
# cannot be greater than 36 bytes in length, and should remain immutable for
# the lifetime of the StorageClass in use.
clusterID: "837817cc-7148-11ec-8c46-c81f66de6d53"
# (optional) If you want to use erasure coded pool with RBD, you need to
# create two pools. one erasure coded and one replicated.
# You need to specify the replicated pool here in the `pool` parameter, it is
# used for the metadata of the images.
# The erasure coded pool must be set as the `dataPool` parameter below.
# dataPool: <ec-data-pool>
dataPool: ""
# (required) Ceph pool into which the RBD image shall be created
# eg: pool: replicapool
pool: k8s
# Set thickProvision to true if you want RBD images to be fully allocated on
# creation (thin provisioning is the default).
thickProvision: false
# (required) RBD image features, CSI creates image with image-format 2
# CSI RBD currently supports `layering`, `journaling`, `exclusive-lock`,
# `object-map`, `fast-diff` features. If `journaling` is enabled, must
# enable `exclusive-lock` too.
# imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
imageFeatures: "layering"
# (optional) Specifies whether to try other mounters in case if the current
# mounter fails to mount the rbd image for any reason. True means fallback
# to next mounter, default is set to false.
# Note: tryOtherMounters is currently useful to fallback from krbd to rbd-nbd
# in case if any of the specified imageFeatures is not supported by krbd
# driver on node scheduled for application pod launch, but in the future this
# should work with any mounter type.
# tryOtherMounters: false
# (optional) uncomment the following to use rbd-nbd as mounter
# on supported nodes
# mounter: rbd-nbd
mounter: ""
# (optional) ceph client log location, eg: rbd-nbd
# By default host-path /var/log/ceph of node is bind-mounted into
# csi-rbdplugin pod at /var/log/ceph mount path. This is to configure
# target bindmount path used inside container for ceph clients logging.
# See docs/rbd-nbd.md for available configuration options.
# cephLogDir: /var/log/ceph
cephLogDir: ""
# (optional) ceph client log strategy
# By default, log file belonging to a particular volume will be deleted
# on unmap, but you can choose to just compress instead of deleting it
# or even preserve the log file in text format as it is.
# Available options `remove` or `compress` or `preserve`
# cephLogStrategy: remove
cephLogStrategy: ""
# (optional) Prefix to use for naming RBD images.
# If omitted, defaults to "csi-vol-".
# volumeNamePrefix: "foo-bar-"
volumeNamePrefix: ""
# (optional) Instruct the plugin it has to encrypt the volume
# By default it is disabled. Valid values are "true" or "false".
# A string is expected here, i.e. "true", not true.
# encrypted: "true"
encrypted: ""
# (optional) Use external key management system for encryption passphrases by
# specifying a unique ID matching KMS ConfigMap. The ID is only used for
# correlation to configmap entry.
encryptionKMSID: ""
# Add topology constrained pools configuration, if topology based pools
# are setup, and topology constrained provisioning is required.
# For further information read TODO<doc>
# topologyConstrainedPools: |
# [{"poolName":"pool0",
# "dataPool":"ec-pool0" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone1"}]},
# {"poolName":"pool1",
# "dataPool":"ec-pool1" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"east"},
# {"domainLabel":"zone","value":"zone2"}]},
# {"poolName":"pool2",
# "dataPool":"ec-pool2" # optional, erasure-coded pool for data
# "domainSegments":[
# {"domainLabel":"region","value":"west"},
# {"domainLabel":"zone","value":"zone1"}]}
# ]
topologyConstrainedPools: []
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# Format:
# mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
# An empty mounter field is treated as krbd type for compatibility.
# eg:
# mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink"
mapOptions: ""
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
# Format:
# unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
# An empty mounter field is treated as krbd type for compatibility.
# eg:
# unmapOptions: "krbd:force;nbd:force"
unmapOptions: ""
# The secrets have to contain Ceph credentials with required access
# to the 'pool'.
provisionerSecret: csi-rbd-secret
# If Namespaces are left empty, the secrets are assumed to be in the
# Release namespace.
provisionerSecretNamespace: ""
controllerExpandSecret: csi-rbd-secret
controllerExpandSecretNamespace: ""
nodeStageSecret: csi-rbd-secret
nodeStageSecretNamespace: ""
# Specify the filesystem type of the volume. If not specified,
# csi-provisioner will set default as `ext4`.
fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: []
# Mount Options
# Example:
# mountOptions:
# - discard
secret:
# Specifies whether the secret should be created
create: true
name: csi-rbd-secret
# Key values correspond to a user name and its key, as defined in the
# ceph cluster. User ID should have required access to the 'pool'
# specified in the storage class
userID: admin
userKey: AQBj1tphduCvMRAApOGBx8WryG847dQ8Gi5LHg==
# Encryption passphrase
encryptionPassphrase: test_passphrase
# This is a sample configmap that helps define a Ceph configuration as required
# by the CSI plugins.
# Sample ceph.conf available at
# https://github.com/ceph/ceph/blob/master/src/sample.ceph.conf Detailed
# documentation is available at
# https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/
cephconf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Workaround for http://tracker.ceph.com/issues/23446
fuse_set_user_groups = false
# ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
# adding 'fuse_big_writes = true' option by default to override this limit
# see https://github.com/ceph/ceph-csi/issues/1928
fuse_big_writes = true
#########################################################
# Variables for 'internal' use please use with caution! #
#########################################################
# The filename of the provisioner socket
provisionerSocketFile: csi-provisioner.sock
# The filename of the plugin socket
pluginSocketFile: csi.sock
# kubelet working directory, can be set using `--root-dir` when starting kubelet.
kubeletDir: /var/lib/kubelet
# Host path location for ceph client processes logging, ex: rbd-nbd
cephLogDirHostPath: /var/log/ceph
# Name of the csi-driver
driverName: rbd.csi.ceph.com
# Name of the configmap used for state
configMapName: ceph-csi-rbd-config
# Key to use in the Configmap if not config.json
# configMapKey:
# Use an externally provided configmap
externallyManagedConfigmap: false
# Name of the configmap used for encryption kms configuration
kmsConfigMapName: ceph-csi-encryption-kms-config

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,148 @@
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
## @param global.redis.password Global Redis&trade; password (overrides `auth.password`)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
# - myRegistryKeySecretName
## @section Common parameters
## @param nameOverride String to partially override nginx.fullname template (will maintain the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override nginx.fullname template
##
fullnameOverride: ""
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param clusterDomain Kubernetes Cluster Domain
##
clusterDomain: cluster.local
## @param extraDeploy Extra objects to deploy (value evaluated as a template)
##
extraDeploy: []
## @param commonLabels Add labels to all the deployed resources
##
commonLabels: {}
## @param commonAnnotations Add annotations to all the deployed resources
##
commonAnnotations: {}
# Default values for yapi.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
## replicas number and pvc size
fastdfs:
storage:
- name: group1
replica: 1
size: 20Gi
- name: group2
replica: 0
size: 2Gi
tracker:
replica: 1
size: 5Gi
service:
type: ClusterIP
annotations: {}
image:
repository: ygqygq2/fastdfs-nginx
tag: V6.08-tengine
pullPolicy: IfNotPresent
## Headless service.
##
headless:
annotations: {}
trackerPorts:
tracker:
containerPort: 22122 # Port number for fastdfs container tracker port.
protocol: TCP # Protocol
trackernginx:
containerPort: 80 # Port number for fastdfs container nginx port.
protocol: TCP # Protocol
storagePorts:
storage:
containerPort: 23000 # Port number for fastdfs container storage port.
protocol: TCP # Protocol
storagenginx:
containerPort: 8080 # Port number for fastdfs container nginx port.
protocol: TCP # Protocol
ingress:
enabled: true
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
ingressClassName: ""
path: /
pathType: ImplementationSpecific
hosts:
- file.sino-assist.com
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Configure resource requests and limits
### ref: http://kubernetes.io/docs/user-guide/compute-resources/
###
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
tracker:
requests:
cpu: 100m
memory: 128Mi
storage:
requests:
cpu: 100m
memory: 128Mi
## Node labels and tolerations for pod assignment
### ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
### ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistentVolume:
enabled: true
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, azure-disk on
## Azure, standard on GKE, AWS & OpenStack)
##
storageClass: "nfs-client-209"
accessMode: ReadWriteMany
annotations: {}

File diff suppressed because it is too large

View File

@@ -0,0 +1,882 @@
expose:
# Set the way to expose the service. Set the type as "ingress",
# "clusterIP", "nodePort" or "loadBalancer" and fill the information
# in the corresponding section
type: ingress
tls:
# Enable the tls or not.
# Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
# Note: if the "expose.type" is "ingress" and the tls
# is disabled, the port must be included in the command when pulling/pushing
# images. Refer to https://github.com/goharbor/harbor/issues/5291
# for the detail.
enabled: true
# The source of the tls certificate. Set it as "auto", "secret"
# or "none" and fill the information in the corresponding section
# 1) auto: generate the tls certificate automatically
# 2) secret: read the tls certificate from the specified secret.
# The tls certificate can be generated manually or by cert manager
# 3) none: configure no tls certificate for the ingress. If the default
# tls certificate is configured in the ingress controller, choose this option
certSource: auto
auto:
# The common name used to generate the certificate; it's necessary
# when the type isn't "ingress"
commonName: ""
secret:
# The name of secret which contains keys named:
# "tls.crt" - the certificate
# "tls.key" - the private key
secretName: "sino-assist-ssl-key"
# The name of secret which contains keys named:
# "tls.crt" - the certificate
# "tls.key" - the private key
# Only needed when the "expose.type" is "ingress".
notarySecretName: "sino-assist-ssl-key"
ingress:
hosts:
core: harbor.sino-assist.com
notary: notary.harbor.sino-assist.com
# set to the type of ingress controller if it has specific requirements.
# leave as `default` for most ingress controllers.
# set to `gce` if using the GCE ingress controller
# set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller
controller: default
## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress
kubeVersionOverride: ""
annotations:
# note different ingress controllers may require a different ssl-redirect annotation
# for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below
ingress.kubernetes.io/ssl-redirect: "true"
ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
notary:
# notary-specific annotations
annotations: {}
harbor:
# harbor ingress-specific annotations
annotations: {}
clusterIP:
# The name of ClusterIP service
name: harbor
# Annotations on the ClusterIP service
annotations: {}
ports:
# The service port Harbor listens on when serving with HTTP
httpPort: 80
# The service port Harbor listens on when serving with HTTPS
httpsPort: 443
# The service port Notary listens on. Only needed when notary.enabled
# is set to true
notaryPort: 4443
nodePort:
# The name of NodePort service
name: harbor
ports:
http:
# The service port Harbor listens on when serving with HTTP
port: 80
# The node port Harbor listens on when serving with HTTP
nodePort: 30002
https:
# The service port Harbor listens on when serving with HTTPS
port: 443
# The node port Harbor listens on when serving with HTTPS
nodePort: 30003
# Only needed when notary.enabled is set to true
notary:
# The service port Notary listens on
port: 4443
# The node port Notary listens on
nodePort: 30004
loadBalancer:
# The name of LoadBalancer service
name: harbor
# Set the IP if the LoadBalancer supports assigning IP
IP: ""
ports:
# The service port Harbor listens on when serving with HTTP
httpPort: 80
# The service port Harbor listens on when serving with HTTPS
httpsPort: 443
# The service port Notary listens on. Only needed when notary.enabled
# is set to true
notaryPort: 4443
annotations: {}
sourceRanges: []
# The external URL for Harbor core service. It is used to
# 1) populate the docker/helm commands shown on the portal
# 2) populate the token service URL returned to docker/notary client
#
# Format: protocol://domain[:port]. Usually:
# 1) if "expose.type" is "ingress", the "domain" should be
# the value of "expose.ingress.hosts.core"
# 2) if "expose.type" is "clusterIP", the "domain" should be
# the value of "expose.clusterIP.name"
# 3) if "expose.type" is "nodePort", the "domain" should be
# the IP address of k8s node
#
# If Harbor is deployed behind a proxy, set it as the URL of the proxy
externalURL: https://harbor.sino-assist.com
# The internal TLS is used for secure communication between Harbor components. To enable https
# in each component, TLS cert files need to be provided in advance.
internalTLS:
# If internal TLS enabled
enabled: false
# There are three ways to provide tls
# 1) "auto" will generate cert automatically
# 2) "manual" need provide cert file manually in following value
# 3) "secret" internal certificates from secret
certSource: "auto"
# The content of trust ca, only available when `certSource` is "manual"
trustCa: ""
# core related cert configuration
core:
# secret name for core's tls certs
secretName: ""
# Content of core's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of core's TLS key file, only available when `certSource` is "manual"
key: ""
# jobservice related cert configuration
jobservice:
# secret name for jobservice's tls certs
secretName: ""
# Content of jobservice's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of jobservice's TLS key file, only available when `certSource` is "manual"
key: ""
# registry related cert configuration
registry:
# secret name for registry's tls certs
secretName: ""
# Content of registry's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of registry's TLS key file, only available when `certSource` is "manual"
key: ""
# portal related cert configuration
portal:
# secret name for portal's tls certs
secretName: ""
# Content of portal's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of portal's TLS key file, only available when `certSource` is "manual"
key: ""
# chartmuseum related cert configuration
chartmuseum:
# secret name for chartmuseum's tls certs
secretName: ""
# Content of chartmuseum's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of chartmuseum's TLS key file, only available when `certSource` is "manual"
key: ""
# trivy related cert configuration
trivy:
# secret name for trivy's tls certs
secretName: ""
# Content of trivy's TLS cert file, only available when `certSource` is "manual"
crt: ""
# Content of trivy's TLS key file, only available when `certSource` is "manual"
key: ""
ipFamily:
# ipv6Enabled set to true if ipv6 is enabled in the cluster; currently this affects the nginx-related components
ipv6:
enabled: true
# ipv4Enabled set to true if ipv4 is enabled in the cluster; currently this affects the nginx-related components
ipv4:
enabled: true
# The persistence is enabled by default and a default StorageClass
# is needed in the k8s cluster to provision volumes dynamically.
# Specify another StorageClass in the "storageClass" or set "existingClaim"
# if you already have existing persistent volumes to use
#
# For storing images and charts, you can also use "azure", "gcs", "s3",
# "swift" or "oss". Set it in the "imageChartStorage" section
persistence:
enabled: true
# Set it to "keep" to avoid removing PVCs during a helm delete
# operation. Leaving it empty will delete PVCs after the chart is deleted
# (this does not apply for PVCs that are created for internal database
# and redis components, i.e. they are never deleted automatically)
resourcePolicy: "keep"
persistentVolumeClaim:
registry:
# Use the existing PVC which must be created manually before bound,
# and specify the "subPath" if the PVC is shared with other components
existingClaim: "harbor-registry"
# Specify the "storageClass" used to provision the volume. Or the default
# StorageClass will be used(the default).
# Set it to "-" to disable dynamic provisioning
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
chartmuseum:
existingClaim: "harbor-chartmuseum"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
jobservice:
existingClaim: "harbor-jobservice"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
# If external database is used, the following settings for database will
# be ignored
database:
existingClaim: "harbor-database"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
# If external Redis is used, the following settings for Redis will
# be ignored
redis:
existingClaim: "harbor-redis"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 1Gi
trivy:
existingClaim: "harbor-trivy"
storageClass: ""
subPath: ""
accessMode: ReadWriteOnce
size: 5Gi
# Define which storage backend is used for registry and chartmuseum to store
# images and charts. Refer to
# https://github.com/docker/distribution/blob/master/docs/configuration.md#storage
# for the detail.
imageChartStorage:
# Specify whether to disable `redirect` for images and chart storage; for
# backends which do not support it (such as using MinIO for the `s3` storage
# type), disable it by setting `disableredirect` to `true`.
# Refer to
# https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect
# for the detail.
disableredirect: false
# Specify the "caBundleSecretName" if the storage service uses a self-signed certificate.
# The secret must contain a key named "ca.crt" which will be injected into the trust store
# of registry's and chartmuseum's containers.
# caBundleSecretName:
# Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift",
# "oss" and fill the information needed in the corresponding section. The type
# must be "filesystem" if you want to use persistent volumes for registry
# and chartmuseum
type: filesystem
filesystem:
rootdirectory: /storage
#maxthreads: 100
azure:
accountname: accountname
accountkey: base64encodedaccountkey
container: containername
#realm: core.windows.net
gcs:
bucket: bucketname
# The base64 encoded json file which contains the key
encodedkey: base64-encoded-json-key-file
#rootdirectory: /gcs/object/name/prefix
#chunksize: "5242880"
s3:
region: us-west-1
bucket: bucketname
#accesskey: awsaccesskey
#secretkey: awssecretkey
#regionendpoint: http://myobjects.local
#encrypt: false
#keyid: mykeyid
#secure: true
#skipverify: false
#v4auth: true
#chunksize: "5242880"
#rootdirectory: /s3/object/name/prefix
#storageclass: STANDARD
#multipartcopychunksize: "33554432"
#multipartcopymaxconcurrency: 100
#multipartcopythresholdsize: "33554432"
swift:
authurl: https://storage.myprovider.com/v3/auth
username: username
password: password
container: containername
#region: fr
#tenant: tenantname
#tenantid: tenantid
#domain: domainname
#domainid: domainid
#trustid: trustid
#insecureskipverify: false
#chunksize: 5M
#prefix:
#secretkey: secretkey
#accesskey: accesskey
#authversion: 3
#endpointtype: public
#tempurlcontainerkey: false
#tempurlmethods:
oss:
accesskeyid: accesskeyid
accesskeysecret: accesskeysecret
region: regionname
bucket: bucketname
#endpoint: endpoint
#internal: false
#encrypt: false
#secure: true
#chunksize: 10M
#rootdirectory: rootdirectory
imagePullPolicy: IfNotPresent
# Use this set to assign a list of default pullSecrets
imagePullSecrets:
# - name: docker-registry-secret
# - name: internal-registry-secret
# The update strategy for deployments with persistent volumes (jobservice, registry
# and chartmuseum): "RollingUpdate" or "Recreate"
# Set it as "Recreate" when "RWM" for volumes isn't supported
updateStrategy:
type: RollingUpdate
# debug, info, warning, error or fatal
logLevel: info
# The initial password of Harbor admin. Change it from portal after launching Harbor
harborAdminPassword: "Harbor12345"
# The name of the secret which contains key named "ca.crt". Setting this enables the
# download link on portal to download the certificate of CA when the certificate isn't
# generated automatically
caSecretName: ""
# The secret key used for encryption. Must be a string of 16 chars.
secretKey: "not-a-secure-key"
# The proxy settings for updating trivy vulnerabilities from the Internet and replicating
# artifacts from/to the registries that cannot be reached directly
proxy:
httpProxy:
httpsProxy:
noProxy: 127.0.0.1,localhost,.local,.internal
components:
- core
- jobservice
- trivy
# The custom ca bundle secret, the secret must contain key named "ca.crt"
# which will be injected into the trust store for chartmuseum, core, jobservice, registry, trivy components
# caBundleSecretName: ""
## UAA Authentication Options
# If you're using UAA for authentication behind a self-signed
# certificate you will need to provide the CA Cert.
# Set uaaSecretName below to provide a pre-created secret that
# contains a base64 encoded CA Certificate named `ca.crt`.
# uaaSecretName:
# If the service is exposed via "ingress", Nginx will not be used
nginx:
image:
repository: goharbor/nginx-photon
tag: v2.4.1
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
portal:
image:
repository: goharbor/harbor-portal
tag: v2.4.1
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
core:
image:
repository: goharbor/harbor-core
tag: v2.4.1
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
replicas: 1
## Startup probe values
startupProbe:
enabled: true
initialDelaySeconds: 10
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
# Secret is used when core server communicates with other components.
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
# Fill the name of a kubernetes secret if you want to use your own
# TLS certificate and private key for token encryption/decryption.
# The secret must contain keys named:
# "tls.crt" - the certificate
# "tls.key" - the private key
# The default key pair will be used if it isn't set
secretName: ""
# The XSRF key. Will be generated automatically if it isn't specified
xsrfKey: ""
## The priority class to run the pod as
priorityClassName:
jobservice:
image:
repository: goharbor/harbor-jobservice
tag: v2.4.1
replicas: 1
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
maxJobWorkers: 10
# The logger for jobs: "file", "database" or "stdout"
jobLoggers:
- file
# - database
# - stdout
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
# Secret is used when job service communicates with other components.
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
## The priority class to run the pod as
priorityClassName:
registry:
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
registry:
image:
repository: goharbor/registry-photon
tag: v2.4.1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
controller:
image:
repository: goharbor/harbor-registryctl
tag: v2.4.1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
replicas: 1
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
# Secret is used to secure the upload state from client
# and registry storage backend.
# See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http
# If a secret key is not specified, Helm will generate one.
# Must be a string of 16 chars.
secret: ""
# If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL.
relativeurls: false
credentials:
username: "harbor_registry_user"
password: "harbor_registry_password"
middleware:
enabled: false
type: cloudFront
cloudFront:
baseurl: example.cloudfront.net
keypairid: KEYPAIRID
duration: 3000s
ipfilteredby: none
# The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key
# that allows access to CloudFront
privateKeySecret: "my-secret"
chartmuseum:
enabled: true
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# Harbor defaults ChartMuseum to returning relative URLs; if you want to use absolute URLs, enable it by changing the following value to 'true'
absoluteUrl: false
image:
repository: goharbor/chartmuseum-photon
tag: v2.4.1
replicas: 1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
## limit the number of parallel indexers
indexLimit: 0
trivy:
# enabled the flag to enable Trivy scanner
enabled: true
image:
# repository the repository for Trivy adapter image
repository: goharbor/trivy-adapter-photon
# tag the tag for Trivy adapter image
tag: v2.4.1
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
# replicas the number of Pod replicas
replicas: 1
# debugMode the flag to enable Trivy debug mode with more verbose scanning log
debugMode: false
# vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`.
vulnType: "os,library"
# severity a comma-separated list of severities to be checked
severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
# ignoreUnfixed the flag to display only fixed vulnerabilities
ignoreUnfixed: false
# insecure the flag to skip verifying registry certificate
insecure: false
# gitHubToken the GitHub access token to download Trivy DB
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update
# timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one.
# Currently, the database is updated every 12 hours and published as a new release to GitHub.
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://developer.github.com/v3/#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
gitHubToken: ""
# skipUpdate the flag to disable Trivy DB downloads from GitHub
#
# You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the
# `/home/scanner/.cache/trivy/db/trivy.db` path.
skipUpdate: false
# The duration to wait for scan completion
timeout: 5m0s
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1
memory: 1Gi
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
notary:
enabled: true
server:
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
image:
repository: goharbor/notary-server-photon
tag: v2.4.1
replicas: 1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
signer:
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
image:
repository: goharbor/notary-signer-photon
tag: v2.4.1
replicas: 1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## Additional deployment annotations
podAnnotations: {}
## The priority class to run the pod as
priorityClassName:
# Fill the name of a kubernetes secret if you want to use your own
# TLS certificate authority, certificate and private key for notary
# communications.
# The secret must contain keys named ca.crt, tls.crt and tls.key that
# contain the CA, certificate and private key.
# They will be generated if not set.
secretName: ""
database:
# if external database is used, set "type" to "external"
# and fill in the connection information in the "external" section
type: internal
internal:
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
image:
repository: goharbor/harbor-db
tag: v2.4.1
# The initial superuser password for internal database
password: "changeit"
# The size limit for shared memory, pgSQL uses it for shared_buffer
# More details see:
# https://github.com/goharbor/harbor/issues/15034
shmSizeLimit: 512Mi
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## The priority class to run the pod as
priorityClassName:
initContainer:
migrator: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
permissions: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
external:
host: "192.168.0.1"
port: "5432"
username: "user"
password: "password"
coreDatabase: "registry"
notaryServerDatabase: "notary_server"
notarySignerDatabase: "notary_signer"
# "disable" - No SSL
# "require" - Always SSL (skip verification)
# "verify-ca" - Always SSL (verify that the certificate presented by the
# server was signed by a trusted CA)
# "verify-full" - Always SSL (verify that the certification presented by the
# server was signed by a trusted CA and the server host name matches the one
# in the certificate)
sslmode: "disable"
# The maximum number of connections in the idle connection pool per pod (core+exporter).
# If it is <= 0, no idle connections are retained.
maxIdleConns: 100
# The maximum number of open connections to the database per pod (core+exporter).
# If it <= 0, then there is no limit on the number of open connections.
# Note: the default maximum number of connections is 1024 for Harbor's PostgreSQL.
maxOpenConns: 900
## Additional deployment annotations
podAnnotations: {}
redis:
# if external Redis is used, set "type" to "external"
# and fill the connection information in the "external" section
type: internal
internal:
# set the service account to be used, default if left empty
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
image:
repository: goharbor/redis-photon
tag: v2.4.1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
nodeSelector: {}
tolerations: []
affinity: {}
## The priority class to run the pod as
priorityClassName:
external:
# support redis, redis+sentinel
# addr for redis: <host_redis>:<port_redis>
# addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
addr: "192.168.0.2:6379"
# The name of the set of Redis instances to monitor; it must be set to support redis+sentinel
sentinelMasterSet: ""
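# e.g. for redis+sentinel (hypothetical sentinel hosts and master set name):
# addr: "sentinel-0:26379,sentinel-1:26379,sentinel-2:26379"
# sentinelMasterSet: "mymaster"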
# The "coreDatabaseIndex" must be "0" as the library Harbor
# used doesn't support configuring it
coreDatabaseIndex: "0"
jobserviceDatabaseIndex: "1"
registryDatabaseIndex: "2"
chartmuseumDatabaseIndex: "3"
trivyAdapterIndex: "5"
password: ""
## Additional deployment annotations
podAnnotations: {}
exporter:
replicas: 1
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
podAnnotations: {}
serviceAccountName: ""
# mount the service account token
automountServiceAccountToken: false
image:
repository: goharbor/harbor-exporter
tag: v2.4.1
nodeSelector: {}
tolerations: []
affinity: {}
cacheDuration: 23
cacheCleanInterval: 14400
## The priority class to run the pod as
priorityClassName:
metrics:
enabled: false
core:
path: /metrics
port: 8001
registry:
path: /metrics
port: 8001
jobservice:
path: /metrics
port: 8001
exporter:
path: /metrics
port: 8001
## Create prometheus serviceMonitor to scrape harbor metrics.
## This requires the monitoring.coreos.com/v1 CRD. Please see
## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md
##
serviceMonitor:
enabled: false
additionalLabels: {}
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: ""
# Metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# Relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
trace:
enabled: false
# trace provider: jaeger or otel
# jaeger should be 1.26+
provider: jaeger
# set sample_rate to 1 if you want to sample 100% of trace data; set it to 0.5 to sample 50%, and so forth
sample_rate: 1
# namespace used to differentiate different harbor services
# namespace:
# attributes is a key value dict contains user defined attributes used to initialize trace provider
# attributes:
# application: harbor
jaeger:
# jaeger supports two modes:
# collector mode(uncomment endpoint and uncomment username, password if needed)
# agent mode(uncomment agent_host and agent_port)
endpoint: http://hostname:14268/api/traces
# username:
# password:
# agent_host: hostname
# export trace data by jaeger.thrift in compact mode
# agent_port: 6831
otel:
endpoint: hostname:4318
url_path: /v1/traces
compression: false
insecure: true
timeout: 10s

View File

@ -0,0 +1,599 @@
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## @section Common parameters
## @param kubeVersion Override Kubernetes version
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param clusterDomain Kubernetes cluster domain name
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @section Jenkins Image parameters
## Bitnami Jenkins image
## ref: https://hub.docker.com/r/bitnami/jenkins/tags/
## @param image.registry Jenkins image registry
## @param image.repository Jenkins image repository
## @param image.tag Jenkins image tag (immutable tags are recommended)
## @param image.pullPolicy Jenkins image pull policy
## @param image.pullSecrets Jenkins image pull secrets
## @param image.debug Enable image debug mode
##
image:
registry: docker.io
repository: bitnami/jenkins
tag: 2.319.1-debian-10-r0
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Enable debug mode
##
debug: false
## @section Jenkins Configuration parameters
## Jenkins settings based on environment variables
## ref: https://github.com/bitnami/bitnami-docker-jenkins#configuration
## @param jenkinsUser Jenkins username
##
jenkinsUser: admin
## @param jenkinsPassword Jenkins user password
## Defaults to a random 10-character alphanumeric string if not set
##
jenkinsPassword: "gkxl###650"
## @param jenkinsHost Jenkins host to create application URLs
##
jenkinsHost: "jenkins.sino-assist.com"
## @param jenkinsHome Jenkins home directory
##
jenkinsHome: /bitnami/jenkins/home
## @param javaOpts Custom JVM parameters
##
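## e.g. (illustrative heap-size flags; adjust to your workload):
## javaOpts:
##   - "-Xms512m"
##   - "-Xmx1024m"
##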
javaOpts: []
## @param disableInitialization Skip performing the initial bootstrapping for Jenkins
##
disableInitialization: "no"
## @param command Override default container command (useful when using custom images)
##
command: []
## @param args Override default container args (useful when using custom images)
##
args: []
## @param extraEnvVars Array with extra environment variables to add to the Jenkins container
## e.g:
## extraEnvVars:
## - name: FOO
## value: "bar"
##
extraEnvVars: []
## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Name of existing Secret containing extra env vars
##
extraEnvVarsSecret: ""
## @section Jenkins deployment parameters
## @param updateStrategy.type Jenkins deployment strategy type
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## NOTE: Set it to `Recreate` if you use a PV that cannot be mounted on multiple pods
## e.g:
## updateStrategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
updateStrategy:
type: RollingUpdate
## @param priorityClassName Jenkins pod priority class name
##
priorityClassName: ""
## @param hostAliases Jenkins pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
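## e.g. (hypothetical entry, using the standard Kubernetes hostAliases format):
## hostAliases:
##   - ip: "192.168.0.10"
##     hostnames:
##       - "gitlab.internal"
##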
hostAliases: []
## @param extraVolumes Optionally specify extra list of additional volumes for Jenkins pods
##
extraVolumes: []
## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Jenkins container(s)
##
extraVolumeMounts: []
## @param sidecars Add additional sidecar containers to the Jenkins pod
## e.g:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param initContainers Add additional init containers to the Jenkins pods
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
## e.g:
## initContainers:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
initContainers: []
## @param lifecycleHooks Add lifecycle hooks to the Jenkins deployment
##
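## e.g. (illustrative postStart hook in the standard Kubernetes lifecycle format):
## lifecycleHooks:
##   postStart:
##     exec:
##       command: ["/bin/sh", "-c", "echo 'Jenkins container started'"]
##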
lifecycleHooks: {}
## @param podLabels Extra labels for Jenkins pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param podAnnotations Annotations for Jenkins pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
##
nodeAffinityPreset:
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set
##
key: ""
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Jenkins containers' resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param resources.limits The resources limits for the Jenkins container
## @param resources.requests [object] The requested resources for the Jenkins container
##
resources:
limits: {}
requests:
memory: 512Mi
cpu: 300m
## Container ports
## @param containerPorts.http Jenkins HTTP container port
## @param containerPorts.https Jenkins HTTPS container port
##
containerPorts:
http: 8080
https: 8443
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param podSecurityContext.enabled Enabled Jenkins pods' Security Context
## @param podSecurityContext.fsGroup Set Jenkins pod's Security Context fsGroup
##
podSecurityContext:
enabled: true
fsGroup: 1001
## Configure Container Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param containerSecurityContext.enabled Enabled Jenkins containers' Security Context
## @param containerSecurityContext.runAsUser Set Jenkins container's Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set Jenkins container's Security Context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
runAsNonRoot: true
## Configure extra options for Jenkins containers' liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param livenessProbe.enabled Enable livenessProbe
## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
## @param livenessProbe.periodSeconds Period seconds for livenessProbe
## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
## @param livenessProbe.successThreshold Success threshold for livenessProbe
##
livenessProbe:
enabled: true
initialDelaySeconds: 180
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe.enabled Enable readinessProbe
## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
## @param readinessProbe.periodSeconds Period seconds for readinessProbe
## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
## @param readinessProbe.successThreshold Success threshold for readinessProbe
##
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 3
successThreshold: 1
failureThreshold: 3
## @param customLivenessProbe Custom livenessProbe that overrides the default one
##
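## e.g. (illustrative sketch; assumes the Jenkins login page at `/login` is a suitable health endpoint in your setup):
## customLivenessProbe:
##   httpGet:
##     path: /login
##     port: 8080
##   initialDelaySeconds: 180
##   periodSeconds: 10
##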
customLivenessProbe: {}
## @param customReadinessProbe Custom readinessProbe that overrides the default one
##
customReadinessProbe: {}
## @section Traffic Exposure Parameters
## Jenkins service parameters
##
service:
## @param service.type Jenkins service type
##
type: LoadBalancer
## @param service.port Jenkins service HTTP port
##
port: 80
## @param service.httpsPort Jenkins service HTTPS port
##
httpsPort: 443
## Node ports to expose
## @param service.nodePorts.http Node port for HTTP
## @param service.nodePorts.https Node port for HTTPS
## NOTE: choose port between <30000-32767>
##
nodePorts:
http: ""
https: ""
## @param service.clusterIP Jenkins service Cluster IP
## e.g.:
## clusterIP: None
##
clusterIP: ""
## @param service.loadBalancerIP Jenkins service Load Balancer IP
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerIP: ""
## @param service.loadBalancerSourceRanges Jenkins service Load Balancer sources
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param service.externalTrafficPolicy Jenkins service external traffic policy
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param service.annotations Additional custom annotations for Jenkins service
##
annotations: {}
## Configure the ingress resource that allows you to access the Jenkins installation
## ref: https://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## @param ingress.enabled Enable ingress record generation for Jenkins
##
enabled: false
## @param ingress.pathType Ingress path type
##
pathType: ImplementationSpecific
## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
##
apiVersion: ""
## @param ingress.hostname Default host for the ingress record
##
hostname: jenkins.local
## @param ingress.path Default path for the ingress record
## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
##
path: /
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
## You can:
## - Use the `ingress.secrets` parameter to create this TLS secret
##   - Rely on cert-manager to create it by setting the corresponding annotations
##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
##
tls: false
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
## certManager: false
##
## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
##
selfSigned: false
## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
## e.g:
## extraHosts:
## - name: jenkins.local
## path: /
##
extraHosts: []
## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
## e.g:
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## e.g:
## extraTls:
## - hosts:
## - jenkins.local
## secretName: jenkins.local-tls
##
extraTls: []
## @param ingress.secrets Custom TLS certificates as secrets
## NOTE: 'key' and 'certificate' are expected in PEM format
## NOTE: 'name' should line up with a 'secretName' set further up
## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## secrets:
## - name: jenkins.local-tls
## key: |-
## -----BEGIN RSA PRIVATE KEY-----
## ...
## -----END RSA PRIVATE KEY-----
## certificate: |-
## -----BEGIN CERTIFICATE-----
## ...
## -----END CERTIFICATE-----
##
secrets: []
## @section Persistence Parameters
## Persistence Parameters
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## @param persistence.enabled Enable persistence using Persistent Volume Claims
##
enabled: true
## @param persistence.storageClass Persistent Volume storage class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
##
storageClass: "nfs-client-207"
## @param persistence.annotations Additional custom annotations for the PVC
##
annotations: {}
## @param persistence.accessModes [array] Persistent Volume access modes
##
accessModes:
- ReadWriteOnce
## @param persistence.size Persistent Volume size
##
size: 8Gi
## 'volumePermissions' init container parameters
## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
## based on the podSecurityContext/containerSecurityContext parameters
##
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
##
enabled: true
## Bitnami Shell image
## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
## @param volumePermissions.image.registry Bitnami Shell image registry
## @param volumePermissions.image.repository Bitnami Shell image repository
## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended)
## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy
## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets
##
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r273
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Init container's resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param volumePermissions.resources.limits The resources limits for the init container
## @param volumePermissions.resources.requests The requested resources for the init container
##
resources:
limits: {}
requests: {}
## Init container Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param volumePermissions.securityContext.runAsUser Set init container's Security Context runAsUser
## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
##
securityContext:
runAsUser: 0
## @section Metrics Parameters
metrics:
## @param metrics.enabled Start a sidecar prometheus exporter to expose Jenkins metrics
##
enabled: false
## Bitnami Jenkins Exporter image
## ref: https://hub.docker.com/r/bitnami/jenkins-exporter/tags/
## @param metrics.image.registry Jenkins Exporter image registry
## @param metrics.image.repository Jenkins Exporter image repository
## @param metrics.image.tag Jenkins Exporter image tag (immutable tags are recommended)
## @param metrics.image.pullPolicy Jenkins Exporter image pull policy
## @param metrics.image.pullSecrets Jenkins Exporter image pull secrets
##
image:
registry: docker.io
repository: bitnami/jenkins-exporter
tag: 0.20171225.0-debian-10-r641
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param metrics.containerSecurityContext.enabled Enabled Jenkins exporter containers' Security Context
## @param metrics.containerSecurityContext.runAsUser Set Jenkins exporter containers' Security Context runAsUser
##
containerSecurityContext:
enabled: true
runAsUser: 1001
## Jenkins exporter resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param metrics.resources.limits The resources limits for the Jenkins exporter container
## @param metrics.resources.requests The requested resources for the Jenkins exporter container
##
resources:
limits: {}
requests: {}
## Jenkins exporter service parameters
##
service:
## @param metrics.service.type Jenkins exporter service type
##
type: ClusterIP
## @param metrics.service.port Jenkins exporter service port
##
port: 9122
## @param metrics.service.nodePort Node port for exporter
##
nodePort: ""
## @param metrics.service.externalTrafficPolicy Jenkins exporter service external traffic policy
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param metrics.service.loadBalancerIP Jenkins exporter service Load Balancer IP
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param metrics.service.loadBalancerSourceRanges Jenkins exporter service Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g.
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param metrics.service.annotations [object] Additional custom annotations for Jenkins exporter service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.service.port }}"
## Prometheus Service Monitor
## ref: https://github.com/coreos/prometheus-operator
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator
##
enabled: false
## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created
##
namespace: ""
## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped
##
interval: 30s
## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.relabellings Metrics relabellings to add to the scrape endpoint
##
relabellings: []
## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
##
honorLabels: false
## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
##
additionalLabels: {}

View File

@ -0,0 +1,573 @@
## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: ""
## @section Common parameters
## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template with a string
##
fullnameOverride: ""
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## @section Kibana parameters
## Bitnami Kibana image version
## ref: https://hub.docker.com/r/bitnami/kibana/tags/
## @param image.registry Kibana image registry
## @param image.repository Kibana image repository
## @param image.tag Kibana image tag (immutable tags are recommended)
## @param image.pullPolicy Kibana image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
##
image:
registry: docker.io
repository: bitnami/kibana
tag: 7.16.2-debian-10-r0
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param replicaCount Number of replicas of the Kibana Pod
##
replicaCount: 1
## @param updateStrategy.type Set up update strategy for Kibana installation.
## Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the pods are destroyed first.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## Example:
## updateStrategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
updateStrategy:
type: RollingUpdate
## @param schedulerName Alternative scheduler
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
## @param hostAliases Add deployment host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []
## @param plugins Array containing the Kibana plugins to be installed in deployment
## eg:
## plugins:
## - https://github.com/fbaligand/kibana-enhanced-table/releases/download/v1.5.0/enhanced-table-1.5.0_7.3.2.zip
##
plugins: []
## Saved objects to import (NDJSON format)
##
savedObjects:
## @param savedObjects.urls Array containing links to NDJSON files to be imported during Kibana initialization
## e.g:
## urls:
## - www.example.com/dashboard.ndjson
##
urls: []
## @param savedObjects.configmap Configmap containing NDJSON files to be imported during Kibana initialization (evaluated as a template)
##
configmap: ""
## @param extraConfiguration Extra settings to be added to the default kibana.yml configmap that the chart creates (unless replaced using `configurationCM`). Evaluated as a template
##
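## e.g. (illustrative sketch; `server.maxPayloadBytes` is assumed to be a valid kibana.yml option for this Kibana version):
## extraConfiguration:
##   "server.maxPayloadBytes": 1048576
##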
extraConfiguration: {}
## @param configurationCM ConfigMap containing a kibana.yml file that will replace the default one specified in configuration.yaml
##
configurationCM: ""
## @param extraEnvVars Array containing extra env vars to configure Kibana
## For example:
## extraEnvVars:
## - name: KIBANA_ELASTICSEARCH_URL
## value: test
##
##
extraEnvVars:
- name: TZ
value: "Asia/Shanghai"
# extraEnvVars: []
## @param extraEnvVarsCM ConfigMap containing extra env vars to configure Kibana
##
extraEnvVarsCM: ""
## @param extraEnvVarsSecret Secret containing extra env vars to configure Kibana (in case of sensitive data)
##
extraEnvVarsSecret: ""
## @param extraVolumes Array to add extra volumes. Requires setting `extraVolumeMounts`
##
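## e.g. (illustrative pairing with `extraVolumeMounts`; the ConfigMap name and mount path are hypothetical):
## extraVolumes:
##   - name: extra-dashboards
##     configMap:
##       name: kibana-extra-dashboards
## extraVolumeMounts:
##   - name: extra-dashboards
##     mountPath: /opt/extra-dashboards
##     readOnly: true
##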
extraVolumes: []
## @param extraVolumeMounts Array to add extra mounts. Normally used with `extraVolumes`
##
extraVolumeMounts: []
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work)
##
enabled: false
## @param volumePermissions.image.registry Init container volume-permissions image registry
## @param volumePermissions.image.repository Init container volume-permissions image name
## @param volumePermissions.image.tag Init container volume-permissions image tag
## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
##
image:
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r284
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param volumePermissions.resources Volume Permissions resources
## resources:
## requests:
## memory: 128Mi
## cpu: 100m
resources: {}
## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
## @param persistence.enabled Enable persistence
##
enabled: true
## @param persistence.storageClass Kibana data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim`
##
existingClaim: ""
## @param persistence.accessMode Access mode to the PV
##
accessMode: ReadWriteOnce
## @param persistence.size Size for the PV
##
size: 10Gi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param livenessProbe.enabled Enable/disable the Liveness probe
## @param livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param livenessProbe.periodSeconds How often to perform the probe
## @param livenessProbe.timeoutSeconds When the probe times out
## @param livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
## @param livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
##
livenessProbe:
enabled: true
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param readinessProbe.enabled Enable/disable the Readiness probe
## @param readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param readinessProbe.periodSeconds How often to perform the probe
## @param readinessProbe.timeoutSeconds When the probe times out
## @param readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded.
## @param readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed.
##
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## @param forceInitScripts Force execution of init scripts
##
forceInitScripts: false
## @param initScriptsCM Configmap with init scripts to execute
##
initScriptsCM: ""
## @param initScriptsSecret Secret with init scripts to execute (for sensitive data)
##
initScriptsSecret: ""
## Service configuration
##
service:
## @param service.port Kubernetes Service port
##
port: 5601
## @param service.type Kubernetes Service type
##
type: ClusterIP
## @param service.nodePort Specify the nodePort value for the LoadBalancer and NodePort service types
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
nodePort: ""
## @param service.externalTrafficPolicy Enable client source IP preservation
## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param service.annotations Annotations for Kibana service (evaluated as a template)
## This can be used to set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
## @param service.labels Extra labels for Kibana service
##
labels: {}
## @param service.loadBalancerIP loadBalancerIP if Kibana service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
loadBalancerIP: ""
## @param service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value)
##
extraPorts: []
## Configure the ingress resource that allows you to access the
## Kibana installation. Set up the URL
## ref: https://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## @param ingress.enabled Enable ingress controller resource
##
enabled: true
## DEPRECATED: Use ingress.annotations instead of ingress.certManager
## certManager: false
##
## @param ingress.pathType Ingress Path type
##
pathType: ImplementationSpecific
## @param ingress.apiVersion Override API Version (automatically detected if not set)
##
apiVersion: ""
## @param ingress.hostname Default host for the ingress resource. If specified as "*" no host rule is configured
##
hostname: crm1.sino-assist.com
## @param ingress.path The Path to Kibana. You may need to set this to '/*' in order to use this with ALB ingress controllers.
##
path: /kibana
## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
## Use this parameter to set the required annotations for cert-manager, see
## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
##
## e.g:
## annotations:
## kubernetes.io/ingress.class: nginx
## cert-manager.io/cluster-issuer: cluster-issuer-name
##
annotations: {}
## @param ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
##
tls: false
## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## extraHosts:
## - name: kibana.local
## path: /
##
extraHosts: []
## @param ingress.extraPaths Additional arbitrary path/backend objects
## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
## extraPaths:
## - path: /*
## backend:
## serviceName: ssl-redirect
## servicePort: use-annotation
##
extraPaths: []
## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## extraTls:
## - hosts:
## - kibana.local
## secretName: kibana.local-tls
##
extraTls: []
## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
## e.g:
## - name: kibana.local-tls
## key:
## certificate:
##
secrets: []
## @param serviceAccount.create Enable creation of ServiceAccount for Kibana
## @param serviceAccount.name Name of serviceAccount
## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
serviceAccount:
create: true
name: ""
annotations: {}
## @param containerPort Port to expose at container level
##
containerPort: 5601
## @param securityContext.enabled Enable securityContext on for Kibana deployment
## @param securityContext.fsGroup Group to configure permissions for volumes
## @param securityContext.runAsUser User for the security context
## @param securityContext.runAsNonRoot Set container's Security Context runAsNonRoot
##
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001
runAsNonRoot: true
## Kibana resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param resources.limits The resources limits for the container
## @param resources.requests The requested resources for the container
##
resources:
## Example:
## limits:
## cpu: 100m
## memory: 256Mi
limits: {}
## Examples:
## requests:
## cpu: 100m
## memory: 256Mi
requests: {}
## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAffinityPreset: ""
## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## Allowed values: soft, hard
##
nodeAffinityPreset:
## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
##
type: ""
## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set.
## E.g.
## key: "kubernetes.io/e2e-az-name"
##
key: ""
## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
## E.g.
## values:
## - e2e-az1
## - e2e-az2
##
values: []
## @param affinity Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}
## @param nodeSelector Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param tolerations Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## @param podAnnotations Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## @param podLabels Extra labels to add to Pod
##
podLabels: {}
## @param sidecars Attach additional containers to the pod
## e.g.
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param initContainers Add additional init containers to the pod
## e.g.
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
initContainers: []
## @param configuration [object] Kibana configuration
##
configuration:
server:
basePath: "/kibana"
rewriteBasePath: true
i18n.locale: "zh-CN"
## Prometheus metrics (requires the kibana-prometheus-exporter plugin)
##
metrics:
## @param metrics.enabled Start a side-car prometheus exporter
##
enabled: false
service:
## @param metrics.service.annotations [object] Prometheus annotations for the Kibana service
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "80"
prometheus.io/path: "_prometheus/metrics"
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled If `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
##
namespace: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## interval: 10s
##
interval: ""
## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
## e.g:
## scrapeTimeout: 10s
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.selector Prometheus instance selector labels
## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
## e.g:
## selector:
## prometheus: my-prometheus
##
# selector:
# prometheus: my-prometheus
selector: {}
## @section Kibana server TLS configuration
##
tls:
## @param tls.enabled Enable SSL/TLS encryption for Kibana server (HTTPS)
##
enabled: false
## @param tls.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates.
##
autoGenerated: false
## @param tls.existingSecret Name of the existing secret containing Kibana server certificates
##
existingSecret: ""
## @param tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of PKCS12
## Note: Ignored when using autoGenerated certs.
##
usePemCerts: false
## @param tls.keyPassword Password to access the PEM key when it is password-protected.
##
keyPassword: ""
## @param tls.keystorePassword Password to access the PKCS12 keystore when it is password-protected.
##
keystorePassword: ""
## @param tls.passwordsSecret Name of an existing secret containing the Keystore or PEM key password
##
passwordsSecret: ""
## @section Elasticsearch parameters
##
elasticsearch:
## @param elasticsearch.hosts List of elasticsearch hosts to connect to.
## e.g:
hosts:
- elasticsearch-data
##
# hosts: []
## @param elasticsearch.port Elasticsearch port
##
port: "9200"
security:
auth:
## @param elasticsearch.security.auth.enabled Set to 'true' if Elasticsearch has authentication enabled
##
enabled: false
## @param elasticsearch.security.auth.kibanaUsername Kibana server user to authenticate with Elasticsearch
##
kibanaUsername: "elastic"
## @param elasticsearch.security.auth.kibanaPassword Kibana server password to authenticate with Elasticsearch
##
kibanaPassword: ""
## @param elasticsearch.security.auth.existingSecret Name of the existing secret containing the Password for the Kibana user
##
existingSecret: ""
tls:
## @param elasticsearch.security.tls.enabled Set to 'true' if Elasticsearch API uses TLS/SSL (HTTPS)
##
enabled: false
## @param elasticsearch.security.tls.verificationMode Verification mode for SSL communications.
## Supported values: full, certificate, none.
## Ref: https://www.elastic.co/guide/en/kibana/7.x/settings.html#elasticsearch-ssl-verificationmode
verificationMode: "full"
## @param elasticsearch.security.tls.existingSecret Name of the existing secret containing Elasticsearch Truststore or CA certificate. Required unless verificationMode=none
##
existingSecret: ""
## @param elasticsearch.security.tls.usePemCerts Set to 'true' to use PEM certificates instead of PKCS12.
##
usePemCerts: false
## @param elasticsearch.security.tls.truststorePassword Password to access the PKCS12 truststore in case it is password-protected.
##
truststorePassword: ""
## @param elasticsearch.security.tls.passwordsSecret Name of an existing secret containing the Truststore password
##
passwordsSecret: ""

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,89 @@
# Default values for nacos.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
mode: standalone
# mode: cluster
############################nacos###########################
nacos:
image:
repository: nacos/nacos-server
tag: v2.0.3
pullPolicy: IfNotPresent
plugin:
enable: true
image:
repository: nacos/nacos-peer-finder-plugin
tag: 1.1
replicaCount: 1
domainName: cluster.local
preferhostmode: hostname
serverPort: 8848
health:
enabled: false
storage:
# type: embedded
type: mysql
db:
host: mysql
name: nacos
port: 3306
username: root
password: gkxl650
param: characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
persistence:
enabled: false
data:
accessModes:
- ReadWriteOnce
storageClassName: manual
resources:
requests:
storage: 5Gi
service:
#type: ClusterIP
type: NodePort
port: 8848
nodePort: 30000
ingress:
enabled: false
annotations: { }
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: nacos.example.com
paths: [ ]
tls: [ ]
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
requests:
cpu: 500m
memory: 2Gi
annotations: { }
nodeSelector: { }
tolerations: [ ]
affinity: { }

View File

@ -0,0 +1,89 @@
# Default values for nacos.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
mode: standalone
# mode: cluster
############################nacos###########################
nacos:
image:
repository: nacos/nacos-server
tag: v2.0.3
pullPolicy: IfNotPresent
plugin:
enable: true
image:
repository: nacos/nacos-peer-finder-plugin
tag: 1.1
replicaCount: 1
domainName: cluster.local
preferhostmode: hostname
serverPort: 8848
health:
enabled: false
storage:
# type: embedded
type: mysql
db:
host: mysql
name: nacos
port: 3306
username: root
password: gkxl650
param: characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
persistence:
enabled: false
data:
accessModes:
- ReadWriteOnce
storageClassName: manual
resources:
requests:
storage: 5Gi
service:
#type: ClusterIP
type: NodePort
port: 8848
nodePort: 30000
ingress:
enabled: false
annotations: { }
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: nacos.example.com
paths: [ ]
tls: [ ]
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
requests:
cpu: 500m
memory: 2Gi
annotations: { }
nodeSelector: { }
tolerations: [ ]
affinity: { }

View File

@ -0,0 +1,218 @@
/*
* Copyright 1999-2018 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/******************************************/
/* Database name = nacos_config */
/* Table name = config_info */
/******************************************/
CREATE TABLE `config_info` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(255) DEFAULT NULL,
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`app_name` varchar(128) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
`c_desc` varchar(256) DEFAULT NULL,
`c_use` varchar(64) DEFAULT NULL,
`effect` varchar(64) DEFAULT NULL,
`type` varchar(64) DEFAULT NULL,
`c_schema` text,
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
/******************************************/
/* Database name = nacos_config */
/* Table name = config_info_aggr */
/******************************************/
CREATE TABLE `config_info_aggr` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(255) NOT NULL COMMENT 'group_id',
`datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
`content` longtext NOT NULL COMMENT '内容',
`gmt_modified` datetime NOT NULL COMMENT '修改时间',
`app_name` varchar(128) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';
/******************************************/
/* Database name = nacos_config */
/* Table name = config_info_beta */
/******************************************/
CREATE TABLE `config_info_beta` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';
/******************************************/
/* Database name = nacos_config */
/* Table name = config_info_tag */
/******************************************/
CREATE TABLE `config_info_tag` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL COMMENT 'content',
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
`src_user` text COMMENT 'source user',
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';
/******************************************/
/* Database name = nacos_config */
/* Table name = config_tags_relation */
/******************************************/
CREATE TABLE `config_tags_relation` (
`id` bigint(20) NOT NULL COMMENT 'id',
`tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
`tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
`nid` bigint(20) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`nid`),
UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';
/******************************************/
/* Database name = nacos_config */
/* Table name = group_capacity */
/******************************************/
CREATE TABLE `group_capacity` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key ID',
`group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID; empty string means the whole cluster',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'quota; 0 means use the default',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'usage',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single config, in bytes; 0 means use the default',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of aggregated sub-configs; 0 means use the default',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single sub-config of aggregated data, in bytes; 0 means use the default',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of change history records',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_group_id` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Cluster and per-Group capacity info table';
/******************************************/
/* Database name = nacos_config */
/* Table name = his_config_info */
/******************************************/
CREATE TABLE `his_config_info` (
`id` bigint(64) unsigned NOT NULL,
`nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`data_id` varchar(255) NOT NULL,
`group_id` varchar(128) NOT NULL,
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
`content` longtext NOT NULL,
`md5` varchar(32) DEFAULT NULL,
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
`src_user` text,
`src_ip` varchar(50) DEFAULT NULL,
`op_type` char(10) DEFAULT NULL,
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant field',
PRIMARY KEY (`nid`),
KEY `idx_gmt_create` (`gmt_create`),
KEY `idx_gmt_modified` (`gmt_modified`),
KEY `idx_did` (`data_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Multi-tenant support';
/******************************************/
/* Database name = nacos_config */
/* Table name = tenant_capacity */
/******************************************/
CREATE TABLE `tenant_capacity` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key ID',
`tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'quota; 0 means use the default',
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'usage',
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single config, in bytes; 0 means use the default',
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of aggregated sub-configs',
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max size of a single sub-config of aggregated data, in bytes; 0 means use the default',
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT 'max number of change history records',
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Tenant capacity info table';
CREATE TABLE `tenant_info` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
`kp` varchar(128) NOT NULL COMMENT 'kp',
`tenant_id` varchar(128) default '' COMMENT 'tenant_id',
`tenant_name` varchar(128) default '' COMMENT 'tenant_name',
`tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
`create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
`gmt_create` bigint(20) NOT NULL COMMENT 'create time',
`gmt_modified` bigint(20) NOT NULL COMMENT 'modify time',
PRIMARY KEY (`id`),
UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';
CREATE TABLE `users` (
`username` varchar(50) NOT NULL PRIMARY KEY,
`password` varchar(500) NOT NULL,
`enabled` boolean NOT NULL
);
CREATE TABLE `roles` (
`username` varchar(50) NOT NULL,
`role` varchar(50) NOT NULL,
UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
);
CREATE TABLE `permissions` (
`role` varchar(50) NOT NULL,
`resource` varchar(255) NOT NULL,
`action` varchar(8) NOT NULL,
UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE
);
INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
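-- Editor note (sketch, not part of the upstream Nacos schema): the row above seeds the
-- well-known default Nacos credential (user 'nacos', BCrypt hash of the password 'nacos').
-- Outside local testing the hash would normally be replaced, e.g. with a statement like the
-- commented one below, where '<new-bcrypt-hash>' is a placeholder for a freshly generated hash:
-- UPDATE users SET password = '<new-bcrypt-hash>' WHERE username = 'nacos';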
View File
@ -0,0 +1,103 @@
replicaCount: 1
strategyType: Recreate
image:
repository: dyrnq/nfs-subdir-external-provisioner
tag: v4.0.2
pullPolicy: IfNotPresent
imagePullSecrets: []
nfs:
server: 192.168.1.201
path: /nfs
mountOptions:
volumeName: nfs-subdir-external-provisioner-root
# Reclaim policy for the main nfs volume
reclaimPolicy: Retain
# For creating the StorageClass automatically:
storageClass:
create: true
# Set a provisioner name. If unset, a name will be generated.
# provisionerName:
# Set StorageClass as the default StorageClass
# Ignored if storageClass.create is false
defaultClass: false
# Set a StorageClass name
# Ignored if storageClass.create is false
name: nfs-client-201
# Allow volume to be expanded dynamically
allowVolumeExpansion: true
# Method used to reclaim an obsoleted volume
reclaimPolicy: Retain
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
archiveOnDelete: true
# If it exists and has 'delete' value, delete the directory. If it exists and has 'retain' value, save the directory.
# Overrides archiveOnDelete.
# Ignored if value not set.
onDelete:
# Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace.
# Ignored if value not set.
pathPattern:
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
accessModes: ReadWriteOnce
# Storage class annotations
annotations: {}
leaderElection:
# When set to false leader election will be disabled
enabled: true
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
# Deployment pod annotations
podAnnotations: {}
## Set pod priorityClassName
# priorityClassName: ""
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Additional labels for any resource created
labels: {}
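# Editor note (illustrative sketch, not part of the chart values): with storageClass.create=true
# and storageClass.name=nfs-client-201 above, a workload would consume the provisioner through an
# ordinary PersistentVolumeClaim that references that class. The claim name below is hypothetical:
#
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-nfs-claim
# spec:
#   accessModes:
#     - ReadWriteOnce
#   storageClassName: nfs-client-201
#   resources:
#     requests:
#       storage: 1Gi
#
# The NFS values files that follow (servers 192.168.1.207/208/209) differ only in nfs.server and storageClass.name.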
View File
@ -0,0 +1,103 @@
replicaCount: 1
strategyType: Recreate
image:
repository: dyrnq/nfs-subdir-external-provisioner
tag: v4.0.2
pullPolicy: IfNotPresent
imagePullSecrets: []
nfs:
server: 192.168.1.207
path: /nfs
mountOptions:
volumeName: nfs-subdir-external-provisioner-root
# Reclaim policy for the main nfs volume
reclaimPolicy: Retain
# For creating the StorageClass automatically:
storageClass:
create: true
# Set a provisioner name. If unset, a name will be generated.
# provisionerName:
# Set StorageClass as the default StorageClass
# Ignored if storageClass.create is false
defaultClass: false
# Set a StorageClass name
# Ignored if storageClass.create is false
name: nfs-client-207
# Allow volume to be expanded dynamically
allowVolumeExpansion: true
# Method used to reclaim an obsoleted volume
reclaimPolicy: Retain
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
archiveOnDelete: true
# If it exists and has 'delete' value, delete the directory. If it exists and has 'retain' value, save the directory.
# Overrides archiveOnDelete.
# Ignored if value not set.
onDelete:
# Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace.
# Ignored if value not set.
pathPattern:
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
accessModes: ReadWriteOnce
# Storage class annotations
annotations: {}
leaderElection:
# When set to false leader election will be disabled
enabled: true
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
# Deployment pod annotations
podAnnotations: {}
## Set pod priorityClassName
# priorityClassName: ""
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Additional labels for any resource created
labels: {}
View File
@ -0,0 +1,103 @@
replicaCount: 1
strategyType: Recreate
image:
repository: dyrnq/nfs-subdir-external-provisioner
tag: v4.0.2
pullPolicy: IfNotPresent
imagePullSecrets: []
nfs:
server: 192.168.1.208
path: /nfs
mountOptions:
volumeName: nfs-subdir-external-provisioner-root
# Reclaim policy for the main nfs volume
reclaimPolicy: Retain
# For creating the StorageClass automatically:
storageClass:
create: true
# Set a provisioner name. If unset, a name will be generated.
# provisionerName:
# Set StorageClass as the default StorageClass
# Ignored if storageClass.create is false
defaultClass: false
# Set a StorageClass name
# Ignored if storageClass.create is false
name: nfs-client-208
# Allow volume to be expanded dynamically
allowVolumeExpansion: true
# Method used to reclaim an obsoleted volume
reclaimPolicy: Retain
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
archiveOnDelete: true
# If it exists and has 'delete' value, delete the directory. If it exists and has 'retain' value, save the directory.
# Overrides archiveOnDelete.
# Ignored if value not set.
onDelete:
# Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace.
# Ignored if value not set.
pathPattern:
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
accessModes: ReadWriteOnce
# Storage class annotations
annotations: {}
leaderElection:
# When set to false leader election will be disabled
enabled: true
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
# Deployment pod annotations
podAnnotations: {}
## Set pod priorityClassName
# priorityClassName: ""
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Additional labels for any resource created
labels: {}
View File
@ -0,0 +1,103 @@
replicaCount: 1
strategyType: Recreate
image:
repository: dyrnq/nfs-subdir-external-provisioner
tag: v4.0.2
pullPolicy: IfNotPresent
imagePullSecrets: []
nfs:
server: 192.168.1.209
path: /nfs
mountOptions:
volumeName: nfs-subdir-external-provisioner-root
# Reclaim policy for the main nfs volume
reclaimPolicy: Retain
# For creating the StorageClass automatically:
storageClass:
create: true
# Set a provisioner name. If unset, a name will be generated.
# provisionerName:
# Set StorageClass as the default StorageClass
# Ignored if storageClass.create is false
defaultClass: false
# Set a StorageClass name
# Ignored if storageClass.create is false
name: nfs-client-209
# Allow volume to be expanded dynamically
allowVolumeExpansion: true
# Method used to reclaim an obsoleted volume
reclaimPolicy: Retain
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
archiveOnDelete: true
# If it exists and has 'delete' value, delete the directory. If it exists and has 'retain' value, save the directory.
# Overrides archiveOnDelete.
# Ignored if value not set.
onDelete:
# Specifies a template for creating a directory path via PVC metadata such as labels, annotations, name or namespace.
# Ignored if value not set.
pathPattern:
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
accessModes: ReadWriteOnce
# Storage class annotations
annotations: {}
leaderElection:
# When set to false leader election will be disabled
enabled: true
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
# Deployment pod annotations
podAnnotations: {}
## Set pod priorityClassName
# priorityClassName: ""
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Additional labels for any resource created
labels: {}
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
View File
@ -0,0 +1,91 @@
# Default values for torna-server.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: tanghc2020/torna
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
configuration: |-
server.port=7700
mysql.host=mysql-common:3306
mysql.username=root
mysql.password=gkxl650
extraEnvVars:
- name: JAVA_OPTS
value: "-server -Xmx512m -Xms512m -Djava.awt.headless=true"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: NodePort
port: 7700
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
View File
@ -0,0 +1,113 @@
# Default values for xxl-job-admin.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: xuxueli/xxl-job-admin
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "2.2.0"
imagePullSecrets: []
# - name: images.pull.secret
nameOverride: ""
fullnameOverride: ""
PARAMS_VALUE: "--spring.datasource.url=jdbc:mysql://mysql:3306/xxl-job?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=root --spring.datasource.password=123456"
healthChk:
enabled: true
content:
livenessProbe:
failureThreshold: 3
httpGet:
path: /xxl-job-admin/chk.html
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 60
readinessProbe:
failureThreshold: 3
httpGet:
path: /xxl-job-admin/chk.html
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 300
successThreshold: 1
timeoutSeconds: 60
resources:
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
enabled: true
content:
type: NodePort
port: 8080
targetPort: 8080
nodePort: 31033
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
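# Editor note (assumption, not verified against the chart templates): PARAMS_VALUE above is
# presumably passed to the container as the PARAMS environment variable, which the upstream
# xuxueli/xxl-job-admin image appends to the Spring Boot launch command, roughly equivalent to:
#
# env:
#   - name: PARAMS
#     value: "<contents of PARAMS_VALUE>"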
View File
@ -0,0 +1,113 @@
# Default values for xxl-job-admin.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: xuxueli/xxl-job-admin
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "2.2.0"
imagePullSecrets: []
# - name: images.pull.secret
nameOverride: ""
fullnameOverride: ""
PARAMS_VALUE: "--spring.datasource.url=jdbc:mysql://mysql:3306/xxl-job?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=root --spring.datasource.password=gkxl650"
healthChk:
enabled: true
content:
livenessProbe:
failureThreshold: 3
httpGet:
path: /xxl-job-admin/chk.html
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 60
readinessProbe:
failureThreshold: 3
httpGet:
path: /xxl-job-admin/chk.html
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 300
successThreshold: 1
timeoutSeconds: 60
resources:
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
enabled: true
content:
type: NodePort
port: 8080
targetPort: 8080
nodePort: 31033
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
View File
@ -0,0 +1,113 @@
# Default values for xxl-job-admin.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: xuxueli/xxl-job-admin
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "2.3.0"
imagePullSecrets: []
# - name: images.pull.secret
nameOverride: ""
fullnameOverride: ""
PARAMS_VALUE: "--spring.datasource.url=jdbc:mysql://192.168.10.21:3306/xxl_job?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=root --spring.datasource.password=nczl@sino_db"
healthChk:
enabled: true
content:
livenessProbe:
failureThreshold: 3
httpGet:
path: /xxl-job-admin/chk.html
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 60
readinessProbe:
failureThreshold: 3
httpGet:
path: /xxl-job-admin/chk.html
port: 8080
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 300
successThreshold: 1
timeoutSeconds: 60
resources:
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
enabled: true
content:
type: NodePort
port: 8080
targetPort: 8080
nodePort: 31033
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
View File
@ -0,0 +1,121 @@
#
# XXL-JOB v2.3.1-SNAPSHOT
# Copyright (c) 2015-present, xuxueli.
CREATE database if NOT EXISTS `xxl_job` default character set utf8mb4 collate utf8mb4_unicode_ci;
use `xxl_job`;
SET NAMES utf8mb4;
CREATE TABLE `xxl_job_info` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT 'executor primary key ID',
`job_desc` varchar(255) NOT NULL,
`add_time` datetime DEFAULT NULL,
`update_time` datetime DEFAULT NULL,
`author` varchar(64) DEFAULT NULL COMMENT 'author',
`alarm_email` varchar(255) DEFAULT NULL COMMENT 'alarm email',
`schedule_type` varchar(50) NOT NULL DEFAULT 'NONE' COMMENT 'schedule type',
`schedule_conf` varchar(128) DEFAULT NULL COMMENT 'schedule config; meaning depends on schedule type',
`misfire_strategy` varchar(50) NOT NULL DEFAULT 'DO_NOTHING' COMMENT 'schedule misfire strategy',
`executor_route_strategy` varchar(50) DEFAULT NULL COMMENT 'executor route strategy',
`executor_handler` varchar(255) DEFAULT NULL COMMENT 'executor job handler',
`executor_param` varchar(512) DEFAULT NULL COMMENT 'executor job parameters',
`executor_block_strategy` varchar(50) DEFAULT NULL COMMENT 'block strategy',
`executor_timeout` int(11) NOT NULL DEFAULT '0' COMMENT 'job execution timeout, in seconds',
`executor_fail_retry_count` int(11) NOT NULL DEFAULT '0' COMMENT 'fail retry count',
`glue_type` varchar(50) NOT NULL COMMENT 'GLUE type',
`glue_source` mediumtext COMMENT 'GLUE source code',
`glue_remark` varchar(128) DEFAULT NULL COMMENT 'GLUE remark',
`glue_updatetime` datetime DEFAULT NULL COMMENT 'GLUE update time',
`child_jobid` varchar(255) DEFAULT NULL COMMENT 'child job IDs, comma-separated',
`trigger_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'trigger status: 0 = stopped, 1 = running',
`trigger_last_time` bigint(13) NOT NULL DEFAULT '0' COMMENT 'last trigger time',
`trigger_next_time` bigint(13) NOT NULL DEFAULT '0' COMMENT 'next trigger time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`job_group` int(11) NOT NULL COMMENT 'executor primary key ID',
`job_id` int(11) NOT NULL COMMENT 'job primary key ID',
`executor_address` varchar(255) DEFAULT NULL COMMENT 'executor address used for this execution',
`executor_handler` varchar(255) DEFAULT NULL COMMENT 'executor job handler',
`executor_param` varchar(512) DEFAULT NULL COMMENT 'executor job parameters',
`executor_sharding_param` varchar(20) DEFAULT NULL COMMENT 'executor sharding parameter, e.g. 1/2',
`executor_fail_retry_count` int(11) NOT NULL DEFAULT '0' COMMENT 'fail retry count',
`trigger_time` datetime DEFAULT NULL COMMENT 'trigger time',
`trigger_code` int(11) NOT NULL COMMENT 'trigger result code',
`trigger_msg` text COMMENT 'trigger log',
`handle_time` datetime DEFAULT NULL COMMENT 'handle time',
`handle_code` int(11) NOT NULL COMMENT 'handle result code',
`handle_msg` text COMMENT 'handle log',
`alarm_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'alarm status: 0 = default, 1 = no alarm needed, 2 = alarm sent, 3 = alarm failed',
PRIMARY KEY (`id`),
KEY `I_trigger_time` (`trigger_time`),
KEY `I_handle_code` (`handle_code`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_log_report` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`trigger_day` datetime DEFAULT NULL COMMENT 'trigger day',
`running_count` int(11) NOT NULL DEFAULT '0' COMMENT 'running log count',
`suc_count` int(11) NOT NULL DEFAULT '0' COMMENT 'success log count',
`fail_count` int(11) NOT NULL DEFAULT '0' COMMENT 'fail log count',
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `i_trigger_day` (`trigger_day`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_logglue` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`job_id` int(11) NOT NULL COMMENT 'job primary key ID',
`glue_type` varchar(50) DEFAULT NULL COMMENT 'GLUE type',
`glue_source` mediumtext COMMENT 'GLUE source code',
`glue_remark` varchar(128) NOT NULL COMMENT 'GLUE remark',
`add_time` datetime DEFAULT NULL,
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_registry` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`registry_group` varchar(50) NOT NULL,
`registry_key` varchar(255) NOT NULL,
`registry_value` varchar(255) NOT NULL,
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `i_g_k_v` (`registry_group`,`registry_key`,`registry_value`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`app_name` varchar(64) NOT NULL COMMENT 'executor AppName',
`title` varchar(12) NOT NULL COMMENT 'executor name',
`address_type` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'executor address type: 0 = auto-register, 1 = manual entry',
`address_list` text COMMENT 'executor address list, comma-separated',
`update_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(50) NOT NULL COMMENT 'account',
`password` varchar(50) NOT NULL COMMENT 'password',
`role` tinyint(4) NOT NULL COMMENT 'role: 0 = normal user, 1 = admin',
`permission` varchar(255) DEFAULT NULL COMMENT 'permissions: executor ID list, comma-separated',
PRIMARY KEY (`id`),
UNIQUE KEY `i_username` (`username`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `xxl_job_lock` (
`lock_name` varchar(50) NOT NULL COMMENT 'lock name',
PRIMARY KEY (`lock_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`, `update_time`) VALUES (1, 'xxl-job-executor-sample', '示例执行器', 0, NULL, '2018-11-03 22:21:31' );
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `schedule_type`, `schedule_conf`, `misfire_strategy`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`) VALUES (1, 1, '测试任务1', '2018-11-03 22:21:31', '2018-11-03 22:21:31', 'XXL', '', 'CRON', '0 0 0 * * ? *', 'DO_NOTHING', 'FIRST', 'demoJobHandler', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2018-11-03 22:21:31', '');
INSERT INTO `xxl_job_user`(`id`, `username`, `password`, `role`, `permission`) VALUES (1, 'admin', 'e10adc3949ba59abbe56e057f20f883e', 1, NULL);
INSERT INTO `xxl_job_lock` ( `lock_name`) VALUES ( 'schedule_lock');
commit;
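-- Editor note (sketch, not part of the upstream XXL-JOB script): the seed admin row above uses
-- the password hash 'e10adc3949ba59abbe56e057f20f883e', which is MD5('123456'); xxl-job-admin
-- stores plain MD5 hashes, so the default can be rotated with a statement like the commented
-- one below ('<new-password>' is a placeholder):
-- UPDATE xxl_job_user SET password = MD5('<new-password>') WHERE username = 'admin';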
View File
@ -0,0 +1,4 @@
INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`) VALUES (1, 'job-executor-sa-admin', 'sa-admin', 1, 'http://192.168.10.5:9801/,http://192.168.10.7:9801/');
INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`) VALUES (2, 'job-executor-sa-app', 'sa-app', 1, 'http://192.168.10.5:9803/');
INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`) VALUES (3, 'job-executor-sa-response', 'sa-response', 1, 'http://192.168.10.5:9804/');
INSERT INTO `xxl_job_group`(`id`, `app_name`, `title`, `address_type`, `address_list`) VALUES (4, 'job-executor-gps-pass', 'gps-pass', 1, 'http://192.168.10.4:9806/');
View File
@ -0,0 +1,43 @@
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (1, 1, '0/10 * * * * ?', '调度超时', '2021-04-15 11:36:11', '2021-04-21 22:18:18', 'w', '', 'ROUND', 'OverTimeDispatchOrderTimer', '', 'SERIAL_EXECUTION', 20, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:36:11', '', 1, 1640663090000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (2, 1, '0/10 * * * * ?', '预约任务处理', '2021-04-15 11:37:35', '2021-11-16 09:01:16', 'wangyuhang', '', 'FAILOVER', 'OrderAppointTimer', '', 'SERIAL_EXECUTION', 60, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:37:35', '', 1, 1640663090000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (3, 2, '0/20 * * * * ?', '车辆轨迹上传至高德', '2021-04-15 11:38:30', '2021-04-23 10:52:19', 'Q', '', 'ROUND', 'SupplierVehicleTravelTimeJobs', '', 'SERIAL_EXECUTION', 60, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:38:30', '', 1, 1640663080000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (4, 2, '0/5 * * * * ?', '救援师傅app订单推送', '2021-04-15 11:42:09', '2021-04-21 22:18:06', 'Q', '', 'ROUND', 'NewOrderPushTimer', '', 'SERIAL_EXECUTION', 60, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:42:09', '', 1, 1640663095000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (5, 1, '0/30 * * * * ?', '订单数量', '2021-04-15 11:42:52', '2021-04-21 22:18:23', 'w', '', 'ROUND', 'OrderCountTimer', '', 'SERIAL_EXECUTION', 30, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:42:52', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (6, 1, '0 0 1 * * ?', '坐席统计处理', '2021-04-15 11:43:42', '2021-04-26 15:35:24', 'w', '', 'FIRST', 'OrderSeatStatisticsTimer', '', 'SERIAL_EXECUTION', 300, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:43:42', '', 1, 1640624400000, 1640710800000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (7, 1, '0/10 * * * * ? ', '跟踪记录推送', '2021-04-15 11:44:37', '2021-10-08 23:11:13', '二哈', '', 'FAILOVER', 'OrderTrackRecordTimer', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:44:37', '', 0, 0, 0);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (8, 3, '0 0/5 * * * ?', 'dms 回传是时效修改', '2021-04-15 11:45:09', '2021-10-18 11:54:24', '杨成', '', 'FIRST', 'DMSOrderByIsUpdateTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:45:09', '', 1, 1640662800000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (9, 3, '0/20 * * * * ?', 'dms 根据时间定时回传', '2021-04-15 11:45:42', '2021-04-19 17:43:13', '杨成', '', 'FIRST', 'DMSOrderByCreateTimeTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:45:42', '', 0, 0, 0);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (10, 1, '0/10 * * * * ?', '定时获得电话记录', '2021-04-15 11:46:35', '2021-04-23 13:53:02', '杨成', '', 'FAILOVER', 'OrderPhoneRecordTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:46:35', '', 1, 1640663090000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (11, 1, '0 0 1 * * ?', '自动审核太保', '2021-04-15 11:47:24', '2021-05-18 17:34:13', 'wyh', '', 'FAILOVER', 'AutoCheckCPIC', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:47:24', '', 1, 1640624400000, 1640710800000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (12, 1, '0 2 1 * * ? ', '自动审核平安', '2021-04-15 11:48:01', '2021-05-18 17:34:29', 'wyh', '', 'FAILOVER', 'AutoCheckPingan', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:48:01', '', 1, 1640624520000, 1640710920000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (13, 1, '0 4 1 * * ?', '自动审核cyy', '2021-04-15 11:48:29', '2021-05-18 17:34:39', 'wyh', '', 'FAILOVER', 'AutoCheckCYY', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:48:29', '', 1, 1640624640000, 1640711040000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (14, 1, '0 6 1 * * ?', '自动审核上汽', '2021-04-15 11:49:19', '2021-05-18 17:35:25', 'wyh', '', 'FAILOVER', 'AutoCheckShangqi', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:49:19', '', 1, 1640624760000, 1640711160000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (15, 1, '0 8 1 * * ?', '自动审核太平', '2021-04-15 11:49:49', '2021-05-18 17:34:56', 'wyh', '', 'FAILOVER', 'AutoCheckTaiping', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:49:49', '', 1, 1640624880000, 1640711280000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (16, 1, '0 10 1 * * ?', '自动审核亚美', '2021-04-15 11:50:17', '2021-05-18 17:35:04', 'wyh', '', 'FAILOVER', 'AutoCheckYamei', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:50:17', '', 1, 1640625000000, 1640711400000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (17, 1, '0 12 1 * * ?', '自动审核其他', '2021-04-15 11:50:42', '2021-05-18 17:35:12', 'wyh', '', 'FAILOVER', 'AutoCheckOther', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:50:42', '', 1, 1640625120000, 1640711520000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (18, 1, '0/30 * * * * ?', '事件流转定时触发', '2021-04-15 11:51:13', '2021-07-30 19:31:36', 'Y', '', 'FAILOVER', 'orderEventFlowTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:51:13', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (19, 1, '0/30 * * * * ?', '工作提醒定时触发生成', '2021-04-15 11:51:42', '2021-04-23 13:50:14', 'Y', '', 'FAILOVER', 'workRemindTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:51:42', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (20, 1, '0 0 10 * * ?', '自动退回记账', '2021-04-15 11:52:05', '2021-05-17 10:37:03', 'Y', '', 'FAILOVER', 'supplierAccountAutoRollBackTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:52:05', '', 1, 1640656800000, 1640743200000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (21, 1, '0/30 * * * * ?', '子公司未联系客户提醒', '2021-04-15 11:52:29', '2021-05-17 10:39:10', 'Y', '', 'FAILOVER', 'noConcatCustomerTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:52:29', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (22, 1, '* 10 1 * * ?', '自动审核车厂', '2021-04-15 11:52:53', '2021-04-16 13:54:42', 'wyh', '', 'FIRST', 'AutoCheckZhengche', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:52:53', '', 0, 0, 0);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (23, 3, '0 0/5 * * * ?', '太保定时回传', '2021-04-15 11:53:44', '2021-08-16 15:12:21', 'Y', '', 'FIRST', 'cpicSettleListReturnTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:53:44', '', 1, 1640662800000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (24, 4, '0/30 * * * * ? *', 'GPS接收任务', '2021-04-15 11:56:43', '2021-04-21 22:17:50', '王海', '', 'FIRST', 'GpsRecevieTaskJob', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:56:43', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (25, 4, '0/30 * * * * ?', '多个GPS 定时上传(平安,太保,等)', '2021-04-15 11:57:27', '2021-08-20 10:08:02', '王海', '', 'FIRST', 'MultiGpsInfoUpJob', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:57:27', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (26, 3, '0/20 * * * * ?', '太保预约案件到达', '2021-04-15 11:58:17', '2021-04-21 22:17:46', '杨成', '', 'FIRST', 'CpicOrderArriveTimer', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:58:17', '', 1, 1640663080000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (27, 3, '0/20 * * * * ?', '平安订单到达定时器', '2021-04-15 11:58:50', '2021-04-21 22:17:42', '杨成', '', 'FIRST', 'PingAnOrderArriveTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:58:50', '', 1, 1640663080000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (28, 4, '0/30 * * * * ?', '平安CYY上传GPS数据', '2021-04-15 11:59:34', '2021-08-24 12:00:43', '杨成', '', 'FIRST', 'PinganCYYSupplierGpsUpdateTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 11:59:34', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (29, 4, '0/30 * * * * ?', '平安上传gps', '2021-04-15 12:00:06', '2021-04-21 22:18:08', '王海', '', 'FIRST', 'PinganSupplierGpsUpdateTimeJobs', '', 'SERIAL_EXECUTION', 30, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 12:00:06', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (30, 2, '0/20 * * * * ?', '车辆上下线', '2021-04-15 12:00:48', '2021-09-13 14:44:01', 'Q', '', 'FIRST', 'SupplierVehicleOfflineTimer', '', 'SERIAL_EXECUTION', 6000, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 12:00:48', '', 1, 1640663080000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (31, 2, '0/30 * * * * ?', '车辆上传经纬度', '2021-04-15 12:01:12', '2021-04-22 23:52:54', 'Q', '', 'FIRST', 'SupplierVehicleUploadGPSTime', '', 'SERIAL_EXECUTION', 60, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-15 12:01:12', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (32, 1, '0 0 2 * * ?', '记账订单自动推送', '2021-04-19 20:23:42', '2021-08-09 10:54:13', 'Y', '', 'FIRST', 'orderAutoPushTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-19 20:23:42', '', 1, 1640628000000, 1640714400000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (33, 2, '0/30 * * * * ?', '安师傅获取小修车辆', '2021-04-26 16:44:31', '2021-04-26 16:45:04', '杨成', '', 'FIRST', 'AnMasterDriverTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-26 16:44:31', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (34, 1, '0/20 * * * * ?', '代驾轨迹', '2021-04-28 15:57:47', '2021-04-28 15:58:05', 'Q', '', 'FIRST', 'orderDriveTravelTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-04-28 15:57:47', '', 1, 1640663080000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (36, 2, '0/30 * * * * ?', '车辆空移报警', '2021-05-14 16:28:29', '2021-05-15 22:46:08', 'Q', '', 'FIRST', 'SupplierVehicleAlarmTimer', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-05-14 16:28:29', '', 1, 1640663070000, 1640663100000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (37, 1, '0 0 9 * * ?', '合同订单数报警', '2021-06-25 09:52:56', '2021-08-10 11:40:37', 'Y', '', 'FIRST', 'contractOrderCountWarningTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-06-25 09:52:56', '', 1, 1640653200000, 1640739600000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (38, 1, '0 0/10 * * * ?', '车辆掉线30分钟报警', '2021-07-14 14:31:06', '2021-07-14 14:57:33', 'Y', '', 'FIRST', 'supplierAlarmWarningTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-07-14 14:31:06', '', 1, 1640662800000, 1640663400000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (39, 1, '1 0 0 * * ?', 'lowest服务商记录生成', '2021-08-27 10:04:00', '2021-08-30 16:26:47', 'Y', '', 'FIRST', 'orderLowestRecordTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-08-27 10:04:00', '', 1, 1640620801000, 1640707201000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (40, 1, '1 0 0,8,12,16,20,22 * * ?', '记录在线车辆', '2021-09-02 19:44:45', '2021-11-29 18:15:26', 'wyf', '', 'FIRST', 'supplierOnlineVehicleRecordTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-09-02 19:44:45', '', 1, 1640649601000, 1640664001000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (41, 1, '0 10 * * * ?', '电话记录统计', '2021-11-04 20:11:04', '2021-11-05 16:40:42', 'wyf', '', 'FIRST', 'orderPhoneRecordStatisticTimeJobs', '', 'SERIAL_EXECUTION', 0, 1, 'BEAN', '', 'GLUE代码初始化', '2021-11-04 20:11:04', '', 1, 1640661000000, 1640664600000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (42, 1, '0 0 * * * ?', '呼叫中心部门坐席同步', '2021-11-05 14:09:11', '2021-11-09 10:18:30', 'wyh', '', 'FIRST', 'ccSeatingGroupTimerJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-11-05 14:09:11', '', 1, 1640660400000, 1640664000000);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (43, 2, '0 0 1 1/1 * ?', '定期删除es数据', '2021-11-25 13:44:41', '2021-11-25 13:48:04', 'qian', '', 'FIRST', 'EsDataTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-11-25 13:44:41', '', 0, 0, 0);
INSERT INTO `xxl_job_info`(`id`, `job_group`, `job_cron`, `job_desc`, `add_time`, `update_time`, `author`, `alarm_email`, `executor_route_strategy`, `executor_handler`, `executor_param`, `executor_block_strategy`, `executor_timeout`, `executor_fail_retry_count`, `glue_type`, `glue_source`, `glue_remark`, `glue_updatetime`, `child_jobid`, `trigger_status`, `trigger_last_time`, `trigger_next_time`) VALUES (44, 1, '0 * * * * ?', '备注处理超时15分钟没人处理', '2021-12-23 19:50:11', '2021-12-23 19:50:11', 'Y', '', 'FIRST', 'gt15OrderRemarkDealTimeJobs', '', 'SERIAL_EXECUTION', 0, 0, 'BEAN', '', 'GLUE代码初始化', '2021-12-23 19:50:11', '', 0, 0, 0);
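-- Editor note (hedged observation): these seed rows still use the pre-2.3 `job_cron` column.
-- Against the v2.3.x `xxl_job_info` schema created earlier in this commit (which replaces
-- `job_cron` with `schedule_type`/`schedule_conf`), each cron expression would presumably move
-- into those columns instead, along the lines of the commented sketch below (columns with
-- defaults omitted):
-- INSERT INTO `xxl_job_info`(`job_group`, `job_desc`, `author`, `schedule_type`, `schedule_conf`, `glue_type`, `executor_handler`, `executor_block_strategy`)
-- VALUES (1, '调度超时', 'w', 'CRON', '0/10 * * * * ?', 'BEAN', 'OverTimeDispatchOrderTimer', 'SERIAL_EXECUTION');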