docker swarm

docker-swarm/10.5x环境配置记录/1. 安装docker.md (Normal file, 59 lines)
@@ -0,0 +1,59 @@

1. Remove old versions

Run the following to uninstall any old Docker packages:

sudo yum remove docker \
                docker-client \
                docker-client-latest \
                docker-common \
                docker-latest \
                docker-latest-logrotate \
                docker-logrotate \
                docker-engine

If `docker version` shows Docker is still present afterwards, the machine has an earlier Docker CE release installed; remove it with:

sudo yum remove docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras

After uninstalling, also clean up the old data (these are the default paths; if you changed them, clean the paths you configured instead):

sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd

2. Configure the repository

The repository is configured with the yum-config-manager tool; install the yum utilities first:

sudo yum install -y yum-utils

Once installed, add the Docker CE repository:

sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

Next, to speed up installation, switch the repository to a domestic (USTC) mirror:

sed -i 's@//download.docker.com@//mirrors.ustc.edu.cn/docker-ce@g' /etc/yum.repos.d/docker-ce.repo

3. Install

Install Docker with:

sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

4. Start Docker

After installation, reload the systemd configuration:

sudo systemctl daemon-reload

Then start the service:

sudo systemctl start docker

Once started, check that it is running, and enable it at boot:

systemctl status docker

systemctl enable docker
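
Optionally, image pulls can be accelerated the same way the yum repo was, by pointing the Docker daemon at a registry mirror. A minimal sketch, assuming the USTC mirror and that /etc/docker/daemon.json does not exist yet (merge with any existing settings instead of overwriting):

```
# write a daemon.json with a registry mirror (mirror URL is an example)
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<'EOF'
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF
# restart so the daemon picks up the new configuration, then verify
sudo systemctl restart docker
docker info | grep -A1 'Registry Mirrors'
```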

5. Disable firewalld

```
[root@ZD-CRM1 ~]# systemctl stop firewalld
[root@ZD-CRM1 ~]# systemctl disable firewalld
```
docker-swarm/10.5x环境配置记录/2. docker-swarm 初始化.md (Normal file, 28 lines)
@@ -0,0 +1,28 @@

1. On 10.51, run docker swarm init

```
[root@ZD-CRM1 ~]# docker swarm init
Swarm initialized: current node (pbbaiutisn0vsvwt8tfxwusev) is now a manager.

To add a worker to this swarm, run the following command:

    docker swarm join --token SWMTKN-1-2jliqh8rns5afbnzrrwr036p7c0kkj38188290at4xb35zgctg-ek7ku7qskkfiu2pl0dmu8q5v6 192.168.10.51:2377

To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
```

2. On the other machines, run the docker swarm join command shown above

```
docker swarm join --token SWMTKN-1-2jliqh8rns5afbnzrrwr036p7c0kkj38188290at4xb35zgctg-ek7ku7qskkfiu2pl0dmu8q5v6 192.168.10.51:2377
```

3. On 10.51, check the cluster status

```
[root@ZD-CRM1 ~]# docker node ls
ID                            HOSTNAME   STATUS    AVAILABILITY   MANAGER STATUS   ENGINE VERSION
pbbaiutisn0vsvwt8tfxwusev *   ZD-CRM1    Ready     Active         Leader           25.0.1
je9g46e68diiryiz1cddd7765     ZD-CRM2    Ready     Active                          25.0.1
fqim3l4inkscd4px8jzi9j7nc     ZD-CRM3    Ready     Active                          25.0.1
```
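
If the join token is ever lost it can be regenerated on the manager, and node labels can be attached so that the placement constraints used by the stack files later in this repo (node.labels.${NAMESPACE}_* == 1) have something to match. A minimal sketch run on 10.51; treat the exact label names as examples:

```
# reprint the worker / manager join commands at any time
docker swarm join-token worker
docker swarm join-token manager

# attach labels so constraints like node.labels.prod_es==1 can be satisfied
docker node update --label-add prod_es=1 ZD-CRM2
docker node update --label-add prod_nginx=1 ZD-CRM1
docker node inspect ZD-CRM2 --format '{{ json .Spec.Labels }}'
```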
docker-swarm/10.5x环境配置记录/3. 基本服务安装.md (Normal file, 20 lines)
@@ -0,0 +1,20 @@

1. Initialize the network

docker network create \
  --driver=overlay \
  --subnet=10.17.0.0/16 \
  --scope swarm \
  --attachable \
  prod
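
A quick way to confirm the overlay network is usable is to inspect it and attach a throwaway container (possible because of --attachable). A minimal sketch; the busybox image is an assumption, and the service name pinged is one of those listed below:

```
docker network inspect prod --format '{{ .Driver }} {{ .Scope }} {{ (index .IPAM.Config 0).Subnet }}'
docker run --rm -it --network prod busybox ping -c 2 prod_rabbitmq_stats
```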

2. rabbitmq

Admin UI: 192.168.10.51:15672
Internal: prod_rabbitmq_stats:5672, prod_rabbitmq_queue1:5672, prod_rabbitmq_queue2:5672
STOMP URL: 192.168.10.51:15674

3. xxl-job-admin

Admin UI: 192.168.10.51:9991

4. nacos

Admin UI: http://192.168.10.51:25848/nacos/
docker-swarm/10.5x环境配置记录/portainer_template.json (Normal file, 27 lines)
@@ -0,0 +1,27 @@

{
  "version": "3",
  "templates": [
    {
      "id": 52,
      "type": 2,
      "title": "nacos cluster",
      "description": "nacos cluster",
      "categories": ["sino"],
      "platform": "linux",
      "logo": "",
      "repository": {
        "url": "https://git.sino-assist.com//templates",
        "stackfile": "stacks/liveswitch/docker-stack.yml"
      },
      "env": [
        {
          "name": "POSTGRES_PASSWORD",
          "label": "Postgres password"
        }
      ]
    }
  ]
}
docker-swarm/10.5x环境配置记录/前期处理.md (Normal file, 2 lines)
@@ -0,0 +1,2 @@

1. All Redis data needs to be switched to a stateless setup
2.
@@ -1,8 +1,8 @@
NAMESPACE=review
-canal_instance_master_address=192.168.10.10:3306
+canal_instance_master_address=192.168.3.123:3306
canal_instance_dbUsername=repl
canal_instance_dbPassword=nczl@sino_db
-canal_instance_filter_regex=zd_rescue\\.user_order_20.*,zd_rescue\\.task_order_20.*,zd_rescue\\.task_order_cost_20.*,zd_rescue\\.supplier_account_record_20.*,zd_rescue\\.customer_order_account_20.*,zd_rescue\\.customer_order_relation_20.*
+canal_instance_filter_regex=zd_rescue.user_order_20.*,zd_rescue.task_order_20.*,zd_rescue.task_order_cost_20.*,zd_rescue.supplier_account_record_20.*,zd_rescue.customer_order_account_20.*,zd_rescue.customer_order_relation_20.*,zd_rescue.order_lowest_record
canal_mq_topic=canal_mysql_bin
rabbitmq_host=192.168.3.110:5672
rabbitmq_exchange=canal_exchange
@@ -18,6 +18,13 @@ services:
    deploy:
      update_config:
        order: start-first
+     resources:
+       limits:
+         cpus: "8"
+         memory: 24G
+       reservations:
+         cpus: "2"
+         memory: 12G
      placement:
        constraints:
          - node.labels.${NAMESPACE}_clickhouse==1

@@ -16,6 +16,13 @@ services:
    deploy:
      update_config:
        order: start-first
+     resources:
+       limits:
+         cpus: "1"
+         memory: 1G
+       reservations:
+         cpus: "0.1"
+         memory: 200M
      placement:
        constraints:
          - node.labels.${NAMESPACE}_datart==1

@@ -43,6 +50,13 @@ services:
      - source: my_conf
        target: /opt/bitnami/mysql/conf/my_custom.cnf
    deploy:
+     resources:
+       limits:
+         cpus: "2"
+         memory: 2G
+       reservations:
+         cpus: "0.1"
+         memory: 500M
      placement:
        constraints:
          - node.labels.${NAMESPACE}_datart==1

@@ -53,6 +67,13 @@ services:
    ports:
      - '${CHROME_PORT}:4444'
    deploy:
+     resources:
+       limits:
+         cpus: "2"
+         memory: 6G
+       reservations:
+         cpus: "1"
+         memory: 2G
      update_config:
        order: start-first
      placement:
@@ -1,4 +1,9 @@

# Deploy single-node ES in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_es --with-registry-auth


# Deploy single-node ES in the prod environment (logs only)

env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_log_es --with-registry-auth
@@ -5,7 +5,7 @@ networks:
    external: true
services:
  elasticsearch:
-   image: harbor.sino-assist.com/marsal1212/elasticsearch:7.17.3
+   image: docker.io/bitnami/elasticsearch:8.13.4
    hostname: ${NAMESPACE}-es-elasticsearch
    ports:
      - '${NODE_PORT}:9200'

@@ -13,20 +13,22 @@ services:
    environment:
      - TZ=Asia/Shanghai
    volumes:
-     - 'data_db:/bitnami/elasticsearch/data'
+     - '/mnt/data/volumes/elasticsearch:/bitnami/elasticsearch/data'
    deploy:
      placement:
        constraints:
          - node.labels.${NAMESPACE}_es==1
  kibana:
-   image: docker.io/bitnami/kibana:7.17.3
+   image: docker.io/bitnami/kibana:8.13.4
+   hostname: ${NAMESPACE}-es-kibana
    ports:
      - "${NODE_PORT_KIBANA}:5601"
    volumes:
-     - "kibana_data:/bitnami/kibana"
+     - "/mnt/data/volumes/kibana/data:/bitnami/kibana/data"
+     - "/mnt/data/volumes/kibana/conf:/opt/bitnami/kibana/conf"
    environment:
      - TZ=Asia/Shanghai
-     - KIBANA_ELASTICSEARCH_URL=${NAMESPACE}_es_elasticsearch
+     - KIBANA_ELASTICSEARCH_URL=${NAMESPACE}-es-elasticsearch
    depends_on:
      - elasticsearch
    deploy:

@@ -35,10 +37,10 @@ services:
      placement:
        constraints:
          - node.labels.${NAMESPACE}_es==1
-volumes:
-  data_db:
-    driver: local
-  kibana_data:
-    driver: local
+# volumes:
+#   data_db:
+#     driver: local
+#   kibana_data:
+#     driver: local

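Since this change switches from named volumes to host bind mounts, those directories have to exist on the labelled node before deploying. A minimal sketch, assuming the Bitnami images run as their usual non-root user 1001 (verify for the exact image versions in use):

mkdir -p /mnt/data/volumes/elasticsearch /mnt/data/volumes/kibana/data /mnt/data/volumes/kibana/conf
chown -R 1001:1001 /mnt/data/volumes/elasticsearch /mnt/data/volumes/kibana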
docker-swarm/elasticsearch/env_prod (Normal file, 4 lines)
@@ -0,0 +1,4 @@

NAMESPACE=prod
NODE_PORT=9200
NODE_PORT_2=9300
NODE_PORT_KIBANA=5601
docker-swarm/jenkins/README (Normal file, 9 lines)
@@ -0,0 +1,9 @@

# Deploy canal in the crm1 environment

docker network create --driver overlay review

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_canal

env $(cat ./env_review | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - review_canal
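The deploy one-liner used throughout this repo works by exporting the key=value pairs from the env file, substituting them into the compose template with envsubst, and feeding the result to docker stack deploy via "-" (read the compose file from stdin). An equivalent, more explicit sketch of the same pattern; the temporary file name is an assumption:

export $(cat ./env_crm1 | xargs)                        # load NAMESPACE, hosts, passwords ...
envsubst < ./docker-compose.yml > /tmp/rendered.yml     # replace ${VAR} placeholders
docker stack deploy --compose-file /tmp/rendered.yml crm1_canal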
docker-swarm/jenkins/docker-compose.yml (Normal file, 16 lines)
@@ -0,0 +1,16 @@

version: '3.8'
networks:
  jenkins:
    external: false
services:
  jenkins:
    image: harbor.sino-assist.com/marsal1212/jenkins:latest
    environment:
      - TZ=Asia/Shanghai
    networks:
      - 'jenkins'
    volumes:
      - './jenkins_home:/var/jenkins_home'
    ports:
      - 8081:8080
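A minimal sketch of bringing this up and preparing the bind-mounted home directory; uid 1000 is the default user of the upstream jenkins image and is only an assumption for this custom build:

mkdir -p ./jenkins_home && chown 1000:1000 ./jenkins_home
docker stack deploy --compose-file docker-compose.yml jenkins --with-registry-auth
docker service logs -f jenkins_jenkins     # watch for the initial admin password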
docker-swarm/jenkins/env_crm1 (Normal file, 11 lines)
@@ -0,0 +1,11 @@

NAMESPACE=crm1
canal_instance_master_address=crm_mysql_db:3306
canal_instance_dbUsername=root
canal_instance_dbPassword=gkxl650
canal_instance_filter_regex=zd_rescue\\.user_order_20.*,zd_rescue\\.task_order_20.*,zd_rescue\\.task_order_cost_20.*,zd_rescue\\.supplier_account_record_20.*,zd_rescue\\.customer_order_account_20.*,zd_rescue\\.customer_order_relation_20.*
canal_mq_topic=canal_mysql_bin
rabbitmq_host=crm1_rabbitmq_stats:5672
rabbitmq_exchange=canal_exchange
rabbitmq_username=root
rabbitmq_password=gkxl650
rabbitmq_virtual_host=canal
docker-swarm/jenkins/env_review (Normal file, 11 lines)
@@ -0,0 +1,11 @@

NAMESPACE=review
canal_instance_master_address=192.168.10.10:3306
canal_instance_dbUsername=repl
canal_instance_dbPassword=nczl@sino_db
canal_instance_filter_regex=zd_rescue\\.user_order_20.*,zd_rescue\\.task_order_20.*,zd_rescue\\.task_order_cost_20.*,zd_rescue\\.supplier_account_record_20.*,zd_rescue\\.customer_order_account_20.*,zd_rescue\\.customer_order_relation_20.*
canal_mq_topic=canal_mysql_bin
rabbitmq_host=192.168.3.110:5672
rabbitmq_exchange=canal_exchange
rabbitmq_username=root
rabbitmq_password=gkxl650
rabbitmq_virtual_host=review
@@ -9,4 +9,7 @@
# }

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_log --with-registry-auth

env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_log --with-registry-auth
@@ -5,7 +5,8 @@ networks:
    external: true
services:
  logstash:
-   image: docker.elastic.co/logstash/logstash:7.17.3
+   image: docker.elastic.co/logstash/logstash:8.13.4
+   hostname: ${NAMESPACE}-log-logstash
    ports:
      - '${NODE_PORT}:5044'
    environment:

@@ -18,13 +19,13 @@ services:
        constraints:
          - node.labels.${NAMESPACE}_es==1
  filebeat:
-   image: docker.elastic.co/beats/filebeat:7.17.3
+   image: docker.elastic.co/beats/filebeat:8.13.4
    volumes:
      - "kibana_data:/bitnami/kibana"
    environment:
      - TZ=Asia/Shanghai
-     - LOGSTASH_URL=${NAMESPACE}_log_logstash:5044
-     - KIBANA_HOSTS=${NAMESPACE}_es_kibana
+     - LOGSTASH_URL=${NAMESPACE}-log-logstash:5044
+     - KIBANA_HOSTS=${NAMESPACE}-es-kibana
    configs:
      - source: filebeat_conf
        target: /usr/share/filebeat/filebeat.yml
docker-swarm/log/env_prod (Normal file, 2 lines)
@@ -0,0 +1,2 @@

NAMESPACE=prod
NODE_PORT=5044
@@ -18,14 +18,14 @@ filter {
output {
  if [servicename] {
    elasticsearch {
-     hosts => [ "crm1-es-elasticsearch:9200" ]
+     hosts => [ "prod-es-elasticsearch:9200" ]
      index => "sslog-%{[service]}"
      action => "create"
      ilm_enabled => false
    }
  }else{
    elasticsearch {
-     hosts => [ "crm1-es-elasticsearch:9200" ]
+     hosts => [ "prod-es-elasticsearch:9200" ]
      index => "sslog-default"
      action => "create"
      ilm_enabled => false
@@ -9,4 +9,10 @@
# }


-env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_log --with-registry-auth
+docker network create \
+  --driver overlay \
+  review
+
+env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_log --with-registry-auth
+
+env $(cat ./env_review | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - minio --with-registry-auth
@@ -1,4 +1,12 @@

# Deploy clustered mongodb in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_mongodb


# Deploy the mongodb replica set (rs)

docker stack deploy --compose-file docker-stack-rs.yml prod_mongodb --with-registry-auth
docker-swarm/mongodb/docker-stack-rs.yml (Normal file, 64 lines)
@@ -0,0 +1,64 @@

# Copyright Broadcom, Inc. All Rights Reserved.
# SPDX-License-Identifier: APACHE-2.0

version: '3.8'
networks:
  default:
    name: prod
    external: true
services:
  primary:
    image: docker.io/bitnami/mongodb:7.0
    hostname: mongodb-primary
    ports:
      - 27015:27017
    environment:
      - TZ=Asia/Shanghai
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-primary
      - MONGODB_REPLICA_SET_MODE=primary
      - MONGODB_ROOT_PASSWORD=123456
      - MONGODB_REPLICA_SET_KEY=replicasetkey123
    volumes:
      - '/mnt/data/volumes/mongodb/primary:/bitnami/mongodb'
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.hostname == ZD-CRM5]
  secondary:
    image: docker.io/bitnami/mongodb:7.0
    hostname: mongodb-secondary
    ports:
      - 27016:27017
    depends_on:
      - mongodb-primary
    environment:
      - TZ=Asia/Shanghai
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-secondary
      - MONGODB_REPLICA_SET_MODE=secondary
      - MONGODB_INITIAL_PRIMARY_HOST=mongodb-primary
      - MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD=123456
      - MONGODB_REPLICA_SET_KEY=replicasetkey123
    volumes:
      - '/mnt/data/volumes/mongodb/secondary:/bitnami/mongodb'
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.hostname == ZD-CRM6]
  arbiter:
    image: docker.io/bitnami/mongodb:7.0
    depends_on:
      - mongodb-primary
    environment:
      - TZ=Asia/Shanghai
      - MONGODB_ADVERTISED_HOSTNAME=mongodb-arbiter
      - MONGODB_REPLICA_SET_MODE=arbiter
      - MONGODB_INITIAL_PRIMARY_HOST=mongodb-primary
      - MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD=password123
      - MONGODB_REPLICA_SET_KEY=replicasetkey123
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.hostname == ZD-CRM3]
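Once the stack is up, the replica set state can be checked from inside the primary's task container on ZD-CRM5. A minimal sketch; the container-name filter and root password are taken from the stack above, and mongosh is the shell shipped with MongoDB 7.0:

docker exec -it $(docker ps -q -f name=prod_mongodb_primary) \
  mongosh -u root -p 123456 --authenticationDatabase admin \
  --eval 'rs.status().members.forEach(m => print(m.name, m.stateStr))'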
docker-swarm/monitor/README (Normal file, 19 lines)
@@ -0,0 +1,19 @@

env $(cat ./env_prod | xargs) envsubst < ./docker-stack.yml | docker stack deploy --compose-file - monitor

docker stack deploy --compose-file docker-compose.yml monitor --with-registry-auth


docker run \
  -p 9090:9090 \
  -v /opt/support/prometheus.yml:/etc/prometheus/prometheus.yml \
  prom/prometheus:v2.52.0

docker service create --name cadvisor -l prometheus-job=cadvisor \
  --mode=global --publish target=8080,mode=host \
  --mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock,ro \
  --mount type=bind,src=/,dst=/rootfs,ro \
  --mount type=bind,src=/var/run,dst=/var/run \
  --mount type=bind,src=/sys,dst=/sys,ro \
  --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro \
  spcodes/cadvisor:v0.49.1 -docker_only
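The prometheus.yml below scrapes every swarm node on port 9323, which is the Docker engine's own metrics endpoint and is disabled by default. A minimal sketch of enabling it on each node; the listen address is an assumption, and the setting should be merged into any existing /etc/docker/daemon.json rather than overwriting it:

cat /etc/docker/daemon.json        # check what is already there first
# add:  "metrics-addr": "0.0.0.0:9323"   (older engines also required "experimental": true)
sudo systemctl restart docker
curl -s localhost:9323/metrics | head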
docker-swarm/monitor/docker-compose.yml (Normal file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
prometheus:
|
||||
image: prom/prometheus:v2.52.0
|
||||
ports:
|
||||
- "9090:9090"
|
||||
configs:
|
||||
- source: prometheus_conf
|
||||
target: /etc/prometheus/prometheus.yml
|
||||
command:
|
||||
- --config.file=/etc/prometheus/prometheus.yml
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
deploy:
|
||||
mode: replicated
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM2]
|
||||
replicas: 1
|
||||
|
||||
# alertmanager:
|
||||
# image: prom/alertmanager:v0.27.0
|
||||
# ports:
|
||||
# - "9093:9093"
|
||||
# volumes:
|
||||
# - ./alertmanager.yml:/etc/alertmanager/alertmanager.yml
|
||||
# command:
|
||||
# - --config.file=/etc/alertmanager/alertmanager.yml
|
||||
# deploy:
|
||||
# mode: replicated
|
||||
# replicas: 1
|
||||
|
||||
# node-exporter:
|
||||
# image: prom/node-exporter:v1.8.1
|
||||
# volumes:
|
||||
# - /proc:/host/proc:ro
|
||||
# - /sys:/host/sys:ro
|
||||
# - /:/rootfs:ro
|
||||
# deploy:
|
||||
# mode: global
|
||||
# placement:
|
||||
# constraints: [node.role == manager]
|
||||
cadvisor:
|
||||
image: spcodes/cadvisor:v0.49.1
|
||||
ports:
|
||||
- 8180:8080
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- /:/rootfs:ro
|
||||
- /var/run:/var/run:rw
|
||||
- /sys:/sys:ro
|
||||
- /var/lib/docker/:/var/lib/docker:ro
|
||||
deploy:
|
||||
mode: global
|
||||
grafana:
|
||||
image: grafana/grafana:11.0.0
|
||||
ports:
|
||||
- 23000:3000
|
||||
volumes:
|
||||
- /opt/data/grafana/:/var/lib/grafana:ro
|
||||
deploy:
|
||||
mode: replicated
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM2]
|
||||
replicas: 1
|
||||
configs:
|
||||
prometheus_conf:
|
||||
# file: ./prometheus.yml
|
||||
external: true
|
||||
name: monitor_prometheus_conf_v2
|
docker-swarm/monitor/docker-stack.yml (Normal file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
grafana:
|
||||
image: portainer/template-swarm-monitoring:grafana-9.5.2
|
||||
ports:
|
||||
- target: 3000
|
||||
published: 3000
|
||||
protocol: tcp
|
||||
mode: ingress
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
- node.labels.monitoring == true
|
||||
volumes:
|
||||
- type: volume
|
||||
source: grafana-data
|
||||
target: /var/lib/grafana
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_USER=${GRAFANA_USER}
|
||||
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
|
||||
- GF_USERS_ALLOW_SIGN_UP=false
|
||||
networks:
|
||||
- net
|
||||
|
||||
prometheus:
|
||||
image: portainer/template-swarm-monitoring:prometheus-v2.44.0
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
- '--log.level=error'
|
||||
- '--storage.tsdb.path=/prometheus'
|
||||
- '--storage.tsdb.retention.time=7d'
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
- node.labels.monitoring == true
|
||||
volumes:
|
||||
- type: volume
|
||||
source: prometheus-data
|
||||
target: /prometheus
|
||||
networks:
|
||||
- net
|
||||
|
||||
cadvisor:
|
||||
image: spcodes/cadvisor:v0.49.1
|
||||
command: -logtostderr -docker_only
|
||||
deploy:
|
||||
mode: global
|
||||
resources:
|
||||
limits:
|
||||
memory: 128M
|
||||
reservations:
|
||||
memory: 64M
|
||||
volumes:
|
||||
- type: bind
|
||||
source: /
|
||||
target: /rootfs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: /var/run
|
||||
target: /var/run
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: /sys
|
||||
target: /sys
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: /var/lib/docker
|
||||
target: /var/lib/docker
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: /dev/disk
|
||||
target: /dev/disk
|
||||
read_only: true
|
||||
networks:
|
||||
- net
|
||||
|
||||
node-exporter:
|
||||
image: prom/node-exporter:v1.5.0
|
||||
command:
|
||||
- '--path.sysfs=/host/sys'
|
||||
- '--path.procfs=/host/proc'
|
||||
- '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
|
||||
- '--no-collector.ipvs'
|
||||
deploy:
|
||||
mode: global
|
||||
resources:
|
||||
limits:
|
||||
memory: 128M
|
||||
reservations:
|
||||
memory: 64M
|
||||
volumes:
|
||||
- type: bind
|
||||
source: /
|
||||
target: /rootfs
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: /proc
|
||||
target: /host/proc
|
||||
read_only: true
|
||||
- type: bind
|
||||
source: /sys
|
||||
target: /host/sys
|
||||
read_only: true
|
||||
networks:
|
||||
- net
|
||||
|
||||
volumes:
|
||||
grafana-data:
|
||||
prometheus-data:
|
||||
|
||||
networks:
|
||||
net:
|
||||
driver: overlay
|
||||
|
||||
|
||||
|
docker-swarm/monitor/prometheus.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@

scrape_configs:
  # Make Prometheus scrape itself for metrics.
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  # Create a job for Docker Swarm containers.
  - job_name: 'dockerswarm'
    dockerswarm_sd_configs:
      - host: unix:///var/run/docker.sock
        role: nodes
    relabel_configs:
      # Fetch metrics on port 9323.
      - source_labels: [__meta_dockerswarm_node_address]
        target_label: __address__
        replacement: $1:9323
      # Set hostname as instance label
      - source_labels: [__meta_dockerswarm_node_hostname]
        target_label: instance
      # Only keep containers that should be running.
      - source_labels: [__meta_dockerswarm_task_desired_state]
        regex: running
        action: keep
      # Only keep containers that have a `prometheus-job` label.
      - source_labels: [__meta_dockerswarm_service_label_prometheus_job]
        regex: .+
        action: keep
      # Use the prometheus-job Swarm label as Prometheus job label.
      - source_labels: [__meta_dockerswarm_service_label_prometheus_job]
        target_label: job
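As the relabel rules are written, only targets whose Swarm service carries a prometheus-job label are kept (the cadvisor service in the README already sets it via -l prometheus-job=cadvisor). A minimal sketch of labelling another service so it gets scraped; the service name is an assumption:

docker service update --label-add prometheus-job=my-service prod_some_service
docker service inspect prod_some_service --format '{{ json .Spec.Labels }}'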
docker-swarm/mysql-repl-tool/README.md (Normal file, 50 lines)
@@ -0,0 +1,50 @@

Dedicated database used in production by nacos and xxl-job.


# Deploy the mysql used by the auxiliary tooling services in the prod environment

env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_tool_mysql

[mysqladmin]
user=

[mysqld]
skip_name_resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
port=3306
tmpdir=/opt/bitnami/mysql/tmp
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid_file=/opt/bitnami/mysql/tmp/mysqld.pid
max_allowed_packet=16M
bind_address=0.0.0.0
log_error=/opt/bitnami/mysql/logs/mysqld.log
slow_query_log=0
slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log
long_query_time=10
character_set_server=utf8mb4
collation_server=utf8mb4_unicode_ci
plugin_dir=/opt/bitnami/mysql/lib/plugin

[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default_character_set=utf8mb4
plugin_dir=/opt/bitnami/mysql/lib/plugin

[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid_file=/opt/bitnami/mysql/tmp/mysqld.pid


---- the version actually used

[mysqld]
max_connections=500
max_allowed_packet=64M
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
log-bin=mysql-bin       # enable binlog
binlog-format=ROW       # use ROW mode
server_id=209
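The compose file for this stack mounts my_custom.cnf from external swarm configs whose names come from env_prod (CUSTOME_CONFIG_MASTER / CUSTOME_CONFIG_SLAVE). A minimal sketch of creating them from local cnf files before the first deploy; the local file names are assumptions:

docker config create prod_tool_mysql_master_conf_v1 ./my_custom_master.cnf
docker config create prod_tool_mysql_slave_conf_v1 ./my_custom_slave.cnf
docker config ls | grep prod_tool_mysql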
docker-swarm/mysql-repl-tool/docker-compose.yml (Normal file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
version: '3.8'
|
||||
networks:
|
||||
default:
|
||||
name: ${NAMESPACE}
|
||||
external: true
|
||||
services:
|
||||
mysql-master:
|
||||
image: docker.io/bitnami/mysql:8.0
|
||||
hostname: ${NAMESPACE}-tool-mysql-master
|
||||
ports:
|
||||
- '${NODE_PORT_MASTER}:3306'
|
||||
volumes:
|
||||
- 'mysql_repl_master_data:/bitnami/mysql/data'
|
||||
environment:
|
||||
- MYSQL_REPLICATION_MODE=master
|
||||
- MYSQL_REPLICATION_USER=repl_user
|
||||
- MYSQL_REPLICATION_PASSWORD=${MYSQL_REPLICATION_PASSWORD}
|
||||
# - MYSQL_DATABASE=my_database
|
||||
# ALLOW_EMPTY_PASSWORD is recommended only for development.
|
||||
- MYSQL_USER=${MYSQL_USER}
|
||||
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
|
||||
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
|
||||
- MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password
|
||||
- MYSQL_ENABLE_SLOW_QUERY=0
|
||||
- MYSQL_LONG_QUERY_TIME=10
|
||||
configs:
|
||||
- source: custome_config_master
|
||||
target: /opt/bitnami/mysql/conf/my_custom.cnf
|
||||
healthcheck:
|
||||
test: ['CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh']
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 6
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM2]
|
||||
mysql-slave:
|
||||
image: docker.io/bitnami/mysql:8.0
|
||||
hostname: ${NAMESPACE}-tool-mysql-slave
|
||||
ports:
|
||||
- '${NODE_PORT_SLAVE}:3306'
|
||||
volumes:
|
||||
- 'mysql_repl_slave_data:/bitnami/mysql/data'
|
||||
depends_on:
|
||||
- mysql-master
|
||||
environment:
|
||||
- MYSQL_REPLICATION_MODE=slave
|
||||
- MYSQL_REPLICATION_USER=repl_user
|
||||
- MYSQL_REPLICATION_PASSWORD=${MYSQL_REPLICATION_PASSWORD}
|
||||
# - MYSQL_DATABASE=my_database
|
||||
- MYSQL_MASTER_HOST=mysql-master
|
||||
- MYSQL_MASTER_PORT_NUMBER=3306
|
||||
- MYSQL_MASTER_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
|
||||
- MYSQL_USER=${MYSQL_USER}
|
||||
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
|
||||
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
|
||||
- MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password
|
||||
- MYSQL_ENABLE_SLOW_QUERY=0
|
||||
- MYSQL_LONG_QUERY_TIME=10
|
||||
# ALLOW_EMPTY_PASSWORD is recommended only for development.
|
||||
# - ALLOW_EMPTY_PASSWORD=yes
|
||||
# In case of missing binary files on master, use `true` to reset those binary files. Creating a previous backup is recommended.
|
||||
- MYSQL_REPLICATION_SLAVE_DUMP=false
|
||||
healthcheck:
|
||||
test: ['CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh']
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 6
|
||||
configs:
|
||||
- source: custome_config_slave
|
||||
target: /opt/bitnami/mysql/conf/my_custom.cnf
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM3]
|
||||
volumes:
|
||||
mysql_repl_master_data:
|
||||
driver: local
|
||||
mysql_repl_slave_data:
|
||||
driver: local
|
||||
|
||||
configs:
|
||||
custome_config_master:
|
||||
external: true
|
||||
name: ${CUSTOME_CONFIG_MASTER}
|
||||
custome_config_slave:
|
||||
external: true
|
||||
name: ${CUSTOME_CONFIG_SLAVE}
|
docker-swarm/mysql-repl-tool/env_prod (Normal file, 9 lines)
@@ -0,0 +1,9 @@

NAMESPACE=prod
NODE_PORT_MASTER=25306
NODE_PORT_SLAVE=25307
MYSQL_USER=zd_tool
MYSQL_PASSWORD=gkxl2024#@
MYSQL_ROOT_PASSWORD=gkxl2024#@
MYSQL_REPLICATION_PASSWORD=gkxl2024#@
CUSTOME_CONFIG_MASTER=prod_tool_mysql_master_conf_v1
CUSTOME_CONFIG_SLAVE=prod_tool_mysql_slave_conf_v1
docker-swarm/nacos-cluser/README (Normal file, 10 lines)
@@ -0,0 +1,10 @@

# Deploy standalone nacos in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./standalone-derby.yml | docker stack deploy --compose-file - crm1_nacos


# Deploy the nacos cluster in the prod environment

env $(cat ./env_prod | xargs) envsubst < ./cluster-docker-compose.yml | docker stack deploy --compose-file - prod_nacos
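The prod cluster stores its data in MySQL (SPRING_DATASOURCE_PLATFORM: mysql), so the nacos database and the tables from mysql-schema.sql have to exist before the first start. A minimal sketch against the tool MySQL master defined above; port and credentials come from env_prod, and 192.168.10.51 is just one example swarm node address for the published port:

mysql -h 192.168.10.51 -P 25306 -u root -p'gkxl2024#@' \
  -e "CREATE DATABASE IF NOT EXISTS nacos DEFAULT CHARACTER SET utf8mb4;"
mysql -h 192.168.10.51 -P 25306 -u root -p'gkxl2024#@' nacos < ./mysql-schema.sql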
docker-swarm/nacos-cluser/cluster-docker-compose.yml (Normal file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
version: '3.8'
|
||||
networks:
|
||||
default:
|
||||
name: ${NAMESPACE}
|
||||
external: true
|
||||
services:
|
||||
nacos1:
|
||||
image: nacos/nacos-server:${NACOS_VERSION}
|
||||
hostname: ${NAMESPACE}-nacos1
|
||||
ports:
|
||||
- ${NODE_PORT_11}:8848
|
||||
- ${NODE_PORT_12}:9848
|
||||
- ${NODE_PORT_13}:9849
|
||||
volumes:
|
||||
- nacos_cluster_log:/home/nacos/logs #配置docker存储日志的卷
|
||||
environment:
|
||||
MODE: cluster
|
||||
PREFER_HOST_MODE: hostname
|
||||
NACOS_SERVERS: ${NAMESPACE}-nacos1:8848 ${NAMESPACE}-nacos2:8848 ${NAMESPACE}-nacos3:8848
|
||||
NACOS_SERVER_PORT: 8848
|
||||
NACOS_AUTH_ENABLE: 'true' #1.2.0版本默认关闭登陆界面
|
||||
SPRING_DATASOURCE_PLATFORM: mysql
|
||||
MYSQL_SERVICE_HOST: ${MYSQL_SERVICE_HOST}
|
||||
MYSQL_SERVICE_DB_NAME: nacos
|
||||
MYSQL_SERVICE_PORT: 3306
|
||||
MYSQL_SERVICE_USER: ${MYSQL_SERVICE_USER}
|
||||
MYSQL_SERVICE_PASSWORD: ${MYSQL_SERVICE_PASSWORD}
|
||||
NACOS_AUTH_IDENTITY_KEY: ${NACOS_AUTH_IDENTITY_KEY}
|
||||
NACOS_AUTH_IDENTITY_VALUE: ${NACOS_AUTH_IDENTITY_VALUE}
|
||||
NACOS_AUTH_TOKEN: ${NACOS_AUTH_TOKEN}
|
||||
deploy:
|
||||
replicas: 1 #部署时,指定部署一个副本
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM1]
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
nacos2:
|
||||
image: nacos/nacos-server:${NACOS_VERSION}
|
||||
hostname: ${NAMESPACE}-nacos2
|
||||
ports:
|
||||
- ${NODE_PORT_21}:8848
|
||||
- ${NODE_PORT_22}:9848
|
||||
- ${NODE_PORT_23}:9849
|
||||
volumes:
|
||||
- nacos_cluster_log:/home/nacos/logs #配置docker存储日志的卷
|
||||
environment:
|
||||
MODE: cluster
|
||||
PREFER_HOST_MODE: hostname
|
||||
NACOS_SERVERS: ${NAMESPACE}-nacos1:8848 ${NAMESPACE}-nacos2:8848 ${NAMESPACE}-nacos3:8848
|
||||
NACOS_SERVER_PORT: 8848
|
||||
NACOS_AUTH_ENABLE: 'true' #1.2.0版本默认关闭登陆界面
|
||||
SPRING_DATASOURCE_PLATFORM: mysql
|
||||
MYSQL_SERVICE_HOST: ${MYSQL_SERVICE_HOST}
|
||||
MYSQL_SERVICE_DB_NAME: nacos
|
||||
MYSQL_SERVICE_PORT: 3306
|
||||
MYSQL_SERVICE_USER: ${MYSQL_SERVICE_USER}
|
||||
MYSQL_SERVICE_PASSWORD: ${MYSQL_SERVICE_PASSWORD}
|
||||
NACOS_AUTH_IDENTITY_KEY: ${NACOS_AUTH_IDENTITY_KEY}
|
||||
NACOS_AUTH_IDENTITY_VALUE: ${NACOS_AUTH_IDENTITY_VALUE}
|
||||
NACOS_AUTH_TOKEN: ${NACOS_AUTH_TOKEN}
|
||||
deploy:
|
||||
replicas: 1 #部署时,指定部署一个副本
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM2]
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
nacos3:
|
||||
image: nacos/nacos-server:${NACOS_VERSION}
|
||||
hostname: ${NAMESPACE}-nacos3
|
||||
ports:
|
||||
- ${NODE_PORT_31}:8848
|
||||
- ${NODE_PORT_32}:9848
|
||||
- ${NODE_PORT_33}:9849
|
||||
volumes:
|
||||
- nacos_cluster_log:/home/nacos/logs #配置docker存储日志的卷
|
||||
environment:
|
||||
MODE: cluster
|
||||
PREFER_HOST_MODE: hostname
|
||||
NACOS_SERVERS: ${NAMESPACE}-nacos1:8848 ${NAMESPACE}-nacos2:8848 ${NAMESPACE}-nacos3:8848
|
||||
NACOS_SERVER_PORT: 8848
|
||||
NACOS_AUTH_ENABLE: 'true' #1.2.0版本默认关闭登陆界面
|
||||
SPRING_DATASOURCE_PLATFORM: mysql
|
||||
MYSQL_SERVICE_HOST: ${MYSQL_SERVICE_HOST}
|
||||
MYSQL_SERVICE_DB_NAME: nacos
|
||||
MYSQL_SERVICE_PORT: 3306
|
||||
MYSQL_SERVICE_USER: ${MYSQL_SERVICE_USER}
|
||||
MYSQL_SERVICE_PASSWORD: ${MYSQL_SERVICE_PASSWORD}
|
||||
NACOS_AUTH_IDENTITY_KEY: ${NACOS_AUTH_IDENTITY_KEY}
|
||||
NACOS_AUTH_IDENTITY_VALUE: ${NACOS_AUTH_IDENTITY_VALUE}
|
||||
NACOS_AUTH_TOKEN: ${NACOS_AUTH_TOKEN}
|
||||
deploy:
|
||||
replicas: 1 #部署时,指定部署一个副本
|
||||
placement:
|
||||
constraints: [node.hostname == ZD-CRM3]
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
volumes:
|
||||
nacos_cluster_log:
|
||||
driver: local
|
||||
|
||||
|
docker-swarm/nacos-cluser/env_crm1 (Normal file, 5 lines)
@@ -0,0 +1,5 @@

NAMESPACE=crm1
NACOS_VERSION=v2.2.2
NODE_PORT=8848
NODE_PORT_2=9848
NACOS_SERVER_IP=192.168.1.209
docker-swarm/nacos-cluser/env_prod (Normal file, 17 lines)
@@ -0,0 +1,17 @@

NAMESPACE=prod
NACOS_VERSION=v2.3.0
NODE_PORT_11=21848
NODE_PORT_12=22848
NODE_PORT_13=22849
NODE_PORT_21=23848
NODE_PORT_22=24848
NODE_PORT_23=24849
NODE_PORT_31=25848
NODE_PORT_32=26848
NODE_PORT_33=26849
MYSQL_SERVICE_HOST=prod-tool-mysql-master
MYSQL_SERVICE_USER=zd_tool
MYSQL_SERVICE_PASSWORD=gkxl2024#@
NACOS_AUTH_IDENTITY_KEY=nacos
NACOS_AUTH_IDENTITY_VALUE=gkxl2024#@
NACOS_AUTH_TOKEN=OTg1NjRzZnJ0Z2RmZzIwMjQ1NTU1NTExZWZnZGVmZGVz
docker-swarm/nacos-cluser/mysql-schema.sql (Normal file, 213 lines)
@@ -0,0 +1,213 @@
|
||||
/*
|
||||
* Copyright 1999-2018 Alibaba Group Holding Ltd.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = config_info */
|
||||
/******************************************/
|
||||
CREATE TABLE `config_info` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
|
||||
`group_id` varchar(128) DEFAULT NULL COMMENT 'group_id',
|
||||
`content` longtext NOT NULL COMMENT 'content',
|
||||
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
|
||||
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`src_user` text COMMENT 'source user',
|
||||
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
|
||||
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
|
||||
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
|
||||
`c_desc` varchar(256) DEFAULT NULL COMMENT 'configuration description',
|
||||
`c_use` varchar(64) DEFAULT NULL COMMENT 'configuration usage',
|
||||
`effect` varchar(64) DEFAULT NULL COMMENT '配置生效的描述',
|
||||
`type` varchar(64) DEFAULT NULL COMMENT '配置的类型',
|
||||
`c_schema` text COMMENT '配置的模式',
|
||||
`encrypted_data_key` text NOT NULL COMMENT '密钥',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = config_info_aggr */
|
||||
/******************************************/
|
||||
CREATE TABLE `config_info_aggr` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
|
||||
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
|
||||
`datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
|
||||
`content` longtext NOT NULL COMMENT '内容',
|
||||
`gmt_modified` datetime NOT NULL COMMENT '修改时间',
|
||||
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
|
||||
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';
|
||||
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = config_info_beta */
|
||||
/******************************************/
|
||||
CREATE TABLE `config_info_beta` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
|
||||
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
|
||||
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
|
||||
`content` longtext NOT NULL COMMENT 'content',
|
||||
`beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
|
||||
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
|
||||
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`src_user` text COMMENT 'source user',
|
||||
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
|
||||
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
|
||||
`encrypted_data_key` text NOT NULL COMMENT '密钥',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = config_info_tag */
|
||||
/******************************************/
|
||||
CREATE TABLE `config_info_tag` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
|
||||
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
|
||||
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
|
||||
`tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
|
||||
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
|
||||
`content` longtext NOT NULL COMMENT 'content',
|
||||
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
|
||||
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`src_user` text COMMENT 'source user',
|
||||
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = config_tags_relation */
|
||||
/******************************************/
|
||||
CREATE TABLE `config_tags_relation` (
|
||||
`id` bigint(20) NOT NULL COMMENT 'id',
|
||||
`tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
|
||||
`tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
|
||||
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
|
||||
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
|
||||
`tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
|
||||
`nid` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'nid, 自增长标识',
|
||||
PRIMARY KEY (`nid`),
|
||||
UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
|
||||
KEY `idx_tenant_id` (`tenant_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = group_capacity */
|
||||
/******************************************/
|
||||
CREATE TABLE `group_capacity` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
|
||||
`group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群',
|
||||
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
|
||||
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
|
||||
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
|
||||
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,,0表示使用默认值',
|
||||
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
|
||||
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
|
||||
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_group_id` (`group_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表';
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = his_config_info */
|
||||
/******************************************/
|
||||
CREATE TABLE `his_config_info` (
|
||||
`id` bigint(20) unsigned NOT NULL COMMENT 'id',
|
||||
`nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'nid, 自增标识',
|
||||
`data_id` varchar(255) NOT NULL COMMENT 'data_id',
|
||||
`group_id` varchar(128) NOT NULL COMMENT 'group_id',
|
||||
`app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
|
||||
`content` longtext NOT NULL COMMENT 'content',
|
||||
`md5` varchar(32) DEFAULT NULL COMMENT 'md5',
|
||||
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
`src_user` text COMMENT 'source user',
|
||||
`src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
|
||||
`op_type` char(10) DEFAULT NULL COMMENT 'operation type',
|
||||
`tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
|
||||
`encrypted_data_key` text NOT NULL COMMENT '密钥',
|
||||
PRIMARY KEY (`nid`),
|
||||
KEY `idx_gmt_create` (`gmt_create`),
|
||||
KEY `idx_gmt_modified` (`gmt_modified`),
|
||||
KEY `idx_did` (`data_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造';
|
||||
|
||||
|
||||
/******************************************/
|
||||
/* 表名称 = tenant_capacity */
|
||||
/******************************************/
|
||||
CREATE TABLE `tenant_capacity` (
|
||||
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
|
||||
`tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
|
||||
`quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
|
||||
`usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
|
||||
`max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
|
||||
`max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
|
||||
`max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
|
||||
`max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
|
||||
`gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
||||
`gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_tenant_id` (`tenant_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表';
|
||||
|
||||
|
||||
CREATE TABLE `tenant_info` (
|
||||
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||
`kp` varchar(128) NOT NULL COMMENT 'kp',
|
||||
`tenant_id` varchar(128) default '' COMMENT 'tenant_id',
|
||||
`tenant_name` varchar(128) default '' COMMENT 'tenant_name',
|
||||
`tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
|
||||
`create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
|
||||
`gmt_create` bigint(20) NOT NULL COMMENT '创建时间',
|
||||
`gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
|
||||
KEY `idx_tenant_id` (`tenant_id`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';
|
||||
|
||||
CREATE TABLE `users` (
|
||||
`username` varchar(50) NOT NULL PRIMARY KEY COMMENT 'username',
|
||||
`password` varchar(500) NOT NULL COMMENT 'password',
|
||||
`enabled` boolean NOT NULL COMMENT 'enabled'
|
||||
);
|
||||
|
||||
CREATE TABLE `roles` (
|
||||
`username` varchar(50) NOT NULL COMMENT 'username',
|
||||
`role` varchar(50) NOT NULL COMMENT 'role',
|
||||
UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
|
||||
);
|
||||
|
||||
CREATE TABLE `permissions` (
|
||||
`role` varchar(50) NOT NULL COMMENT 'role',
|
||||
`resource` varchar(255) NOT NULL COMMENT 'resource',
|
||||
`action` varchar(8) NOT NULL COMMENT 'action',
|
||||
UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE
|
||||
);
|
||||
|
||||
INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);
|
||||
|
||||
INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
|
docker-swarm/nacos-cluser/standalone-derby.yml (Normal file, 38 lines)
@@ -0,0 +1,38 @@

version: '3.8'

networks:
  default:
    name: ${NAMESPACE}
    external: true
services:
  server:
    hostname: ${NAMESPACE}_nacos_server
    image: nacos/nacos-server:${NACOS_VERSION}
    environment:
      - PREFER_HOST_MODE=hostname
      - NACOS_SERVER_IP=${NACOS_SERVER_IP}
      - MODE=standalone
      - NACOS_AUTH_ENABLE=true
      - NACOS_AUTH_IDENTITY_KEY=bndmsdsad
      - NACOS_AUTH_IDENTITY_VALUE=wepqweq#dasld
      - NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456587012345678901234567890123456789
    ports:
      - target: 8848
        published: ${NODE_PORT}
        protocol: tcp
        mode: host # Note: the default is ingress, i.e. swarm's routing mesh - the published port on any cluster node reaches the service container, similar to a Kubernetes NodePort service. With host mode, the container is only reachable via the address of the node it actually runs on plus the published port.
      - target: 9848
        published: ${NODE_PORT_2}
        protocol: tcp
        mode: host
    volumes:
      - data_server:/home/nacos/
    deploy:
      update_config:
        order: stop-first
      placement:
        constraints:
          - node.labels.${NAMESPACE}_nacos_server==1
volumes:
  data_server:
    driver: local
docker-swarm/nginx-prod-50/README (Normal file, 10 lines)
@@ -0,0 +1,10 @@

# Deploy nginx in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_nginx


# Deploy nginx in the prod environment

env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_nginx
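The nginx stack mounts all of its configuration and certificates from external swarm configs (nginx_conf_v1, ssl_sinoassist_key_2024, ...). Swarm configs are immutable, which is why the names carry version suffixes: rolling out a changed nginx.conf means creating a new config and pointing the stack at it. A minimal sketch; the _v2 name is an assumption:

docker config create nginx_conf_v1 ./nginx.conf        # first-time creation
# to roll out a change later:
docker config create nginx_conf_v2 ./nginx.conf
# then change "name: nginx_conf_v1" to nginx_conf_v2 in docker-compose.yml and redeploy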
docker-swarm/nginx-prod-50/docker-compose.yml (Normal file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
version: '3.8'
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: ${NAMESPACE}
|
||||
external: true
|
||||
services:
|
||||
server:
|
||||
image: 'docker.io/bitnami/nginx:1.24'
|
||||
ports:
|
||||
- '8080:8080'
|
||||
- '8443:8443'
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- /opt/logs/nginx/:/opt/bitnami/nginx/logs/
|
||||
configs:
|
||||
- source: nginx_conf
|
||||
target: /opt/bitnami/nginx/conf/nginx.conf
|
||||
- source: nginx_ssl_sinoassist_config
|
||||
target: /opt/bitnami/nginx/conf/ssl.sinoassist.conf
|
||||
- source: ssl_sinoassist_key
|
||||
target: /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.key
|
||||
- source: ssl_sinoassist_pem
|
||||
target: /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.pem
|
||||
- source: nginx_prod_config
|
||||
target: /opt/bitnami/nginx/conf/server_blocks/prod.conf
|
||||
- source: nginx_prod_sup_config
|
||||
target: /opt/bitnami/nginx/conf/server_blocks/prod-sup.conf
|
||||
- source: nginx_other_config
|
||||
target: /opt/bitnami/nginx/conf/server_blocks/zd-other.conf
|
||||
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 2
|
||||
update_config:
|
||||
order: start-first
|
||||
placement:
|
||||
constraints:
|
||||
- node.labels.${NAMESPACE}_nginx==1
|
||||
configs:
|
||||
nginx_conf:
|
||||
external: true
|
||||
name: nginx_conf_v1
|
||||
nginx_ssl_sinoassist_config:
|
||||
external: true
|
||||
name: nginx_ssl_sinoassist_conf_v1
|
||||
nginx_prod_config:
|
||||
external: true
|
||||
name: nginx_prod_config_v1
|
||||
nginx_prod_sup_config:
|
||||
external: true
|
||||
name: nginx_prod_sup_config_v1
|
||||
nginx_other_config:
|
||||
external: true
|
||||
name: nginx_other_config_v1
|
||||
ssl_sinoassist_key:
|
||||
external: true
|
||||
name: ssl_sinoassist_key_2024
|
||||
ssl_sinoassist_pem:
|
||||
external: true
|
||||
name: ssl_sinoassist_pem_2024
|
docker-swarm/nginx-prod-50/env_crm1 (Normal file, 4 lines)
@@ -0,0 +1,4 @@

NAMESPACE=crm1
NODE_PORT=8080
CUSTOME_CONFIG=nginx_conf_v1
CUSTOME_CONFIG=nginx_conf_v1
docker-swarm/nginx-prod-50/env_prod (Normal file, 1 line)
@@ -0,0 +1 @@

NAMESPACE=prod
docker-swarm/nginx-prod-50/nginx.conf (Normal file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
# Based on https://www.nginx.com/resources/wiki/start/topics/examples/full/#nginx-conf
|
||||
user www www; ## Default: nobody
|
||||
|
||||
worker_processes auto;
|
||||
error_log "/opt/bitnami/nginx/logs/error.log";
|
||||
pid "/opt/bitnami/nginx/tmp/nginx.pid";
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
log_format main '$remote_addr - $remote_user [$time_local] '
|
||||
'"$request" $status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
access_log "/opt/bitnami/nginx/logs/access.log" main;
|
||||
add_header X-Frame-Options SAMEORIGIN;
|
||||
|
||||
client_body_temp_path "/opt/bitnami/nginx/tmp/client_body" 1 2;
|
||||
proxy_temp_path "/opt/bitnami/nginx/tmp/proxy" 1 2;
|
||||
fastcgi_temp_path "/opt/bitnami/nginx/tmp/fastcgi" 1 2;
|
||||
scgi_temp_path "/opt/bitnami/nginx/tmp/scgi" 1 2;
|
||||
uwsgi_temp_path "/opt/bitnami/nginx/tmp/uwsgi" 1 2;
|
||||
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay off;
|
||||
gzip on;
|
||||
gzip_http_version 1.0;
|
||||
gzip_comp_level 2;
|
||||
gzip_proxied any;
|
||||
gzip_types text/plain text/css application/javascript text/xml application/xml+rss;
|
||||
keepalive_timeout 65;
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5:!DES:!3DES;
|
||||
client_max_body_size 105M;
|
||||
server_tokens off;
|
||||
|
||||
absolute_redirect off;
|
||||
port_in_redirect off;
|
||||
|
||||
include "/opt/bitnami/nginx/conf/server_blocks/*.conf";
|
||||
|
||||
# # HTTP Server
|
||||
# server {
|
||||
# # Port to listen on, can also be set in IP:PORT format
|
||||
# listen 80;
|
||||
|
||||
# include "/opt/bitnami/nginx/conf/bitnami/*.conf";
|
||||
|
||||
# location /status {
|
||||
# stub_status on;
|
||||
# access_log off;
|
||||
# allow 127.0.0.1;
|
||||
# deny all;
|
||||
# }
|
||||
# }
|
||||
}
|
docker-swarm/nginx-prod-50/nginx_other_config_v1 (Normal file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
## 公司其他域名的切换
|
||||
|
||||
# 4s店微信
|
||||
server {
|
||||
listen 8080;
|
||||
server_name wx4s.sinoassist.com;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.10.7:8777;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# 微信供应商
|
||||
server {
|
||||
listen 8080;
|
||||
server_name wxdd.sinoassist.com;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.10.7:8568;
|
||||
}
|
||||
}
|
||||
|
||||
# 呼叫中心接口
|
||||
server {
|
||||
listen 8080;
|
||||
server_name apicc.sinoassist.com;
|
||||
include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.5.201:8080;
|
||||
}
|
||||
}
|
||||
|
||||
# 呼叫中心接口websocket
|
||||
server {
|
||||
listen 8080;
|
||||
server_name apiccws.sinoassist.com;
|
||||
include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.5.201:1884;
|
||||
}
|
||||
}
|
docker-swarm/nginx-prod-50/nginx_prod_config_v1 (Normal file, 167 lines)
@@ -0,0 +1,167 @@
upstream api.zhongdao {
    server ss52_sa-gateway_svc:8080;
    server ss53_sa-gateway_svc:8080;
}

# Zhongdao external API endpoints
server {
    listen 8080;
    server_name api.sinoassist.net api.sinoassist.com xcx-api.sinoassist.com interface.review.sino-assist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    location / {
        proxy_pass http://api.zhongdao;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location ~ .*actuator.* {
        deny all;
    }
}

# API (do-dec regional domains)
server {
    listen 8080;
    server_name api-nj.do-dec.com api-cd.do-dec.com api-wh.do-dec.com api-hz.do-dec.com api-sh.do-dec.com;

    location / {
        proxy_pass http://api.zhongdao;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location ~ .*actuator.* {
        deny all;
    }
}

server {
    server_name site.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    location / {
        root /zd/cc-site/dist/;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
        if ($request_filename ~ .*\.(htm|html)$) {
            add_header Cache-Control no-cache;
        }
    }
}

server {
    server_name www.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    location /h5/rescue {
        alias /zd/rescue-h5/dist/;
        try_files $uri $uri/ /h5/rescue/index.html;
        index index.html;
        if ($request_filename ~ .*\.(htm|html)$) {
            add_header Cache-Control no-cache;
        }
    }

    location /dev/h5/rescue {
        proxy_pass http://192.168.1.209:8030/dev/h5/rescue;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        client_max_body_size 200m;
    }

    location /h5/client/ {
        alias /zd/sino-client-h5/dist/build/h5/;
        index index.html;
        if ($request_filename ~ .*\.(htm|html)$) {
            add_header Cache-Control no-cache;
        }
    }

    location /h5/supplier/dispatch {
        alias /zd/supplier-dispatch-h5/dist/;
        try_files $uri $uri/ /h5/supplier/dispatch/index.html;
        index index.html;
        if ($request_filename ~ .*\.(htm|html)$) {
            add_header Cache-Control no-cache;
        }
    }

    # WeChat domain-verification files for the Taike app / Haitunwan
    location /FowqINu4W1.txt {
        default_type text/html;
        return 200 "90d7811c9e948fe95df1fd46ca3c1984";
    }

    location /HQgOV1DbaM.txt {
        default_type text/html;
        return 200 "91aad82c4fadf3b6b4843771561dac64";
    }

    location /pay/gateway/ {
        proxy_pass http://192.168.3.121:9226/pay/gateway/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        client_max_body_size 200m;
    }

    location /pay/gateway/api/ {
        proxy_next_upstream http_502 http_504 error timeout invalid_header;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://192.168.3.121:9216/api/;
        # Enable WebSocket upgrade support
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location / {
        proxy_pass http://prod_ss_sa-cc_svc:8080/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location ~ ^/(export-app|common|order|supplier|contract|base) {
        proxy_pass http://api.zhongdao;
    }
}

## Force HTTP to HTTPS for the frontend
server {
    listen 8080;
    server_name www.sinoassist.com;
    add_header Strict-Transport-Security max-age=15768000;
    return 301 https://www.sinoassist.com$request_uri;
}
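After changing this upstream/server-block file, it is worth syntax-checking and reloading nginx inside the running container before relying on it. A minimal sketch (the container name filter and the locally reachable 8080 port are assumptions; adjust to the actual Bitnami nginx service in this stack):

```
# Assumption: the nginx container's name contains "nginx"; adjust the filter if needed.
NGINX_CID=$(docker ps -q -f name=nginx | head -n1)

# Validate the rendered configuration, then reload without dropping connections
docker exec "$NGINX_CID" nginx -t
docker exec "$NGINX_CID" nginx -s reload

# Spot-check that actuator paths are blocked (expect 403)
curl -s -o /dev/null -w '%{http_code}\n' -H 'Host: api.sinoassist.com' http://127.0.0.1:8080/actuator/health
```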
65
docker-swarm/nginx-prod-50/nginx_prod_sup_config_v1
Normal file
@ -0,0 +1,65 @@
## Domain switchover for other services in the rescue production environment

## RabbitMQ STOMP
upstream stomp.zhongdao {
    server prod_rabbitmq_queue1:15674;
    server prod_rabbitmq_queue2:15674;
    server prod_rabbitmq_stats:15674;
}

server {
    listen 8080;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
    server_name stomp.sinoassist.com;

    location / {
        proxy_pass http://stomp.zhongdao;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

# Backup file server
server {
    listen 8080;
    server_name file.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    # Call-recording file server
    location /ly/ {
        proxy_pass http://192.168.5.204:8088/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        if ($request_filename ~ .*\.(htm|html)$) {
            add_header Cache-Control no-cache;
        }
    }

    # Backup file server
    location / {
        proxy_pass http://192.168.10.18:8888;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        if ($request_filename ~ .*\.(htm|html)$) {
            add_header Cache-Control no-cache;
        }
    }
}
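The stomp.zhongdao upstream is only useful if the WebSocket upgrade headers survive the proxy. A quick handshake check, assuming the rabbitmq_web_stomp endpoint sits at its default /ws path and that this nginx's 8080 listener is published on a swarm node (the 192.168.10.51:8080 address is an assumption):

```
# Expect "HTTP/1.1 101 Switching Protocols" if Upgrade/Connection headers are forwarded
curl -i -N \
  -H 'Host: stomp.sinoassist.com' \
  -H 'Connection: Upgrade' \
  -H 'Upgrade: websocket' \
  -H 'Sec-WebSocket-Version: 13' \
  -H 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==' \
  http://192.168.10.51:8080/ws
```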
11
docker-swarm/nginx-prod-50/nginx_ssl_sinoassist_conf_v1
Normal file
@ -0,0 +1,11 @@
listen 8443 ssl;

ssl_certificate /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.pem;
ssl_certificate_key /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.key;

ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5:!DES:!3DES;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
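TLSv1 and TLSv1.1 are kept here only for legacy clients; modern clients should negotiate TLSv1.2. The served certificate and the negotiated protocol can be checked from any client host (the node address below is an assumption, use whichever host publishes 8443):

```
# Confirm a TLS 1.2 handshake succeeds and inspect the certificate validity window
openssl s_client -connect 192.168.10.51:8443 -servername www.sinoassist.com -tls1_2 </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -dates
```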
39
docker-swarm/portainer/docker-compose.yml
Normal file
@ -0,0 +1,39 @@
version: '3.2'

services:
  agent:
    image: portainer/agent:2.20.3
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - agent_network
    deploy:
      mode: global
      placement:
        constraints: [node.platform.os == linux]

  portainer:
    image: portainer/portainer-ce:2.20.3
    command: -H tcp://tasks.agent:9001 --tlsskipverify
    ports:
      - "9443:9443"
      - "9000:9000"
      - "8000:8000"
    volumes:
      - portainer_data:/data
    networks:
      - agent_network
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.hostname == ZD-CRM1]

networks:
  agent_network:
    driver: overlay
    attachable: true

volumes:
  portainer_data:
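This stack is deployed directly with docker stack deploy (no envsubst step); after deployment the agent should show one task per node and the UI a single replica pinned to ZD-CRM1:

```
# Run on a manager node from the directory containing this compose file
docker stack deploy --compose-file docker-compose.yml portainer

# Check replica counts: agent is global, portainer should report 1/1
docker stack services portainer
```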
@ -1,4 +1,23 @@
# Deploy the RabbitMQ cluster in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_rabbitmq

rabbitmqctl add_user root gkxl650
rabbitmqctl set_user_tags root administrator
rabbitmqctl set_permissions -p / root ".*" ".*" ".*"

rabbitmqctl add_user admin gkxl650
rabbitmqctl set_user_tags admin administrator
rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"


# Deploy the RabbitMQ cluster in the prod environment

env $(cat ./env_prod | xargs) envsubst < ./docker-compose-prod.yml | docker stack deploy --compose-file - prod_rabbitmq
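Once the stack is up, the rabbitmqctl user/permission commands above are run inside the stats container, and cluster membership can be confirmed there as well. A sketch assuming the prod stack and default task naming:

```
# Find the stats task's container on the node running it
STATS_CID=$(docker ps -q -f name=prod_rabbitmq_stats | head -n1)

# queue1 and queue2 should appear as joined cluster members
docker exec "$STATS_CID" rabbitmqctl cluster_status
docker exec "$STATS_CID" rabbitmqctl list_users
```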
83
docker-swarm/rabbitmq/docker-compose-prod.yml
Normal file
@ -0,0 +1,83 @@
version: '3.8'

networks:
  default:
    name: ${NAMESPACE}
    external: true

services:
  stats:
    image: docker.io/bitnami/rabbitmq:3.11
    hostname: ${NAMESPACE}-rabbitmq-stats
    environment:
      - TZ=Asia/Shanghai
      - RABBITMQ_NODE_TYPE=stats
      - RABBITMQ_NODE_NAME=rabbit@stats
      - RABBITMQ_ERL_COOKIE=s3cr3tc00ki3
      - RABBITMQ_SECURE_PASSWORD=yes
      - RABBITMQ_VHOSTS=/${NAMESPACE}
      - RABBITMQ_USERNAME=root
      - RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
      - RABBITMQ_PLUGINS=rabbitmq_management,rabbitmq_stomp,rabbitmq_web_stomp
      - RABBITMQ_LOGS=-
    ports:
      - '${NODE_PORT}:15672'
    volumes:
      - 'data_stats:/bitnami/rabbitmq/mnesia'
    deploy:
      update_config:
        order: start-first
      placement:
        constraints:
          - node.labels.rabbit_stats==1

  queue1:
    image: docker.io/bitnami/rabbitmq:3.11
    hostname: ${NAMESPACE}-rabbitmq-queue1
    environment:
      - TZ=Asia/Shanghai
      - RABBITMQ_NODE_TYPE=queue-disc
      - RABBITMQ_NODE_NAME=rabbit@queue1
      - RABBITMQ_CLUSTER_NODE_NAME=rabbit@stats
      - RABBITMQ_ERL_COOKIE=s3cr3tc00ki3
      - RABBITMQ_SECURE_PASSWORD=yes
      - RABBITMQ_VHOSTS=/${NAMESPACE}
      - RABBITMQ_USERNAME=root
      - RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
      - RABBITMQ_PLUGINS=rabbitmq_stomp,rabbitmq_web_stomp
      - RABBITMQ_LOGS=-
    volumes:
      - 'data_disc:/bitnami/rabbitmq/mnesia'
    deploy:
      update_config:
        order: start-first
      placement:
        constraints:
          - node.labels.rabbit_queue==1

  queue2:
    image: docker.io/bitnami/rabbitmq:3.11
    hostname: ${NAMESPACE}-rabbitmq-queue2
    environment:
      - TZ=Asia/Shanghai
      - RABBITMQ_NODE_TYPE=queue-disc
      - RABBITMQ_NODE_NAME=rabbit@queue2
      - RABBITMQ_CLUSTER_NODE_NAME=rabbit@stats
      - RABBITMQ_ERL_COOKIE=s3cr3tc00ki3
      - RABBITMQ_SECURE_PASSWORD=yes
      - RABBITMQ_VHOSTS=/${NAMESPACE}
      - RABBITMQ_USERNAME=root
      - RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
      - RABBITMQ_PLUGINS=rabbitmq_stomp,rabbitmq_web_stomp
      - RABBITMQ_LOGS=-
    volumes:
      - 'data_disc:/bitnami/rabbitmq/mnesia'
    deploy:
      update_config:
        order: start-first
      placement:
        constraints:
          - node.labels.rabbit_queue==1

volumes:
  data_stats:
    driver: local
  data_disc:
    driver: local
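The placement constraints above rely on rabbit_stats / rabbit_queue node labels, which have to be set from a manager before deploying. Which node takes which role is an assumption here; adjust the hostname-to-role mapping as needed:

```
# Label the swarm nodes so the stats and queue services can be scheduled
docker node update --label-add rabbit_stats=1 ZD-CRM1
docker node update --label-add rabbit_queue=1 ZD-CRM2
docker node update --label-add rabbit_queue=1 ZD-CRM3
```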
3
docker-swarm/rabbitmq/env_prod
Normal file
@ -0,0 +1,3 @@
NAMESPACE=prod
NODE_PORT=15672
RABBITMQ_PASSWORD=gkxl650
8
docker-swarm/redis copy/README
Normal file
@ -0,0 +1,8 @@
# Deploy Redis Sentinel in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_redis

# Deploy Redis Sentinel in the prod environment

env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_redis
71
docker-swarm/redis copy/docker-compose.yml
Normal file
@ -0,0 +1,71 @@
version: '3.8'

networks:
  default:
    name: ${NAMESPACE}
    external: true

services:
  master:
    image: 'bitnami/redis:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_REPLICATION_MODE=master
      - REDIS_PASSWORD=${REDIS_PASSWORD}
    ports:
      - '${NODE_PORT}:6379'
    volumes:
      - data_master:/bitnami
    deploy:
      update_config:
        order: start-first
      placement:
        constraints:
          - node.labels.${NAMESPACE}_redis_master==1

  slave:
    image: 'bitnami/redis:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_HOST=${NAMESPACE}_redis_master
      - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
      - REDIS_PASSWORD=${REDIS_PASSWORD}
    depends_on:
      - master
    volumes:
      - data_slave:/bitnami
    deploy:
      update_config:
        order: start-first
      placement:
        constraints:
          - node.labels.${NAMESPACE}_redis_slave==1

  redis-sentinel:
    image: 'bitnami/redis-sentinel:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_MASTER_HOST=${NAMESPACE}_redis_master
      - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
      - REDIS_SENTINEL_PASSWORD=${REDIS_SENTINEL_PASSWORD}
    depends_on:
      - master
      - slave
    deploy:
      update_config:
        order: start-first
      mode: global
      placement:
        constraints:
          - node.labels.${NAMESPACE}_redis_sentinel==1
        max_replicas_per_node: 1
    volumes:
      - data_sentinel:/bitnami

volumes:
  data_sentinel:
    driver: local
  data_master:
    driver: local
  data_slave:
    driver: local
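As with RabbitMQ, the ${NAMESPACE}_redis_* constraints need matching node labels before the stack will schedule, and replication can be verified from the master task once it is running. The node assignment and the prod_redis stack name below are assumptions:

```
# Label nodes for the prod namespace (adjust roles to taste)
docker node update --label-add prod_redis_master=1 ZD-CRM1
docker node update --label-add prod_redis_slave=1 ZD-CRM2
docker node update --label-add prod_redis_sentinel=1 ZD-CRM1
docker node update --label-add prod_redis_sentinel=1 ZD-CRM2
docker node update --label-add prod_redis_sentinel=1 ZD-CRM3

# Expect role:master and connected_slaves:1 in the output
docker exec $(docker ps -q -f name=prod_redis_master | head -n1) \
  redis-cli -a gkxl650 info replication
```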
4
docker-swarm/redis copy/env_crm1
Normal file
@ -0,0 +1,4 @@
NAMESPACE=crm1
NODE_PORT=6379
REDIS_PASSWORD=gkxl650
REDIS_SENTINEL_PASSWORD=gkxl650
4
docker-swarm/redis copy/env_prod
Normal file
@ -0,0 +1,4 @@
NAMESPACE=prod
NODE_PORT=6379
REDIS_PASSWORD=gkxl650
REDIS_SENTINEL_PASSWORD=gkxl650
@ -3,4 +3,8 @@
# This configures the index lifecycle; the sw-policy ILM policy must be added in ES first

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_skywalking --with-registry-auth

# prod environment
env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_skywalking --with-registry-auth
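The sw-policy lifecycle policy referenced by SW_STORAGE_ES_ADVANCED has to exist in Elasticsearch before the OAP writes its indices. A minimal sketch using the ILM API; the 7-day retention and the ES address are assumptions, and the request must be sent from a host or container that can reach the ES service on the prod network:

```
# Create a bare-bones lifecycle policy that deletes SkyWalking indices after 7 days
curl -X PUT 'http://prod-es-elasticsearch:9200/_ilm/policy/sw-policy' \
  -H 'Content-Type: application/json' \
  -d '{
        "policy": {
          "phases": {
            "delete": { "min_age": "7d", "actions": { "delete": {} } }
          }
        }
      }'
```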
@ -5,7 +5,7 @@ networks:
    external: true
services:
  oap:
-    image: apache/skywalking-oap-server:9.4.0
+    image: apache/skywalking-oap-server:10.0.0
    hostname: ${NAMESPACE}-skywalking-oap
    ports:
      - '${NODE_PORT}:11800'
@ -13,7 +13,7 @@ services:
    environment:
      - TZ=Asia/Shanghai
      - SW_STORAGE=elasticsearch
-      - SW_STORAGE_ES_CLUSTER_NODES=${NAMESPACE}_es_elasticsearch:9200
+      - SW_STORAGE_ES_CLUSTER_NODES=${NAMESPACE}-es-elasticsearch:9200
      - SW_HEALTH_CHECKER=default
      - SW_TELEMETRY=prometheus
      - SW_STORAGE_ES_ADVANCED={"index.lifecycle.name":"sw-policy"}
@ -25,13 +25,13 @@ services:
        constraints:
          - node.labels.${NAMESPACE}_skywalking==1
  ui:
-    image: apache/skywalking-ui:9.4.0
+    image: apache/skywalking-ui:10.0.0
    ports:
      - "${NODE_PORT_UI}:8080"
    environment:
      - TZ=Asia/Shanghai
-      - SW_OAP_ADDRESS=http://${NAMESPACE}_skywalking_oap:12800
-      - SW_ZIPKIN_ADDRESS=http://${NAMESPACE}_skywalking_oap:9412
+      - SW_OAP_ADDRESS=http://${NAMESPACE}-skywalking-oap:12800
+      - SW_ZIPKIN_ADDRESS=http://${NAMESPACE}-skywalking-oap:9412
    depends_on:
      - oap
    deploy:
4
docker-swarm/skywalking/env_prod
Normal file
@ -0,0 +1,4 @@
NAMESPACE=prod
NODE_PORT=11800
NODE_PORT_2=12800
NODE_PORT_UI=18080
@ -1,4 +1,10 @@
# Deploy xxl-job in the crm1 environment

env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_xxl_job

# Deploy xxl-job in the prod environment

env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_xxl_job


docker stack deploy --compose-file docker-compose.yml portainer
@ -6,18 +6,19 @@ networks:
    external: true
services:
  server:
-    image: 'xuxueli/xxl-job-admin:2.2.0'
+    image: 'xuxueli/xxl-job-admin:2.4.1'
    hostname: ${NAMESPACE}-xxl-job-admin
    ports:
      - '${NODE_PORT}:8080'
    environment:
      - TZ=Asia/Shanghai
-      - PARAMS=--spring.datasource.url=jdbc:mysql://${DATASOURCE_URL}?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=${DATASOURCE_USERNAME} --spring.datasource.password=${DATASOURCE_PASSWORD}
+      - PARAMS=--xxl.job.logretentiondays=90 --xxl.job.accessToken= --spring.datasource.url=jdbc:mysql://${DATASOURCE_URL}?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=${DATASOURCE_USERNAME} --spring.datasource.password=${DATASOURCE_PASSWORD}
    deploy:
      mode: replicated
      replicas: ${REPLICAS}
      update_config:
        order: start-first
      placement:
        constraints:
-          - node.labels.${NAMESPACE}_xxl_job_admin==1
+          - node.labels.xxl_job_admin==1
+        max_replicas_per_node: 1
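The constraint change drops the ${NAMESPACE} prefix, so the label is now shared across stacks; with REPLICAS=2 and max_replicas_per_node: 1 at least two labeled nodes are needed (which two is an assumption):

```
# Label two nodes that may host the xxl-job admin replicas
docker node update --label-add xxl_job_admin=1 ZD-CRM1
docker node update --label-add xxl_job_admin=1 ZD-CRM2
```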
@ -2,4 +2,5 @@ NAMESPACE=crm1
NODE_PORT=9991
DATASOURCE_URL=crm1_mysql_db:3306/xxl_job
DATASOURCE_USERNAME=root
DATASOURCE_PASSWORD=gkxl650
+REPLICAS=1
6
docker-swarm/xxl-job-admin/env_prod
Normal file
@ -0,0 +1,6 @@
NAMESPACE=prod
NODE_PORT=9991
DATASOURCE_URL=prod-tool-mysql-master:3306/xxl_job
DATASOURCE_USERNAME=zd_tool
DATASOURCE_PASSWORD=gkxl2024#@
REPLICAS=2
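A quick post-deploy check against the published admin port, assuming NODE_PORT=9991 is reachable on a swarm node and the image's default /xxl-job-admin context path is unchanged:

```
# Expect a 200 or a redirect to the login page
curl -I http://192.168.10.51:9991/xxl-job-admin/
```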