marsal wang
2023-07-26 10:07:34 +08:00
parent f884cb1020
commit 1e5a703cce
5384 changed files with 618283 additions and 4002 deletions

View File

@ -0,0 +1,9 @@
# Deploy canal in the crm1 / review environments
docker network create --driver overlay review
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_canal
env $(cat ./env_review | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - review_canal
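# Sketch (the node name "node1" is a placeholder): the compose file pins the service to nodes labelled <namespace>_canal,
# and the crm1 overlay network must exist as well (the command above only creates review)
docker network create --driver overlay crm1
docker node update --label-add crm1_canal=1 node1
docker node update --label-add review_canal=1 node1
# check that the canal service came up
docker stack services crm1_canal
docker service logs --tail 50 crm1_canal_db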

View File

@ -0,0 +1,33 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
db:
image: canal/canal-server:v1.1.5
environment:
- TZ=Asia/Shanghai
- canal.instance.master.address=${canal_instance_master_address}
- canal.instance.dbUsername=${canal_instance_dbUsername}
- canal.instance.dbPassword=${canal_instance_dbPassword}
- canal.instance.gtidon=false
- canal.instance.connectionCharset=UTF-8
- canal.instance.tsdb.enable=true
- canal.instance.enableDruid=false
- canal.instance.filter.regex=${canal_instance_filter_regex}
- canal.instance.parser.parallel=true
- canal.serverMode=rabbitMQ
- canal.mq.topic=${canal_mq_topic}
- rabbitmq.host=${rabbitmq_host}
- rabbitmq.exchange=${rabbitmq_exchange}
- rabbitmq.username=${rabbitmq_username}
- rabbitmq.password=${rabbitmq_password}
- rabbitmq.virtual.host=${rabbitmq_virtual_host}
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_canal==1

View File

@ -0,0 +1,11 @@
NAMESPACE=crm1
canal_instance_master_address=crm_mysql_db:3306
canal_instance_dbUsername=root
canal_instance_dbPassword=gkxl650
canal_instance_filter_regex=zd_rescue\\.user_order_20.*,zd_rescue\\.task_order_20.*,zd_rescue\\.task_order_cost_20.*,zd_rescue\\.supplier_account_record_20.*,zd_rescue\\.customer_order_account_20.*,zd_rescue\\.customer_order_relation_20.*
canal_mq_topic=canal_mysql_bin
rabbitmq_host=crm1_rabbitmq_stats:5672
rabbitmq_exchange=canal_exchange
rabbitmq_username=root
rabbitmq_password=gkxl650
rabbitmq_virtual_host=canal

View File

@ -0,0 +1,11 @@
NAMESPACE=review
canal_instance_master_address=192.168.10.10:3306
canal_instance_dbUsername=repl
canal_instance_dbPassword=nczl@sino_db
canal_instance_filter_regex=zd_rescue\\.user_order_20.*,zd_rescue\\.task_order_20.*,zd_rescue\\.task_order_cost_20.*,zd_rescue\\.supplier_account_record_20.*,zd_rescue\\.customer_order_account_20.*,zd_rescue\\.customer_order_relation_20.*
canal_mq_topic=canal_mysql_bin
rabbitmq_host=192.168.3.110:5672
rabbitmq_exchange=canal_exchange
rabbitmq_username=root
rabbitmq_password=gkxl650
rabbitmq_virtual_host=review

View File

@ -0,0 +1,7 @@
# Deploy clickhouse in the crm1 / review environments
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_clickhouse
env $(cat ./env_review | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - review_clickhouse
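# Sketch (the node name "node1" is a placeholder): label the node the placement constraint expects, then ping the published HTTP port
docker node update --label-add crm1_clickhouse=1 node1
curl http://127.0.0.1:8123/ping   # should answer "Ok."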

View File

@ -0,0 +1,28 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
db:
image: docker.io/bitnami/clickhouse:23
ports:
- '${NODE_PORT}:8123'
environment:
- TZ=Asia/Shanghai
- CLICKHOUSE_ADMIN_USER=${CLICKHOUSE_ADMIN_USER}
- CLICKHOUSE_ADMIN_PASSWORD=${CLICKHOUSE_ADMIN_PASSWORD}
volumes:
- 'data_db:/bitnami/clickhouse'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_clickhouse==1
volumes:
data_db:
driver: local

View File

@ -0,0 +1,4 @@
NAMESPACE=crm1
NODE_PORT=8123
CLICKHOUSE_ADMIN_USER=default
CLICKHOUSE_ADMIN_PASSWORD=gkxl650

View File

@ -0,0 +1,4 @@
NAMESPACE=review
NODE_PORT=8123
CLICKHOUSE_ADMIN_USER=default
CLICKHOUSE_ADMIN_PASSWORD=gkxl650

View File

@ -0,0 +1,10 @@
# Deploy the datart dependencies (redis, mysql, selenium chrome) in the crm1 / review environments
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_datart
env $(cat ./env_review | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - review_datart
java -Dspring.profiles.active=prod -Xms512M -Dsa.nacos.namespace=prod -Dspring.cloud.nacos.config.server-addr=192.168.10.12:8848 -Dspring.cloud.nacos.discovery.server-addr=192.168.10.12:8848 -Dfile.encoding=UTF-8 -jar /zd/gps/zhongdao-gps.jar
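# Sketch (the node name "node1" is a placeholder): all three services are pinned to nodes labelled <namespace>_datart
docker node update --label-add crm1_datart=1 node1
docker stack services crm1_datart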

View File

@ -0,0 +1,68 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
redis:
image: 'bitnami/redis:7.0.11'
environment:
- TZ=Asia/Shanghai
- REDIS_REPLICATION_MODE=master
- REDIS_PASSWORD=${REDIS_PASSWORD}
ports:
- '${REDIS_PORT}:6379'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_datart==1
db:
image: docker.io/bitnami/mysql:8.0
ports:
- '${MYSQL_PORT}:3306'
environment:
- TZ=Asia/Shanghai
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_ENABLE_SLOW_QUERY=0
- MYSQL_LONG_QUERY_TIME=10
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password
volumes:
- 'mysql:/bitnami/mysql/data'
healthcheck:
test: ['CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh']
interval: 15s
timeout: 5s
retries: 6
configs:
- source: my_conf
target: /opt/bitnami/mysql/conf/my_custom.cnf
deploy:
placement:
constraints:
- node.labels.${NAMESPACE}_datart==1
chrome:
image: 'selenium/standalone-chrome:latest'
environment:
- TZ=Asia/Shanghai
ports:
- '${CHROME_PORT}:4444'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_datart==1
volumes:
mysql:
driver: local
configs:
my_conf:
file: ./my.conf

View File

@ -0,0 +1,9 @@
NAMESPACE=crm1
REDIS_PORT=16379
REDIS_PASSWORD=gkxl650
MYSQL_PORT=13306
MYSQL_ROOT_PASSWORD=gkxl650
MYSQL_DATABASE=datart
MYSQL_USER=datart
MYSQL_PASSWORD=gkxl650
CHROME_PORT=14444

View File

@ -0,0 +1,9 @@
NAMESPACE=review
REDIS_PORT=16379
REDIS_PASSWORD=gkxl650
MYSQL_PORT=13306
MYSQL_ROOT_PASSWORD=gkxl650
MYSQL_DATABASE=datart
MYSQL_USER=datart
MYSQL_PASSWORD=gkxl650
CHROME_PORT=14444

View File

@ -0,0 +1,6 @@
[mysqld]
max_allowed_packet=64M
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
log-bin=mysql-bin # enable binlog
binlog-format=ROW # use ROW format
server_id=123

View File

@ -0,0 +1,4 @@
# Deploy single-node Elasticsearch (with Kibana) in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_es --with-registry-auth
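# Sketch (the node name "node1" is a placeholder): the stack only schedules onto nodes carrying the crm1_es label
docker node update --label-add crm1_es=1 node1
# after deploy, 9200 and 5601 are published on the swarm; quick health check:
curl 'http://127.0.0.1:9200/_cluster/health?pretty'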

View File

@ -0,0 +1,44 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
elasticsearch:
image: harbor.sino-assist.com/marsal1212/elasticsearch:7.17.3
hostname: ${NAMESPACE}-es-elasticsearch
ports:
- '${NODE_PORT}:9200'
- '${NODE_PORT_2}:9300'
environment:
- TZ=Asia/Shanghai
volumes:
- 'data_db:/bitnami/elasticsearch/data'
deploy:
placement:
constraints:
- node.labels.${NAMESPACE}_es==1
kibana:
image: docker.io/bitnami/kibana:7.17.3
ports:
- "${NODE_PORT_KIBANA}:5601"
volumes:
- "kibana_data:/bitnami/kibana"
environment:
- TZ=Asia/Shanghai
- KIBANA_ELASTICSEARCH_URL=${NAMESPACE}_es_elasticsearch
depends_on:
- elasticsearch
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_es==1
volumes:
data_db:
driver: local
kibana_data:
driver: local

View File

@ -0,0 +1,4 @@
NAMESPACE=crm1
NODE_PORT=9200
NODE_PORT_2=9300
NODE_PORT_KIBANA=5601

docker-swarm/log/README Normal file
View File

@ -0,0 +1,12 @@
# ES index lifecycle: see https://developer.aliyun.com/article/793119. Configure an index template for the indices and add the ILM policy in the template, e.g.:
# {
# "index": {
# "lifecycle": {
# "name": "90-days-default"
# }
# }
# }
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_log --with-registry-auth
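# Hedged sketch: attach the ILM policy above to the sslog-* indices written by logstash via a composable index template
# (the template name "sslog" is illustrative; assumes ES publishes 9200 locally and that the 90-days-default policy already exists).
# Note the external crm1_logs volume must already exist on each node, since filebeat runs in global mode.
curl -X PUT 'http://127.0.0.1:9200/_index_template/sslog' -H 'Content-Type: application/json' -d '{"index_patterns":["sslog-*"],"template":{"settings":{"index.lifecycle.name":"90-days-default"}}}'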

View File

@ -0,0 +1,46 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
logstash:
image: docker.elastic.co/logstash/logstash:7.17.3
ports:
- '${NODE_PORT}:5044'
environment:
- TZ=Asia/Shanghai
configs:
- source: logstash_conf
target: /usr/share/logstash/pipeline/my.conf
deploy:
placement:
constraints:
- node.labels.${NAMESPACE}_es==1
filebeat:
image: docker.elastic.co/beats/filebeat:7.17.3
environment:
- TZ=Asia/Shanghai
- LOGSTASH_URL=${NAMESPACE}_log_logstash:5044
- KIBANA_HOSTS=${NAMESPACE}_es_kibana
configs:
- source: filebeat_conf
target: /usr/share/filebeat/filebeat.yml
volumes:
- ${NAMESPACE}_logs:/logs
deploy:
update_config:
order: start-first
mode: global
placement:
constraints: [node.platform.os == linux]
configs:
logstash_conf:
file: ./logstash.conf
filebeat_conf:
file: ./filebeat.yml
volumes:
${NAMESPACE}_logs:
external: true

View File

@ -0,0 +1,2 @@
NAMESPACE=crm1
NODE_PORT=5045

View File

@ -0,0 +1,16 @@
filebeat.inputs:
- type: filestream
id: new-sino-log
paths:
- "/logs/*/*.log"
parsers:
- multiline:
type: pattern
pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
negate: true
match: after
setup.kibana.host: "${KIBANA_HOSTS:kibana:5601}"
output.logstash:
hosts: '${LOGSTASH_URL:logstash:5044}'

View File

@ -0,0 +1,36 @@
filter {
grok {
match => { "message" => "%{TIMESTAMP_ISO8601:oldtimestamp}\s+\[%{DATA:service}\]\s+\[TID:%{NOTSPACE:tid}\]\s+\[%{DATA:thread}\]\s+%{LOGLEVEL:loglevel}\s+%{NOTSPACE:class}\s+-%{GREEDYDATA:oldmessage}"}
}
date {
match => ["oldtimestamp", "ISO8601"]
target => "@timestamp"
}
mutate {
replace => { "message" => "%{oldmessage}" }
remove_field => [ "oldmessage","oldtimestamp","agent","host","input","log.flags","log.flags.keyword","tags" ]
}
}
output {
if [service] {
elasticsearch {
hosts => [ "crm1-es-elasticsearch:9200" ]
index => "sslog-%{[service]}"
action => "create"
ilm_enabled => false
}
} else {
elasticsearch {
hosts => [ "crm1-es-elasticsearch:9200" ]
index => "sslog-default"
action => "create"
ilm_enabled => false
}
}
}

View File

@ -0,0 +1,4 @@
# Deploy a standalone mongodb instance in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_mongodb
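# Sketch (the node name "node1" is a placeholder): label the mongodb node, then check the service
docker node update --label-add crm1_mongodb=1 node1
docker stack services crm1_mongodb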

View File

@ -0,0 +1,28 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
db:
image: docker.io/bitnami/mongodb:6.0
ports:
- '${NODE_PORT}:27017'
environment:
- TZ=Asia/Shanghai
- MONGODB_ROOT_USER=root
- MONGODB_ROOT_PASSWORD=123456
- MONGODB_DATABASE=${MONGODB_DATABASE}
volumes:
- 'data_db:/bitnami/mongodb'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_mongodb==1
volumes:
data_db:
driver: local

View File

@ -0,0 +1,3 @@
NAMESPACE=crm1
NODE_PORT=27017
MONGODB_DATABASE=gps_data

docker-swarm/mysql/README Normal file
View File

@ -0,0 +1,36 @@
# Deploy mysql in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_mysql
[mysqladmin]
user=
[mysqld]
skip_name_resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
port=3306
tmpdir=/opt/bitnami/mysql/tmp
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid_file=/opt/bitnami/mysql/tmp/mysqld.pid
max_allowed_packet=16M
bind_address=0.0.0.0
log_error=/opt/bitnami/mysql/logs/mysqld.log
slow_query_log=0
slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log
long_query_time=10
character_set_server=utf8mb4
collation_server=utf8mb4_unicode_ci
plugin_dir=/opt/bitnami/mysql/lib/plugin
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default_character_set=utf8mb4
plugin_dir=/opt/bitnami/mysql/lib/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid_file=/opt/bitnami/mysql/tmp/mysqld.pid
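# Sketch: the [mysqladmin]..[manager] sections above appear to be the custom my.cnf; the compose file expects it as an
# external docker config named by CUSTOME_CONFIG, plus the crm1_mysql node label ("./my_custom.cnf" and "node1" are placeholders)
docker config create crm1_mysql_conf_v1 ./my_custom.cnf
docker node update --label-add crm1_mysql=1 node1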

View File

@ -0,0 +1,42 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
db:
image: docker.io/bitnami/mysql:8.0
ports:
- '${NODE_PORT}:3306'
environment:
- TZ=Asia/Shanghai
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_ENABLE_SLOW_QUERY=0
- MYSQL_LONG_QUERY_TIME=10
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password
volumes:
- 'data_db:/bitnami/mysql/data'
healthcheck:
test: ['CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh']
interval: 15s
timeout: 5s
retries: 6
configs:
- source: custome_config
target: /opt/bitnami/mysql/conf/my_custom.cnf
deploy:
placement:
constraints:
- node.labels.${NAMESPACE}_mysql==1
volumes:
data_db:
driver: local
configs:
custome_config:
external: true
name: ${CUSTOME_CONFIG}

View File

@ -0,0 +1,7 @@
NAMESPACE=crm1
NODE_PORT=3306
MYSQL_USER=gkxl650
MYSQL_PASSWORD=gkxl650
MYSQL_ROOT_PASSWORD=gkxl650
MYSQL_DATABASE=zd_rescue
CUSTOME_CONFIG=crm1_mysql_conf_v1

View File

@ -0,0 +1,5 @@
# Deploy standalone nacos in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./standalone-derby.yml | docker stack deploy --compose-file - crm1_nacos
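# Sketch (the node name "node1" is a placeholder): the standalone compose pins nacos to nodes labelled crm1_nacos_server
docker node update --label-add crm1_nacos_server=1 node1
docker stack services crm1_nacos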

View File

@ -0,0 +1,155 @@
version: '3.8'
services:
nacos1:
container_name: nacos1
image: nacos/nacos-server:latest
hostname: nacos1
restart: always
ports:
- target: 8848
published: 8848
protocol: tcp
mode: host # use host mode (the default is ingress; it is flexible and can be switched back to ingress as needed). Host mode is used here so nacos does not go through swarm's routing mesh; the servers reach each other over the internal network and the nacos port, and external access is exposed through an nginx config
volumes:
- cluster1_logs:/home/nacos/logs # docker volume for the nacos logs
environment:
MODE: cluster
PREFER_HOST_MODE: hostname
NACOS_SERVERS: 192.168.3.75:8848 192.168.3.94:8848 192.168.3.142:8848
NACOS_SERVER_IP: 192.168.3.75
NACOS_SERVER_PORT: 8848
NACOS_AUTH_ENABLE: 'true' # since 1.2.0 the login page is disabled by default; enable auth to turn it on
MYSQL_SERVICE_HOST: mysql
MYSQL_SERVICE_DB_NAME: nacos_devtest
MYSQL_SERVICE_PORT: 3306
MYSQL_SERVICE_USER: nacos
MYSQL_SERVICE_PASSWORD: 123456
deploy:
replicas: 1 # deploy a single replica
placement:
constraints:
- node.labels.env==docker-server-1
restart_policy:
condition: on-failure
depends_on:
- mysql
networks:
- srm
nacos2:
container_name: nacos2
image: nacos/nacos-server:latest
restart: always
hostname: nacos2
ports:
- target: 8848
published: 8848
protocol: tcp
mode: host
volumes:
- cluster2_logs:/home/nacos/logs
environment:
MODE: cluster
PREFER_HOST_MODE: hostname
NACOS_SERVERS: 192.168.3.75:8848 192.168.3.94:8848 192.168.3.142:8848
NACOS_SERVER_IP: 192.168.3.94
NACOS_SERVER_PORT: 8848
NACOS_AUTH_ENABLE: 'true'
MYSQL_SERVICE_HOST: mysql
MYSQL_SERVICE_DB_NAME: nacos_devtest
MYSQL_SERVICE_PORT: 3306
MYSQL_SERVICE_USER: nacos
MYSQL_SERVICE_PASSWORD: 123456
deploy:
replicas: 1
placement:
constraints:
- node.labels.env==docker-server-2
restart_policy:
condition: on-failure
depends_on:
- mysql
networks:
- srm
nacos3:
container_name: nacos3
image: nacos/nacos-server:latest
restart: always
hostname: nacos3
ports:
- target: 8848
published: 8848
protocol: tcp
mode: host
volumes:
- cluster3_logs:/home/nacos/logs
environment:
MODE: cluster
PREFER_HOST_MODE: hostname
NACOS_SERVERS: 192.168.3.75:8848 192.168.3.94:8848 192.168.3.142:8848
NACOS_SERVER_IP: 192.168.3.142
NACOS_SERVER_PORT: 8848
NACOS_AUTH_ENABLE: 'true'
MYSQL_SERVICE_HOST: mysql
MYSQL_SERVICE_DB_NAME: nacos_devtest
MYSQL_SERVICE_PORT: 3306
MYSQL_SERVICE_USER: nacos
MYSQL_SERVICE_PASSWORD: 123456
deploy:
replicas: 1
placement:
constraints:
- node.labels.env==docker-server-3
restart_policy:
condition: on-failure
depends_on:
- mysql
networks:
- srm
mysql:
image: mysql:5.7.33
restart: always
container_name: mysql
hostname: mysql
ports:
- 3306:3306
volumes:
- /data/software/nacos/mysql/data:/var/lib/mysql
- /etc/localtime:/etc/localtime:ro
- /etc/my.cnf:/etc/mysql/mysql.conf.d/my.cnf
environment:
TZ: Asia/Shanghai
MYSQL_ROOT_PASSWORD: sonar
MYSQL_DATABASE: nacos_devtest
deploy:
replicas: 1
placement:
constraints:
- node.labels.env==docker-server-1
restart_policy:
condition: on-failure
networks:
- srm
volumes:
cluster1_logs:
cluster2_logs:
cluster3_logs:
networks:
srm:
external: true
#https://blog.51cto.com/u_12898848/4054447

View File

@ -0,0 +1,5 @@
NAMESPACE=crm1
NACOS_VERSION=v2.2.2
NODE_PORT=8848
NODE_PORT_2=9848
NACOS_SERVER_IP=192.168.1.209

View File

@ -0,0 +1,38 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
server:
hostname: ${NAMESPACE}_nacos_server
image: nacos/nacos-server:${NACOS_VERSION}
environment:
- PREFER_HOST_MODE=hostname
- NACOS_SERVER_IP=${NACOS_SERVER_IP}
- MODE=standalone
- NACOS_AUTH_ENABLE=true
- NACOS_AUTH_IDENTITY_KEY=bndmsdsad
- NACOS_AUTH_IDENTITY_VALUE=wepqweq#dasld
- NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456587012345678901234567890123456789
ports:
- target: 8848
published: ${NODE_PORT}
protocol: tcp
mode: host # the default is ingress, i.e. swarm's load-balanced routing mesh: the published port is reachable on any cluster node and forwarded to the service container, similar to a k8s NodePort service. With host mode, the container is only reachable through the address of the node it runs on plus the published port.
- target: 9848
published: ${NODE_PORT_2}
protocol: tcp
mode: host
volumes:
- data_server:/home/nacos/
deploy:
update_config:
order: stop-first
placement:
constraints:
- node.labels.${NAMESPACE}_nacos_server==1
volumes:
data_server:
driver: local

View File

@ -0,0 +1,4 @@
# Deploy nginx in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_nginx
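# Sketch: the compose file mounts an external docker config (CUSTOME_CONFIG) as the crm1.conf server block;
# "./crm1.conf" is a hypothetical local file holding that server block and "node1" is a placeholder node name
docker config create crm1_nginx_config ./crm1.conf
docker node update --label-add crm1_nginx=1 node1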

View File

@ -0,0 +1,26 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
server:
image: 'docker.io/bitnami/nginx:1.24'
ports:
- '${NODE_PORT}:8080'
environment:
- TZ=Asia/Shanghai
configs:
- source: custome_config
target: /opt/bitnami/nginx/conf/server_blocks/crm1.conf
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_nginx==1
configs:
custome_config:
external: true
name: ${CUSTOME_CONFIG}

View File

@ -0,0 +1,3 @@
NAMESPACE=crm1
NODE_PORT=8080
CUSTOME_CONFIG=crm1_nginx_config

View File

@ -0,0 +1,4 @@
# Deploy a rabbitmq cluster in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_rabbitmq
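# Sketch (node names are placeholders): each rabbitmq node type is pinned to its own node label
docker node update --label-add crm1_rabbit_stats=1 node1
docker node update --label-add crm1_rabbit_queue-disc1=1 node2
docker node update --label-add crm1_rabbit_queue-ram1=1 node3
docker stack services crm1_rabbitmq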

View File

@ -0,0 +1,81 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
stats:
image: docker.io/bitnami/rabbitmq:3.11
environment:
- TZ=Asia/Shanghai
- RABBITMQ_NODE_TYPE=stats
- RABBITMQ_NODE_NAME=rabbit@stats
- RABBITMQ_ERL_COOKIE=s3cr3tc00ki3
- RABBITMQ_SECURE_PASSWORD=yes
- RABBITMQ_VHOSTS=/ ${NAMESPACE}
- RABBITMQ_USERNAME=root
- RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
- RABBITMQ_PLUGINS=rabbitmq_management,rabbitmq_stomp,rabbitmq_web_stomp
- RABBITMQ_LOGS=-
ports:
- '${NODE_PORT}:15672'
volumes:
- 'data_stats:/bitnami/rabbitmq/mnesia'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_rabbit_stats==1
queue-disc1:
image: docker.io/bitnami/rabbitmq:3.11
environment:
- TZ=Asia/Shanghai
- RABBITMQ_NODE_TYPE=queue-disc
- RABBITMQ_NODE_NAME=rabbit@queue-disc1
- RABBITMQ_CLUSTER_NODE_NAME=rabbit@stats
- RABBITMQ_ERL_COOKIE=s3cr3tc00ki3
- RABBITMQ_SECURE_PASSWORD=yes
- RABBITMQ_VHOSTS=/ ${NAMESPACE}
- RABBITMQ_USERNAME=root
- RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
- RABBITMQ_PLUGINS=rabbitmq_stomp,rabbitmq_web_stomp
- RABBITMQ_LOGS=-
volumes:
- 'data_disc1:/bitnami/rabbitmq/mnesia'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_rabbit_queue-disc1==1
queue-ram1:
image: docker.io/bitnami/rabbitmq:3.11
environment:
- TZ=Asia/Shanghai
- RABBITMQ_NODE_TYPE=queue-ram
- RABBITMQ_NODE_NAME=rabbit@queue-ram1
- RABBITMQ_CLUSTER_NODE_NAME=rabbit@stats
- RABBITMQ_ERL_COOKIE=s3cr3tc00ki3
- RABBITMQ_SECURE_PASSWORD=yes
- RABBITMQ_VHOSTS=/ ${NAMESPACE}
- RABBITMQ_USERNAME=root
- RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
- RABBITMQ_PLUGINS=rabbitmq_stomp,rabbitmq_web_stomp
- RABBITMQ_LOGS=-
volumes:
- 'data_ram1:/bitnami/rabbitmq/mnesia'
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_rabbit_queue-ram1==1
volumes:
data_stats:
driver: local
data_disc1:
driver: local
data_ram1:
driver: local

View File

@ -0,0 +1,3 @@
NAMESPACE=crm1
NODE_PORT=15672
RABBITMQ_PASSWORD=gkxl650

View File

@ -0,0 +1,4 @@
# Deploy redis sentinel in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_redis
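# Sketch (node names are placeholders): the master, the slave and the three sentinel replicas each need their node label
docker node update --label-add crm1_redis_master=1 node1
docker node update --label-add crm1_redis_slave=1 node2
for n in node1 node2 node3; do docker node update --label-add crm1_redis_sentinel=1 "$n"; done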

View File

@ -0,0 +1,71 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
master:
image: 'bitnami/redis:7.0.11'
environment:
- TZ=Asia/Shanghai
- REDIS_REPLICATION_MODE=master
- REDIS_PASSWORD=${REDIS_PASSWORD}
ports:
- '${NODE_PORT}:6379'
volumes:
- data_master:/bitnami
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_redis_master==1
slave:
image: 'bitnami/redis:7.0.11'
environment:
- TZ=Asia/Shanghai
- REDIS_REPLICATION_MODE=slave
- REDIS_MASTER_HOST=${NAMESPACE}_redis_master
- REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
- REDIS_PASSWORD=${REDIS_PASSWORD}
depends_on:
- master
volumes:
- data_slave:/bitnami
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_redis_slave==1
redis-sentinel:
image: 'bitnami/redis-sentinel:7.0.11'
environment:
- TZ=Asia/Shanghai
- REDIS_MASTER_HOST=${NAMESPACE}_redis_master
- REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
- REDIS_SENTINEL_PASSWORD=${REDIS_SENTINEL_PASSWORD}
depends_on:
- master
- slave
deploy:
update_config:
order: start-first
mode: replicated
replicas: 3 # replicated mode, 3 sentinel replicas
placement:
constraints:
- node.labels.${NAMESPACE}_redis_sentinel==1
volumes:
- data_sentinel:/bitnami
volumes:
data_sentinel:
driver: local
data_master:
driver: local
data_slave:
driver: local

View File

@ -0,0 +1,4 @@
NAMESPACE=crm1
NODE_PORT=6379
REDIS_PASSWORD=gkxl650
REDIS_SENTINEL_PASSWORD=gkxl650

View File

@ -0,0 +1,6 @@
### - SW_STORAGE_ES_ADVANCED={"index.lifecycle.name":"sw-policy"}
# This sets the index lifecycle for the skywalking indices; the sw-policy ILM policy has to be created in ES first
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_skywalking --with-registry-auth
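# Hedged sketch: create the sw-policy ILM policy referenced above (the 30d delete phase is only an example retention,
# adjust the phases to your needs), then label the node that runs the oap/ui services ("node1" is a placeholder)
curl -X PUT 'http://127.0.0.1:9200/_ilm/policy/sw-policy' -H 'Content-Type: application/json' -d '{"policy":{"phases":{"delete":{"min_age":"30d","actions":{"delete":{}}}}}}'
docker node update --label-add crm1_skywalking=1 node1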

View File

@ -0,0 +1,48 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
oap:
image: apache/skywalking-oap-server:9.4.0
hostname: ${NAMESPACE}-skywalking-oap
ports:
- '${NODE_PORT}:11800'
- '${NODE_PORT_2}:12800'
environment:
- TZ=Asia/Shanghai
- SW_STORAGE=elasticsearch
- SW_STORAGE_ES_CLUSTER_NODES=${NAMESPACE}_es_elasticsearch:9200
- SW_HEALTH_CHECKER=default
- SW_TELEMETRY=prometheus
- SW_STORAGE_ES_ADVANCED={"index.lifecycle.name":"sw-policy"}
- JAVA_OPTS=-Xms2048m -Xmx2048m
volumes:
- 'ext_config:/skywalking/ext-config'
deploy:
placement:
constraints:
- node.labels.${NAMESPACE}_skywalking==1
ui:
image: apache/skywalking-ui:9.4.0
ports:
- "${NODE_PORT_UI}:8080"
environment:
- TZ=Asia/Shanghai
- SW_OAP_ADDRESS=http://${NAMESPACE}_skywalking_oap:12800
- SW_ZIPKIN_ADDRESS=http://${NAMESPACE}_skywalking_oap:9412
depends_on:
- oap
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_skywalking==1
volumes:
ext_config:
driver: local

View File

@ -0,0 +1,4 @@
NAMESPACE=crm1
NODE_PORT=11800
NODE_PORT_2=12800
NODE_PORT_UI=18080

docker-swarm/test Normal file
View File

@ -0,0 +1,34 @@
# Write the return-order stack file on the swarm manager over ssh and deploy it
ssh root@192.168.1.209 << EOF
touch /data/swarm/crm1_ss_return-order.yml && echo '''
version: "3.8"
services:
svc:
image: harbor.sino-assist.com/sa-server/return-order:crm1
environment:
- active_profile=prod
- nacos_address=crm1_nacos_server:8848
- namespace=crm1
- TZ=Asia/Shanghai
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
resources:
limits:
cpus: "1"
memory: 800M
reservations:
cpus: "0.1"
memory: 300M
placement:
constraints:
- "node.labels.crm1_return-order==1"
networks:
default:
name: crm1
external: true
''' > /data/swarm/crm1_ss_return-order.yml && docker stack deploy -c /data/swarm/crm1_ss_return-order.yml crm1_ss_return-order
EOF

View File

@ -0,0 +1,4 @@
# Deploy xxl-job in the crm1 environment
env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - crm1_xxl_job
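# Sketch: the admin expects the xxl_job schema in crm1_mysql_db (credentials and published port taken from the mysql env file;
# the table script doc/db/tables_xxl_job.sql from the xxl-job release still has to be imported), and "node1" is a placeholder
mysql -h 127.0.0.1 -P 3306 -uroot -pgkxl650 -e "CREATE DATABASE IF NOT EXISTS xxl_job DEFAULT CHARACTER SET utf8mb4;"
docker node update --label-add crm1_xxl_job_admin=1 node1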

View File

@ -0,0 +1,23 @@
version: '3.8'
networks:
default:
name: ${NAMESPACE}
external: true
services:
server:
image: 'xuxueli/xxl-job-admin:2.2.0'
ports:
- '${NODE_PORT}:8080'
environment:
- TZ=Asia/Shanghai
- PARAMS=--spring.datasource.url=jdbc:mysql://${DATASOURCE_URL}?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&serverTimezone=Asia/Shanghai --spring.datasource.username=${DATASOURCE_USERNAME} --spring.datasource.password=${DATASOURCE_PASSWORD}
deploy:
update_config:
order: start-first
placement:
constraints:
- node.labels.${NAMESPACE}_xxl_job_admin==1

View File

@ -0,0 +1,5 @@
NAMESPACE=crm1
NODE_PORT=9991
DATASOURCE_URL=crm1_mysql_db:3306/xxl_job
DATASOURCE_USERNAME=root
DATASOURCE_PASSWORD=gkxl650