Compare commits (6 commits):

- bdd2b461dc
- 15dd2396d6
- b2eb2928d8
- 74efe62045
- 5aa3536ae7
- 9f2bd6db5f
ai/openai-test.py (new file, +5)
@@ -0,0 +1,5 @@
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# NOTE: the tokenizer is loaded from a local Whisper checkpoint while the model
# is T0_3B; for meaningful output both should come from the same pretrained path.
tokenizer = AutoTokenizer.from_pretrained("/opt/local/openai/whisper-large-v2")
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")
ai/配置服务器.md (new file, +7)
@@ -0,0 +1,7 @@
1. Install Anaconda to manage Python versions

https://www.anaconda.com/download/success


```wget https://repo.anaconda.com/archive/Anaconda3-2024.10-1-Linux-x86_64.sh```
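For context, a minimal sketch of the install flow this note implies; the batch-mode flags, install prefix, and env name are assumptions, not part of the note itself:

```
# Download and run the installer non-interactively (-b) into a fixed prefix
wget https://repo.anaconda.com/archive/Anaconda3-2024.10-1-Linux-x86_64.sh
bash Anaconda3-2024.10-1-Linux-x86_64.sh -b -p /opt/anaconda3
# Put conda on PATH for this shell, then pin a Python version in a named env
source /opt/anaconda3/bin/activate
conda create -n ai python=3.10 -y
conda activate ai
```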
builder-docker/.DS_Store (vendored binary, new file; not shown)
builder-docker/fastdfs/docker-compose-old.yml (new file, +46)
@@ -0,0 +1,46 @@
version: '3'
services:
  tracker:
    image: harbor.sino-assist.com/season/fastdfs:1.2
    container_name: tracker
    network_mode: host
    restart: always
    volumes:
      - "./tracker_data:/fastdfs/tracker/data"
    # ports:
    #   - "22122:22122"
    command: "tracker"

  storage:
    image: harbor.sino-assist.com/season/fastdfs:1.2
    container_name: storage
    network_mode: host
    # links:
    #   - tracker
    restart: always
    volumes:
      - "./storage.conf:/fdfs_conf/storage.conf"
      - "./storage_base_path:/fastdfs/storage/data"
      - "./store_path0:/fastdfs/store_path"
    #ports:
    #  - "23000:23000"
    environment:
      TRACKER_SERVER: "192.168.1.204:22122"
    command: "storage"

  nginx:
    image: harbor.sino-assist.com/season/fastdfs:1.2
    container_name: fdfs-nginx
    network_mode: host
    restart: always
    volumes:
      - "./nginx.conf:/etc/nginx/conf/nginx.conf"
      - "./store_path0:/fastdfs/store_path"
    # links:
    #   - tracker
    # ports:
    #   - "8088:8088"
    environment:
      TRACKER_SERVER: "192.168.1.204:22122"
    command: "nginx"
@@ -1,46 +1,28 @@
 version: '3'
 services:
   tracker:
-    image: season/fastdfs:1.2
     container_name: tracker
+    image: ygqygq2/fastdfs-nginx:latest
+    command: tracker
-    network_mode: host
     restart: always
     volumes:
-      - "./tracker_data:/fastdfs/tracker/data"
-    # ports:
-    #   - "22122:22122"
-    command: "tracker"
-
-  storage:
-    image: season/fastdfs:1.2
-    container_name: storage
+      - /data/tracker:/var/fdfs
+    ports:
+      - 22122:22122
+  storage0:
+    container_name: storage0
+    image: ygqygq2/fastdfs-nginx:latest
+    command: storage
-    network_mode: host
-    # links:
-    #   - tracker
-    restart: always
-    volumes:
-      - "./storage.conf:/fdfs_conf/storage.conf"
-      - "./storage_base_path:/fastdfs/storage/data"
-      - "./store_path0:/fastdfs/store_path"
-    #ports:
-    #  - "23000:23000"
+    extra_hosts:
+      - "tracker:192.168.1.204"
     environment:
-      TRACKER_SERVER: "192.168.1.206:22122"
-    command: "storage"
-
-  nginx:
-    image: season/fastdfs:1.2
-    container_name: fdfs-nginx
-    network_mode: host
-    restart: always
+      - TRACKER_SERVER=tracker:22122
     volumes:
-      - "./nginx.conf:/etc/nginx/conf/nginx.conf"
-      - "./store_path0:/fastdfs/store_path"
-    # links:
-    #   - tracker
-    # ports:
-    #   - "8088:8088"
-    environment:
-      TRACKER_SERVER: "192.168.1.206:22122"
-    command: "nginx"
+      - ./data/storage0:/var/fdfs
+    ports:
+      - 8080:8080
+    depends_on:
+      - tracker
+
+
+## https://github.com/ygqygq2/fastdfs-nginx
@@ -111,7 +111,7 @@ subdir_count_per_path=256

 # tracker_server can ocur more than once, and tracker_server format is
 #  "host:port", host can be hostname or ip address
-tracker_server=192.168.209.121:22122
+# tracker_server=192.168.209.121:22122

 #standard log level as syslog, case insensitive, value list:
 ### emerg for emergency
builder-docker/java11/skywalking-agent/.DS_Store (vendored binary, new file; not shown)
Binary file not shown.
Binary file not shown.
@@ -7,7 +7,7 @@ ADD gradle-8.7 /opt/gradle-8.7

 RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash -

-RUN apt-get install -y nodejs git vim curl sshpass
+RUN apt-get install -y nodejs git vim curl sshpass pigz


 # ADD node-v18.20.2-linux-x64 /usr/local/node-v18.20.2-linux-x64
Binary file not shown.
Binary file not shown.
docker-compose-3.129/Untitled-12.seqdiag (new file, +25)
@@ -0,0 +1,25 @@
```mermaid
sequenceDiagram
    participant 服务商系统
    participant 中道系统

    服务商系统->>中道系统: 提交司机信息(/provider/driver-info)
    中道系统-->>服务商系统: 返回受理结果

    alt 数据校验失败
        中道系统-->>服务商系统: code=2001
    else 校验通过
        中道系统->>中道系统: 状态变更为「认证中」

        loop 核验流程
            中道系统->>中道系统: 人工审核/系统核验
        end

        中道系统->>服务商系统: POST核验通知(/provider/verification-notify)

        alt 核验成功
            服务商系统->>服务商系统: 锁定认证字段
        else 核验失败/过期
            服务商系统->>服务商系统: 开放对应修改权限
        end
    end
```
docker-compose-3.129/funasr/docker-compose.yml (new file, +17)
@@ -0,0 +1,17 @@
networks:
  funasr:
    external: false
services:
  server:
    image: registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.11
    networks:
      - funasr
    ports:
      - 10096:10095
    privileged: true
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - ./funasr-runtime-resources/models:/workspace/models
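Once the container is up, the service speaks the FunASR WebSocket protocol on the published port. A client-side check, assuming the FunASR repo's bundled python client; the script path and flags follow the FunASR SDK guide and should be treated as assumptions here:

```
# 2pass mode exercises both streaming and offline recognition
cd FunASR/runtime/python/websocket
python funasr_wss_client.py --host 127.0.0.1 --port 10096 --mode 2pass \
  --audio_in ../../audio/asr_example.wav
```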
docker-compose-3.129/kong/docker-compose.yml (new file, +94)
@@ -0,0 +1,94 @@
version: '2'
networks:
  kong-net:
    driver: bridge
services:
  kong-database:
    image: postgres:9.6
    container_name: kong-database
    restart: always
    networks:
      - kong-net
    environment:
      POSTGRES_USER: kong
      POSTGRES_DB: kong
      POSTGRES_PASSWORD: kong
    ports:
      - "5432:5432"
  # initialize the Kong database
  kong-migration:
    container_name: kong-migration
    image: kong:latest
    command: "kong migrations bootstrap"
    networks:
      - kong-net
    restart: on-failure
    environment:
      KONG_PG_HOST: kong-database
      KONG_DATABASE: postgres
      KONG_PG_USER: kong
      KONG_PG_PASSWORD: kong
      KONG_CASSANDRA_CONTACT_POINTS: kong-database
    links:
      - kong-database
    depends_on:
      - kong-database

  # start Kong
  kong:
    container_name: kong
    image: kong:latest
    restart: always
    networks:
      - kong-net
    environment:
      TZ: Asia/Shanghai
      KONG_DATABASE: postgres
      KONG_PG_HOST: kong-database
      KONG_PG_USER: kong
      KONG_PG_PASSWORD: kong
      KONG_CASSANDRA_CONTACT_POINTS: kong-database
      KONG_PROXY_ACCESS_LOG: /dev/stdout
      KONG_ADMIN_ACCESS_LOG: /dev/stdout
      KONG_PROXY_ERROR_LOG: /dev/stderr
      KONG_ADMIN_ERROR_LOG: /dev/stderr
      KONG_ADMIN_LISTEN: 0.0.0.0:8001, 0.0.0.0:8444
    depends_on:
      - kong-migration
      - kong-database
    ports:
      - "8001:8001"
      - "8000:8000"
      - "8443:8443"
      - "8444:8444"
  # initialize the Konga database
  konga-prepare:
    container_name: konga-prepare
    image: pantsel/konga:latest
    command: "-c prepare -a postgres -u postgresql://kong:kong@kong-database:5432/konga"
    networks:
      - kong-net
    restart: on-failure
    links:
      - kong-database
    depends_on:
      - kong
      - kong-database
  # Konga UI, storing its data in the postgres database
  konga:
    container_name: konga
    image: pantsel/konga:latest
    restart: always
    networks:
      - kong-net
    environment:
      DB_ADAPTER: postgres
      DB_HOST: kong-database
      DB_USER: kong
      DB_DATABASE: konga
      DB_PASSWORD: kong
    depends_on:
      - kong
      - kong-database
    ports:
      - "1337:1337"
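A quick way to confirm the stack came up, using the ports published above (run on the Docker host; localhost is an assumption):

```
# The Kong admin API answers on 8001 once migrations have finished
curl -i http://localhost:8001/
# The proxy port returns "no Route matched" until routes are configured
curl -i http://localhost:8000/
# Konga UI
curl -I http://localhost:1337/
```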
docker-compose-3.129/readme.MD (new file, +41)
@@ -0,0 +1,41 @@
1. TTS service: https://github.com/remsky/Kokoro-FastAPI

git clone https://github.com/remsky/Kokoro-FastAPI.git
cd Kokoro-FastAPI

cd docker/gpu # For GPU support
# or cd docker/cpu # For CPU support
docker compose up --build

# Models will auto-download, but if needed you can manually download:
python docker/scripts/download_model.py --output api/src/models/v1_0

# Or run directly via UV:
./start-gpu.sh # For GPU support
./start-cpu.sh # For CPU support



2. ASR service: https://github.com/modelscope/FunASR/blob/main/runtime/docs/SDK_advanced_guide_offline_en_zh.md

Start the image:
Pull and start the FunASR runtime-SDK docker image with the commands below:

sudo docker pull \
  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.7
mkdir -p ./funasr-runtime-resources/models
sudo docker run -p 10097:10095 -it --privileged=true \
  -v $PWD/funasr-runtime-resources/models:/workspace/models \
  registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.7

Start the server:
Once the container is running, launch the funasr-wss-server program:

cd FunASR/runtime
nohup bash run_server.sh \
  --download-model-dir /workspace/models \
  --vad-dir damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
  --model-dir damo/speech_paraformer-large_asr_nat-en-16k-common-vocab10020-onnx \
  --punc-dir damo/punc_ct-transformer_cn-en-common-vocab471067-large-onnx > log.txt 2>&1 &

# To disable SSL, add the parameter: --certfile 0
For detailed server parameters, see the server usage guide.
docker-compose-3.129/test.md (new file, +75)
@@ -0,0 +1,75 @@
# Driver/Vehicle Information Verification API

## 1. API specification

### 1.1 Information submission (provider -> Zhongdao)

**Endpoint**: `/provider/driver-info`
**Method**: POST
**Content-Type**: application/json

#### Request parameters:

| Parameter | Required | Type | Description |
|---|---|---|---|
| providerCode | Yes | String | Provider code (assigned by Zhongdao) |
| operateType | Yes | String | Operation type: 1 = add, 2 = update, 3 = deactivate |
| rescueNo | Yes | String | Rescue technician ID (unique) |
| rescueName | Yes | String | Technician name |
| rescuePhone | Yes | String | Technician contact phone |
| sex | Yes | String | Sex: 0 = female, 1 = male |
| identity | Yes | String | National ID number |
| nonMotorVehicle | Yes | String | Non-motor-vehicle driver: 1 = yes, 0 = no (if 1, the license fields may be left empty) |
| identityPhoto_1 | Yes | String | URL of the ID card front photo |
| identityPhoto_2 | Yes | String | URL of the ID card back photo |
| licenseType | No | String | Driving license type (A1/A2/A3/B1/B2/C1/C2) |
| licenseStartDay | No | String | License issue date (format: yyyy-MM-dd) |
| licenseEndDay | No | String | License expiry date (format: yyyy-MM-dd) |
| LicensePhoto | No | String | URL of the license photo |
| rescuePersonPhoto | No | String | URL of the technician's portrait photo |
| belongType | Yes | String | Affiliation: 1 = in-house technician, 0 = outsourced technician |
| timestamp | Yes | String | Request timestamp (format: yyyy-MM-dd HH:mm:ss) |
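For concreteness, a hypothetical submission; every field value below is invented for illustration, the host placeholder is not specified by this document, and any auth or signature headers are out of its scope:

```
curl -X POST https://<zhongdao-host>/provider/driver-info \
  -H "Content-Type: application/json" \
  -d '{
    "providerCode": "P0001",
    "operateType": "1",
    "rescueNo": "R20240001",
    "rescueName": "张三",
    "rescuePhone": "13800000000",
    "sex": "1",
    "identity": "110101199001011234",
    "nonMotorVehicle": "0",
    "identityPhoto_1": "https://example.com/id-front.jpg",
    "identityPhoto_2": "https://example.com/id-back.jpg",
    "licenseType": "C1",
    "licenseStartDay": "2015-06-01",
    "licenseEndDay": "2027-06-01",
    "belongType": "1",
    "timestamp": "2025-01-01 12:00:00"
  }'
```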
### 1.2 Verification notification (Zhongdao -> provider)

**Callback URL**: configured by the provider in advance
**Method**: POST
**Content-Type**: application/json

#### Notification parameters:

| Parameter | Required | Type | Description |
|---|---|---|---|
| providerCode | Yes | String | Provider code |
| rescueNo | Yes | String | Rescue technician ID |
| status | Yes | String | Verification status: certifying/fail/success/expired |
| timestamp | Yes | String | Status change time (same format as above) |
| remark | No | String | Failure reason |

## 2. Business flow

```mermaid
sequenceDiagram
    participant 服务商系统
    participant 中道系统

    服务商系统->>中道系统: 提交司机信息(/provider/driver-info)
    中道系统-->>服务商系统: 返回受理结果

    alt 数据校验失败
        中道系统-->>服务商系统: code=2001
    else 校验通过
        中道系统->>中道系统: 状态变更为「认证中」

        loop 核验流程
            中道系统->>中道系统: 人工审核/系统核验
        end

        中道系统->>服务商系统: POST核验通知(/provider/verification-notify)

        alt 核验成功
            服务商系统->>服务商系统: 锁定认证字段
        else 核验失败/过期
            服务商系统->>服务商系统: 开放对应修改权限
        end
    end
```
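A provider-side sanity check for the callback contract, simulating Zhongdao's notification against your own endpoint (URL and values are hypothetical; the notify path comes from the flow diagram above):

```
curl -X POST https://provider.example.com/provider/verification-notify \
  -H "Content-Type: application/json" \
  -d '{"providerCode": "P0001", "rescueNo": "R20240001", "status": "success", "timestamp": "2025-01-02 09:30:00"}'
```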
docker-swarm/.DS_Store (vendored binary, new file; not shown)
docker-swarm/10.5x环境配置记录/0.使用阿里云的源.md (new file, +15)
@@ -0,0 +1,15 @@
Switch the yum repos to the Alibaba Cloud mirrors. CentOS 7 may eventually lose official yum repo support, so the switch has to be done manually.

Back up the official yum repo config file:

cp /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak

Download the Alibaba Cloud yum repo config file:

curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

Clear the cache and generate a fresh one:

yum clean all

yum makecache
@@ -29,12 +29,27 @@ sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/dock

 Next, to speed up installation, switch to a domestic mirror:

-sed -i 's@//download.docker.com@//mirrors.ustc.edu.cn/docker-ce@g' /etc/yum.repos.d/docker-ce.repo
+### sed -i 's@//download.docker.com@//mirrors.ustc.edu.cn/docker-ce@g' /etc/yum.repos.d/docker-ce.repo
+
+sed -i 's@//download.docker.com@//mirrors.aliyun.com/docker-ce@g' /etc/yum.repos.d/docker-ce.repo


 3. Install
 Run the following command to install docker:

-sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin
+sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+
+Set a log size limit:
+
+vim /etc/docker/daemon.json
+
+{
+  "log-opts": {"max-size":"1g", "max-file":"3"},
+  "registry-mirrors": ["https://dockerproxy.net"]
+}
+
+systemctl reload docker
+

 4. Start docker
 After installation, start it up; first run the following to reload the configuration:
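A quick verification once the daemon has picked up the new daemon.json. Two caveats, stated as assumptions to verify: log-opts only apply to containers created afterwards, and not every daemon.json key is hot-reloadable, so a full restart may be needed instead of reload:

```
docker info | grep -A 2 'Registry Mirrors'
```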
@@ -57,3 +72,16 @@ systemctl enable docker
 [root@ZD-CRM1 ~]# systemctl stop firewalld
 [root@ZD-CRM1 ~]# systemctl disable firewalld
 ```
+
+
+6. ulimit
+
+ulimit -SHn 65536
+
+
+vim /etc/security/limits.conf
+
+
+* soft nofile 65535
+* hard nofile 65535
@@ -1,4 +1,7 @@
-1. Initialize the network
+
+## Basic setup
+
+### 1. Initialize the prod network
 docker network create \
 --driver=overlay \
 --subnet=10.17.0.0/16 \
@@ -6,15 +9,93 @@
 --attachable \
 prod

-1. rabbitmq
-Admin UI: 192.168.10.51:15672
-Internal: prod_rabbitmq_stats:5672, prod_rabbitmq_queue1:5672, prod_rabbitmq_queue2:5672
-stomp url: 192.168.10.51:15674
+### 2. [portainer](../portainer) management tool

-2. xxl-job-admin
+docker stack deploy --compose-file docker-compose.yml portainer

-Admin UI: 192.168.10.51:9991
+Admin UI: https://192.168.10.51:9443

-3. nacos
+If the page throws errors, run: docker service update portainer_agent --force
+
+
+### 3. [monitor](../monitor)
+
+Used for grafana monitoring
+
+Admin UI: 192.168.10.51:3000
+admin gkxl2024#@
+
+## Stacks
+
+Deployment steps for each stack are in its folder's readme; variables live in the corresponding env file.
+
+### 1. [rabbitmq](../rabbitmq)
+Admin UI: 192.168.10.51:15672
+
+Internal: prod_rabbitmq_stats:5672, prod_rabbitmq_queue1:5672, prod_rabbitmq_queue2:5672
+
+stomp-url: 192.168.10.51:15674
+
+### 2. [xxl-job-admin](../xxl-job-admin)
+
+Admin UI: 192.168.10.51:9991
+
+Internal: prod-xxl-job-admin:8080
+
+
+### 3. [nacos](../nacos-cluser)
+
+Admin UI: http://192.168.10.51:25848/nacos/
+
+
+### 4. [redis](../redis-prod-50)
+
+Port: 192.168.10.51:6379
+
+Internal: prod_redis_redis-sentinel:16379
+
+### 5. [elasticsearch](../elasticsearch)
+
+Admin UI: 192.168.10.51:5601
+
+External port: 192.168.10.51:9200
+
+Internal port: prod-es-elasticsearch:9200
+
+### 6. [log](../log)
+
+No admin UI; only the corresponding volume needs to be mounted: prod-log
+
+### 7. [elasticsearch](../mysql-repl-tool)
+
+Admin UI: 192.168.10.51:5601
+
+External port: 192.168.10.51:9200
+
+Internal port: prod-es-elasticsearch:9200
+
+### 8. [skywalking](../skywalking)
+
+Admin UI: 192.168.10.51:18080
+
+Internal port: prod-skywalking-oap:11800
+
+
+### 9. [mysql](../mysql-repl-tool)
+
+Used by [nacos](../nacos-cluser/mysql-schema.sql) and xxl-job
+
+Admin UI: 192.168.10.51:25306
+
+Internal port: prod-tool-mysql-master:3306 prod-tool-mysql-salve:3306
+
+
+## Other services
+
+[clickhouse](../clickhouse) serves 3.123
+
+[datart](../datart) dependencies; its mysql service is in use by BI, on 3.123
+
+[jenkins](../jenkins) on 3.120, dedicated to deployments
+
+[canal](../canal) used for syncing data to clickhouse, on 3.120
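All of these stacks are deployed with the same pattern used throughout this repo: substitute variables from an env file into the compose file, then feed the result to docker stack deploy on stdin. For example, with the stack and file names the rabbitmq readme uses:

```
# envsubst renders ${VARS} from env_prod; "--compose-file -" reads from stdin
env $(cat ./env_prod | xargs) envsubst < ./docker-compose-prod.yml \
  | docker stack deploy --compose-file - prod_rabbitmq
```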
@@ -6,22 +6,100 @@
     "type": 2,
     "title": "nacos cluser",
     "description": "nacos集群",
-    "categories": ["sino"],
+    "categories": ["开发组件"],
     "platform": "linux",
     "logo": "",
     "repository": {
-      "url": "https://git.sino-assist.com//templates",
-      "stackfile": "stacks/liveswitch/docker-stack.yml"
+      "url": "https://git.sino-assist.com/sa-charts/",
+      "stackfile": "docker-swarm/nacos-cluser/cluster-docker-compose.yml"
     },
     "env": [
       {
-        "name": "POSTGRES_PASSWORD",
-        "label": "Postgres password"
+        "name": "NAMESPACE",
+        "label": "NAMESPACE",
+        "default": "prod"
+      },
+      {
+        "name": "NACOS_VERSION",
+        "label": "NACOS_VERSION",
+        "default": "v2.3.0"
+      },
+      {
+        "name": "NODE_PORT_11",
+        "label": "NODE_PORT_11",
+        "default": "21848"
+      },
+      {
+        "name": "NODE_PORT_12",
+        "label": "NODE_PORT_12",
+        "default": "22848"
+      },
+      {
+        "name": "NODE_PORT_13",
+        "label": "NODE_PORT_13",
+        "default": "22849"
+      },
+      {
+        "name": "NODE_PORT_21",
+        "label": "NODE_PORT_21",
+        "default": "23848"
+      },
+      {
+        "name": "NODE_PORT_22",
+        "label": "NODE_PORT_22",
+        "default": "24848"
+      },
+      {
+        "name": "NODE_PORT_23",
+        "label": "NODE_PORT_23",
+        "default": "24849"
+      },
+      {
+        "name": "NODE_PORT_31",
+        "label": "NODE_PORT_31",
+        "default": "25848"
+      },
+      {
+        "name": "NODE_PORT_32",
+        "label": "NODE_PORT_32",
+        "default": "26848"
+      },
+      {
+        "name": "NODE_PORT_33",
+        "label": "NODE_PORT_33",
+        "default": "26849"
+      },
+      {
+        "name": "MYSQL_SERVICE_HOST",
+        "label": "MYSQL_SERVICE_HOST",
+        "default": "prod-tool-mysql-master"
+      },
+      {
+        "name": "MYSQL_SERVICE_USER",
+        "label": "MYSQL_SERVICE_USER",
+        "default": "zd_tool"
+      },
+      {
+        "name": "MYSQL_SERVICE_PASSWORD",
+        "label": "MYSQL_SERVICE_PASSWORD",
+        "default": "gkxl2024#@"
+      },
+      {
+        "name": "NACOS_AUTH_IDENTITY_KEY",
+        "label": "NACOS_AUTH_IDENTITY_KEY",
+        "default": "nacos"
+      },
+      {
+        "name": "NACOS_AUTH_IDENTITY_VALUE",
+        "label": "NACOS_AUTH_IDENTITY_VALUE",
+        "default": "gkxl2024#@"
+      },
+      {
+        "name": "NACOS_AUTH_TOKEN",
+        "label": "NACOS_AUTH_TOKEN",
+        "default": "OTg1NjRzZnJ0Z2RmZzIwMjQ1NTU1NTExZWZnZGVmZGVz"
+      }
     ]
   },



 }
 ]
 }
@@ -15,6 +15,7 @@ services:
       - CLICKHOUSE_ADMIN_PASSWORD=${CLICKHOUSE_ADMIN_PASSWORD}
     volumes:
       - 'data_db:/bitnami/clickhouse'
+      - 'data_config:/opt/bitnami/clickhouse/etc'
     deploy:
       update_config:
         order: start-first
@@ -31,5 +32,5 @@ services:
 volumes:
   data_db:
     driver: local
-
-
+  data_config:
+    driver: local
@@ -12,6 +12,7 @@ services:
       - '${NODE_PORT_2}:9300'
     environment:
       - TZ=Asia/Shanghai
+      - ELASTICSEARCH_HEAP_SIZE=8192m
     volumes:
       - '/mnt/data/volumes/elasticsearch:/bitnami/elasticsearch/data'
     deploy:
docker-swarm/elasticsearch/kibana.yml (new executable file, +191)
@@ -0,0 +1,191 @@
# For more configuration options see the configuration guide for Kibana in
# https://www.elastic.co/guide/index.html

# =================== System: Kibana Server ===================
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601

# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#server.host: "localhost"

# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""

# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# Defaults to `false`.
#server.rewriteBasePath: false

# Specifies the public URL at which Kibana is available for end users. If
# `server.basePath` is configured this URL should end with the same basePath.
#server.publicBaseUrl: ""

# The maximum payload size in bytes for incoming server requests.
#server.maxPayload: 1048576

# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"

# =================== System: Kibana Server (Optional) ===================
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key

# =================== System: Elasticsearch ===================
# The URLs of the Elasticsearch instances to use for all your queries.
#elasticsearch.hosts: ["http://localhost:9200"]

# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"

# Kibana can also authenticate to Elasticsearch via "service account tokens".
# Service account tokens are Bearer style tokens that replace the traditional username/password based configuration.
# Use this token instead of a username/password.
# elasticsearch.serviceAccountToken: "my_token"

# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000

# The maximum number of sockets that can be used for communications with elasticsearch.
# Defaults to `Infinity`.
#elasticsearch.maxSockets: 1024

# Specifies whether Kibana should use compression for communications with elasticsearch
# Defaults to `false`.
#elasticsearch.compression: false

# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]

# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}

# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000

# =================== System: Elasticsearch (Optional) ===================
# These files are used to verify the identity of Kibana to Elasticsearch and are required when
# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key

# Enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]

# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full

# =================== System: Logging ===================
# Set the value of this setting to off to suppress all logging output, or to debug to log everything. Defaults to 'info'
#logging.root.level: debug

# Enables you to specify a file where Kibana stores log output.
#logging.appenders.default:
#  type: file
#  fileName: /var/logs/kibana.log
#  layout:
#    type: json

# Example with size based log rotation
#logging.appenders.default:
#  type: rolling-file
#  fileName: /var/logs/kibana.log
#  policy:
#    type: size-limit
#    size: 256mb
#  strategy:
#    type: numeric
#    max: 10
#  layout:
#    type: json

# Logs queries sent to Elasticsearch.
#logging.loggers:
#  - name: elasticsearch.query
#    level: debug

# Logs http responses.
#logging.loggers:
#  - name: http.server.response
#    level: debug

# Logs system usage information.
#logging.loggers:
#  - name: metrics.ops
#    level: debug

# Enables debug logging on the browser (dev console)
#logging.browser.root:
#  level: debug

# =================== System: Other ===================
# The path where Kibana stores persistent data not saved in Elasticsearch. Defaults to data
#path.data: data

# Specifies the path where Kibana creates the process ID file.
#pid.file: /run/kibana/kibana.pid

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000ms.
#ops.interval: 5000

# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English (default) "en", Chinese "zh-CN", Japanese "ja-JP", French "fr-FR".
i18n.locale: "zh-CN"
# =================== Frequently used (Optional)===================

# =================== Saved Objects: Migrations ===================
# Saved object migrations run at startup. If you run into migration-related issues, you might need to adjust these settings.

# The number of documents migrated at a time.
# If Kibana can't start up or upgrade due to an Elasticsearch `circuit_breaking_exception`,
# use a smaller batchSize value to reduce the memory pressure. Defaults to 1000 objects per batch.
#migrations.batchSize: 1000

# The maximum payload size for indexing batches of upgraded saved objects.
# To avoid migrations failing due to a 413 Request Entity Too Large response from Elasticsearch.
# This value should be lower than or equal to your Elasticsearch cluster’s `http.max_content_length`
# configuration option. Default: 100mb
#migrations.maxBatchSizeBytes: 100mb

# The number of times to retry temporary migration failures. Increase the setting
# if migrations fail frequently with a message such as `Unable to complete the [...] step after
# 15 attempts, terminating`. Defaults to 15
#migrations.retryAttempts: 15

# =================== Search Autocomplete ===================
# Time in milliseconds to wait for autocomplete suggestions from Elasticsearch.
# This value must be a whole number greater than zero. Defaults to 1000ms
#unifiedSearch.autocomplete.valueSuggestions.timeout: 1000

# Maximum number of documents loaded by each shard to generate autocomplete suggestions.
# This value must be a whole number greater than zero. Defaults to 100_000
#unifiedSearch.autocomplete.valueSuggestions.terminateAfter: 100000
path:
  data: /bitnami/kibana/data
pid:
  file: /opt/bitnami/kibana/tmp/kibana.pid
server:
  host: 0.0.0.0
  port: 5601
elasticsearch:
  hosts: http://prod-es-elasticsearch:9200
docker-swarm/elasticsearch/node.options (new executable file, +15)
@@ -0,0 +1,15 @@
## Node command line options
## See `node --help` and `node --v8-options` for available options
## Please note you should specify one option per line

## max size of old space in megabytes
#--max-old-space-size=4096

## do not terminate process on unhandled promise rejection
--unhandled-rejections=warn

## restore < Node 16 default DNS lookup behavior
--dns-result-order=ipv4first

## enable OpenSSL 3 legacy provider
--openssl-legacy-provider
@@ -14,6 +14,11 @@ services:
     configs:
       - source: logstash_conf
         target: /usr/share/logstash/pipeline/my.conf
+    logging:
+      driver: json-file
+      options:
+        max-size: "500m"
+        max-file: "3"
     deploy:
       placement:
         constraints:
@@ -119,7 +119,7 @@ volumes:

 networks:
   net:
-    driver: overlay
+    driver: host
@@ -5,11 +5,17 @@ networks:
     name: ${NAMESPACE}
     external: true
 services:
-  server:
+  server1:
     image: 'docker.io/bitnami/nginx:1.24'
     ports:
-      - '8080:8080'
-      - '8443:8443'
+      - mode: host
+        protocol: tcp
+        published: 8180
+        target: 8080
+      - mode: host
+        protocol: tcp
+        published: 8143
+        target: 8443
     environment:
       - TZ=Asia/Shanghai
     volumes:
@@ -25,19 +31,50 @@ services:
         target: /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.pem
       - source: nginx_prod_config
         target: /opt/bitnami/nginx/conf/server_blocks/prod.conf
-      - source: nginx_prod_sup_config
-        target: /opt/bitnami/nginx/conf/server_blocks/prod-sup.conf
-      - source: nginx_other_config
-        target: /opt/bitnami/nginx/conf/server_blocks/zd-other.conf

     deploy:
       mode: replicated
-      replicas: 2
+      replicas: 1
       update_config:
         order: start-first
       placement:
         constraints:
-          - node.labels.${NAMESPACE}_nginx==1
+          - node.hostname==ZD-CRM1
+  server2:
+    image: 'docker.io/bitnami/nginx:1.24'
+    ports:
+      - mode: host
+        protocol: tcp
+        published: 8280
+        target: 8080
+      - mode: host
+        protocol: tcp
+        published: 8243
+        target: 8443
+    environment:
+      - TZ=Asia/Shanghai
+    volumes:
+      - /opt/logs/nginx/:/opt/bitnami/nginx/logs/
+    configs:
+      - source: nginx_conf
+        target: /opt/bitnami/nginx/conf/nginx.conf
+      - source: nginx_ssl_sinoassist_config
+        target: /opt/bitnami/nginx/conf/ssl.sinoassist.conf
+      - source: ssl_sinoassist_key
+        target: /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.key
+      - source: ssl_sinoassist_pem
+        target: /opt/bitnami/nginx/conf/server_blocks/sinoassist.com.pem
+      - source: nginx_prod_config
+        target: /opt/bitnami/nginx/conf/server_blocks/prod.conf
+
+    deploy:
+      mode: replicated
+      replicas: 1
+      update_config:
+        order: start-first
+      placement:
+        constraints:
+          - node.hostname==ZD-CRM2
 configs:
   nginx_conf:
     external: true
@@ -47,13 +84,7 @@ configs:
     name: nginx_ssl_sinoassist_conf_v1
   nginx_prod_config:
     external: true
-    name: nginx_prod_config_v1
-  nginx_prod_sup_config:
-    external: true
-    name: nginx_prod_sup_config_v1
-  nginx_other_config:
-    external: true
-    name: nginx_other_config_v1
+    name: nginx_prod_config_v2
   ssl_sinoassist_key:
     external: true
     name: ssl_sinoassist_key_2024
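The change above swaps one 2-replica service behind the routing mesh for two single-replica services pinned to specific nodes with host-mode ports, so each instance can be reached and health-checked directly on its own node. A sketch; the IP for ZD-CRM2 is hypothetical, since it is not stated here:

```
curl -I http://192.168.10.51:8180/   # server1 on ZD-CRM1
curl -I http://<zd-crm2-ip>:8280/    # server2 on ZD-CRM2 (hypothetical IP)
```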
|
@ -1,44 +0,0 @@
|
||||
## 公司其他域名的切换
|
||||
|
||||
# 4s店微信
|
||||
server {
|
||||
listen 8080;
|
||||
server_name wx4s.sinoassist.com;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.10.7:8777;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# 微信供应商
|
||||
server {
|
||||
listen 8080;
|
||||
server_name wxdd.sinoassist.com;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.10.7:8568;
|
||||
}
|
||||
}
|
||||
|
||||
# 呼叫中心接口
|
||||
server {
|
||||
listen 8080;
|
||||
server_name apicc.sinoassist.com;
|
||||
include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.5.201:8080;
|
||||
}
|
||||
}
|
||||
|
||||
# 呼叫中心接口websocket
|
||||
server {
|
||||
listen 8080;
|
||||
server_name apiccws.sinoassist.com;
|
||||
include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
|
||||
|
||||
location / {
|
||||
proxy_pass http://192.168.5.201:1884;
|
||||
}
|
||||
}
|
@@ -1,4 +1,5 @@
+
 #### Zhongdao prod environment: start ####

 upstream api.zhongdao {
     server ss52_sa-gateway_svc:8080;
@@ -165,3 +166,117 @@ server {
    return 301 https://www.sinoassist.com$request_uri;

}

#### Zhongdao prod environment: end ####

## Switchover for the company's other domains

# 4S dealership WeChat
server {
    listen 8080;
    server_name wx4s.sinoassist.com;

    location / {
        proxy_pass http://192.168.10.7:8777;
    }
}


# WeChat supplier
server {
    listen 8080;
    server_name wxdd.sinoassist.com;

    location / {
        proxy_pass http://192.168.10.7:8568;
    }
}

# Call-center API
server {
    listen 8080;
    server_name apicc.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    location / {
        proxy_pass http://192.168.5.201:8080;
    }
}

# Call-center API websocket
server {
    listen 8080;
    server_name apiccws.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    location / {
        proxy_pass http://192.168.5.201:1884;
    }
}


## Domain switchover for other services in the rescue prod environment


## rabbitmq stomp
upstream stomp.zhongdao {
    server prod_rabbitmq_queue1:15674;
    server prod_rabbitmq_queue2:15674;
    server prod_rabbitmq_stats:15674;
}

server {
    listen 8080;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
    server_name stomp.sinoassist.com;

    location / {
        proxy_pass http://stomp.zhongdao;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}


# Backup file server
server {
    listen 8080;
    server_name file.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    # recording file server
    location /ly/ {
        proxy_pass http://192.168.5.204:8088/;

        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        if ($request_filename ~ .*\.(htm|html)$)
        {
            add_header Cache-Control no-cache;
        }

    }

    # backup file server
    location / {
        proxy_pass http://192.168.10.18:8888;

        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        if ($request_filename ~ .*\.(htm|html)$)
        {
            add_header Cache-Control no-cache;
        }

    }
}
@@ -1,65 +0,0 @@
## Domain switchover for other services in the rescue prod environment


## rabbitmq stomp
upstream stomp.zhongdao {
    server prod_rabbitmq_queue1:15674;
    server prod_rabbitmq_queue2:15674;
    server prod_rabbitmq_stats:15674;
}

server {
    listen 8080;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;
    server_name stomp.sinoassist.com;

    location / {
        proxy_pass http://stomp.zhongdao;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}


# Backup file server
server {
    listen 8080;
    server_name file.sinoassist.com;
    include /opt/bitnami/nginx/conf/ssl.sinoassist.conf;

    # recording file server
    location /ly/ {
        proxy_pass http://192.168.5.204:8088/;

        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        if ($request_filename ~ .*\.(htm|html)$)
        {
            add_header Cache-Control no-cache;
        }

    }

    # backup file server
    location / {
        proxy_pass http://192.168.10.18:8888;

        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host $server_name;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        if ($request_filename ~ .*\.(htm|html)$)
        {
            add_header Cache-Control no-cache;
        }

    }
}
docker-swarm/portainer/README.md (new file, +1)
@@ -0,0 +1 @@
docker stack deploy --compose-file docker-compose.yml portainer
@@ -21,3 +21,18 @@ rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"
 # Deploy the rabbitmq cluster in the prod environment

 env $(cat ./env_prod | xargs) envsubst < ./docker-compose-prod.yml | docker stack deploy --compose-file - prod_rabbitmq
+
+
+docker.nju.edu.cn
+
+
+{
+  "registry-mirrors": [
+    "https://<changme>.mirror.aliyuncs.com",
+    "https://dockerproxy.com",
+    "https://mirror.baidubce.com",
+    "https://docker.m.daocloud.io",
+    "https://docker.nju.edu.cn",
+    "https://docker.mirrors.sjtug.sjtu.edu.cn"
+  ]
+}
@@ -17,6 +17,7 @@ services:
       - RABBITMQ_VHOSTS=/${NAMESPACE}
       - RABBITMQ_USERNAME=root
       - RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
       - RABBITMQ_MANAGEMENT_ALLOW_WEB_ACCESS=true
       - RABBITMQ_PLUGINS=rabbitmq_management,rabbitmq_stomp,rabbitmq_web_stomp
+      - RABBITMQ_LOGS=-
     ports:
@@ -42,6 +43,7 @@ services:
       - RABBITMQ_VHOSTS=/${NAMESPACE}
       - RABBITMQ_USERNAME=root
       - RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
       - RABBITMQ_MANAGEMENT_ALLOW_WEB_ACCESS=true
       - RABBITMQ_PLUGINS=rabbitmq_stomp,rabbitmq_web_stomp
+      - RABBITMQ_LOGS=-
     volumes:
@@ -65,6 +67,7 @@ services:
       - RABBITMQ_VHOSTS=/${NAMESPACE}
       - RABBITMQ_USERNAME=root
       - RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}
       - RABBITMQ_MANAGEMENT_ALLOW_WEB_ACCESS=true
       - RABBITMQ_PLUGINS=rabbitmq_stomp,rabbitmq_web_stomp
+      - RABBITMQ_LOGS=-
     volumes:
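RABBITMQ_LOGS=- routes the rabbitmq log to stdout instead of a file, which puts it where swarm's log plumbing can see it:

```
docker service logs --tail 50 prod_rabbitmq_stats
```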
@@ -1,4 +0,0 @@
NAMESPACE=prod
NODE_PORT=6379
REDIS_PASSWORD=gkxl650
REDIS_SENTINEL_PASSWORD=gkxl650
docker-swarm/redis-prod-50/docker-compose.yml (new file, +116)
@@ -0,0 +1,116 @@
version: '3.8'

networks:
  default:
    name: ${NAMESPACE}
    external: true
services:
  master:
    image: 'bitnami/redis:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_MASTER_HOST=192.168.10.4
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
      - REDIS_PASSWORD=${REDIS_PASSWORD}
      - REDIS_REPLICA_IP=192.168.10.55
    ports:
      - mode: host
        protocol: tcp
        published: 6379
        target: 6379
    volumes:
      - data_master:/bitnami
    deploy:
      update_config:
        order: stop-first
      placement:
        constraints:
          - node.hostname==ZD-CRM5
  slave:
    image: 'bitnami/redis:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_HOST=192.168.10.4
      - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
      - REDIS_PASSWORD=${REDIS_PASSWORD}
      - REDIS_REPLICA_IP=192.168.10.56
    ports:
      - mode: host
        protocol: tcp
        published: 6379
        target: 6379
    depends_on:
      - master
    volumes:
      - data_slave:/bitnami
    deploy:
      update_config:
        order: stop-first
      placement:
        constraints:
          - node.hostname==ZD-CRM6
  sentinel-1:
    image: 'bitnami/redis-sentinel:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_MASTER_HOST=192.168.10.4
      - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
      - REDIS_SENTINEL_ANNOUNCE_IP=192.168.10.55
      - REDIS_PASSWORD=${REDIS_PASSWORD}
      - REDIS_SENTINEL_PASSWORD=${REDIS_SENTINEL_PASSWORD}
    depends_on:
      - master
      - slave
    ports:
      - mode: host
        protocol: tcp
        published: 26379
        target: 26379
    deploy:
      update_config:
        order: stop-first
      mode: replicated
      replicas: 1  # replicated mode, one replica
      placement:
        constraints:
          - node.hostname==ZD-CRM5
    volumes:
      - data_sentinel_1:/bitnami
  sentinel-2:
    image: 'bitnami/redis-sentinel:7.0.11'
    environment:
      - TZ=Asia/Shanghai
      - REDIS_MASTER_HOST=192.168.10.4
      - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
      - REDIS_SENTINEL_ANNOUNCE_IP=192.168.10.56
      - REDIS_PASSWORD=${REDIS_PASSWORD}
      - REDIS_SENTINEL_PASSWORD=${REDIS_SENTINEL_PASSWORD}
    depends_on:
      - master
      - slave
    ports:
      - mode: host
        protocol: tcp
        published: 26379
        target: 26379
    deploy:
      update_config:
        order: stop-first
      mode: replicated
      replicas: 1  # replicated mode, one replica
      placement:
        constraints:
          - node.hostname==ZD-CRM6
    volumes:
      - data_sentinel_2:/bitnami
volumes:
  data_sentinel_1:
    driver: local
  data_sentinel_2:
    driver: local
  data_master:
    driver: local
  data_slave:
    driver: local
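To verify the topology after deploying, query a sentinel directly; the master set name is an assumption (bitnami images default to mymaster):

```
# Which master is this sentinel tracking?
redis-cli -h 192.168.10.55 -p 26379 -a "$REDIS_SENTINEL_PASSWORD" \
  sentinel get-master-addr-by-name mymaster
# Replication state as seen by one of the replicas
redis-cli -h 192.168.10.55 -p 6379 -a "$REDIS_PASSWORD" info replication
```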
docker-swarm/redis-prod-50/env_prod (new file, +4)
@@ -0,0 +1,4 @@
NAMESPACE=prod
NODE_PORT=6379
REDIS_PASSWORD=sino#650
REDIS_SENTINEL_PASSWORD=sino#650
@@ -9,63 +9,81 @@ services:
     image: 'bitnami/redis:7.0.11'
     environment:
       - TZ=Asia/Shanghai
+      - REDIS_MASTER_HOST=192.168.1.207
       - REDIS_REPLICATION_MODE=master
       - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
       - REDIS_PASSWORD=${REDIS_PASSWORD}
+      - REDIS_REPLICA_IP=192.168.1.207
     ports:
-      - '${NODE_PORT}:6379'
+      - mode: host
+        protocol: tcp
+        published: 6379
+        target: 6379
     volumes:
       - data_master:/bitnami
     deploy:
       update_config:
-        order: start-first
+        order: stop-first
       placement:
         constraints:
-          - node.labels.${NAMESPACE}_redis_master==1
+          - node.hostname==okd7
   slave:
     image: 'bitnami/redis:7.0.11'
     environment:
       - TZ=Asia/Shanghai
       - REDIS_REPLICATION_MODE=slave
-      - REDIS_MASTER_HOST=${NAMESPACE}_redis_master
+      - REDIS_MASTER_HOST=192.168.1.207
       - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
       - REDIS_PASSWORD=${REDIS_PASSWORD}
+      - REDIS_REPLICA_IP=192.168.1.208
+    ports:
+      - mode: host
+        protocol: tcp
+        published: 6379
+        target: 6379
     depends_on:
       - master
     volumes:
       - data_slave:/bitnami
     deploy:
       update_config:
-        order: start-first
+        order: stop-first
       placement:
         constraints:
-          - node.labels.${NAMESPACE}_redis_slave==1
+          - node.hostname==zd-dev-208
   redis-sentinel:
     image: 'bitnami/redis-sentinel:7.0.11'
     environment:
       - TZ=Asia/Shanghai
-      - REDIS_MASTER_HOST=${NAMESPACE}_redis_master
+      - REDIS_MASTER_HOST=192.168.1.207
       - REDIS_MASTER_PASSWORD=${REDIS_PASSWORD}
+      - REDIS_SENTINEL_ANNOUNCE_IP=192.168.1.209
       - REDIS_PASSWORD=${REDIS_PASSWORD}
       - REDIS_SENTINEL_PASSWORD=${REDIS_SENTINEL_PASSWORD}
     depends_on:
       - master
       - slave
+    ports:
+      - mode: host
+        protocol: tcp
+        published: 26379
+        target: 26379
     deploy:
       update_config:
-        order: start-first
-      mode: global
+        order: stop-first
+      mode: replicated
+      replicas: 1  # replicated mode, one replica
       placement:
         constraints:
-          - node.labels.${NAMESPACE}_redis_sentinel==1
-      max_replicas_per_node: 1
+          - node.hostname==zd-dev-209
     volumes:
-      - data_sentinel:/bitnami
+      - data_sentinel_1:/bitnami
 volumes:
-  data_sentinel:
+  data_sentinel_1:
     driver: local
+  data_sentinel_2:
+    driver: local
   data_master:
     driver: local
   data_slave:
     driver: local
@@ -8,3 +8,6 @@ env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack dep
 #

 env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_skywalking --with-registry-auth
+
+
+env $(cat ./env | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - skywalking --with-registry-auth
@@ -6,5 +6,3 @@ env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack dep

 env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_xxl_job

-
-docker stack deploy --compose-file docker-compose.yml - portainer
运维/inc.log (new file, +2086; diff suppressed because it is too large)
运维/index.log (new file, +2538; diff suppressed because it is too large)
运维/mysql-backup.md (new file, +32)
@@ -0,0 +1,32 @@
1. Install xtrabackup

sudo yum install https://www.percona.com/downloads/percona-release/redhat/0.1-10/percona-release-0.1-10.noarch.rpm
sudo yum install percona-xtrabackup-80

2. Speeding up full backups
```

# Parallel, compressed full backup. --compress writes compressed files into
# the target dir, so there is nothing on stdout to pipe to gzip; the original
# "| gzip > backup.xbstream.gz" would only apply with --stream=xbstream.
xtrabackup --backup --user=root --password=nczl@sino_db \
  --parallel=4 --compress --compress-threads=4 \
  --target-dir=/data/backup/full_$(date +%Y%m%d)


# Incremental backup based on the latest full backup
xtrabackup --backup --user=root --password=nczl@sino_db \
  --incremental-basedir=/data/backup/full_$(date +%Y%m%d) \
  --target-dir=/data/backup/inc_$(date +%Y%m%d) \
  --parallel=4 --compress --compress-threads=4


# Decompress the backup in place
xtrabackup --decompress --target-dir=/data/backup/full_20250508


# Prepare (apply the redo log only, so increments can still be applied)
xtrabackup --prepare --apply-log-only --target-dir=/data/backup/full_20250508 \
  --use-memory=32G

```

rsync -avz -e ssh /data/backup/full_20250508 root@192.168.3.123:/data/backup/full_20250508
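The restore step itself is not written down above; a sketch under common-default assumptions (datadir /var/lib/mysql and a systemd unit named mysqld):

```
# MySQL must be stopped and the datadir empty before copy-back
systemctl stop mysqld
xtrabackup --copy-back --target-dir=/data/backup/full_20250508
chown -R mysql:mysql /var/lib/mysql
systemctl start mysqld
```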