mysql backup

marsal
2025-05-08 21:24:40 +08:00
parent b2eb2928d8
commit 15dd2396d6
17 changed files with 2600 additions and 1 deletion

.DS_Store vendored (binary file not shown)

ai/openai-test.py Normal file

@ -0,0 +1,5 @@
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Note: these are two unrelated checkpoints (a local whisper-large-v2 tokenizer
# and the T0_3B model from the Hub), so this only smoke-tests that loading works.
tokenizer = AutoTokenizer.from_pretrained("/opt/local/openai/whisper-large-v2")
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")

7
ai/配置服务器.md Normal file

@ -0,0 +1,7 @@
1. Install Anaconda to manage Python versions
https://www.anaconda.com/download/success
```wget https://repo.anaconda.com/archive/Anaconda3-2024.10-1-Linux-x86_64.sh```
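After the download, the usual next step is to run the installer and reload the shell (a minimal sketch using the installer's defaults):

```bash
bash Anaconda3-2024.10-1-Linux-x86_64.sh   # accept the license, let it init the shell
source ~/.bashrc
conda --version                            # confirm the install
```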


@ -0,0 +1,25 @@
```mermaid
sequenceDiagram
    participant P as Provider System
    participant Z as Zhongdao System
    P->>Z: Submit driver info (/provider/driver-info)
    Z-->>P: Return acceptance result
    alt Data validation fails
        Z-->>P: code=2001
    else Validation passes
        Z->>Z: Status changes to "Certifying"
        loop Verification process
            Z->>Z: Manual review / automated checks
        end
        Z->>P: POST verification notice (/provider/verification-notify)
        alt Verification succeeds
            P->>P: Lock the certified fields
        else Verification fails / expires
            P->>P: Unlock the corresponding fields for editing
        end
    end
```


@ -0,0 +1,17 @@
networks:
funasr:
external: false
services:
server:
image: registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-online-cpu-0.1.11
networks:
- funasr
ports:
- 10096:10095
privileged: true
environment:
- TZ=Asia/Shanghai
volumes:
      - ./funasr-runtime-resources/models:/workspace/models
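A quick way to bring this up and confirm the host port mapping (a minimal sketch):

```bash
docker compose up -d
docker compose ps          # the server container should be running
ss -lnt | grep 10096       # host 10096 forwards to the container's 10095
```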


@ -0,0 +1,94 @@
version: '2'
networks:
kong-net:
driver: bridge
services:
kong-database:
image: postgres:9.6
container_name: kong-database
restart: always
networks:
- kong-net
environment:
POSTGRES_USER: kong
POSTGRES_DB: kong
POSTGRES_PASSWORD: kong
ports:
- "5432:5432"
# Bootstrap the Kong database (runs migrations once)
kong-migration:
container_name: kong-migration
image: kong:latest
command: "kong migrations bootstrap"
networks:
- kong-net
restart: on-failure
environment:
KONG_PG_HOST: kong-database
KONG_DATABASE: postgres
KONG_PG_USER: kong
KONG_PG_PASSWORD: kong
KONG_CASSANDRA_CONTACT_POINTS: kong-database
links:
- kong-database
depends_on:
- kong-database
# Start Kong
kong:
container_name: kong
image: kong:latest
restart: always
networks:
- kong-net
environment:
TZ: Asia/Shanghai
KONG_DATABASE: postgres
KONG_PG_HOST: kong-database
KONG_PG_USER: kong
KONG_PG_PASSWORD: kong
KONG_CASSANDRA_CONTACT_POINTS: kong-database
KONG_PROXY_ACCESS_LOG: /dev/stdout
KONG_ADMIN_ACCESS_LOG: /dev/stdout
KONG_PROXY_ERROR_LOG: /dev/stderr
KONG_ADMIN_ERROR_LOG: /dev/stderr
KONG_ADMIN_LISTEN: 0.0.0.0:8001, 0.0.0.0:8444
depends_on:
- kong-migration
- kong-database
ports:
- "8001:8001"
- "8000:8000"
- "8443:8443"
- "8444:8444"
# Prepare the Konga database
konga-prepare:
container_name: konga-prepare
image: pantsel/konga:latest
command: "-c prepare -a postgres -u postgresql://kong:kong@kong-database:5432/konga"
networks:
- kong-net
restart: on-failure
links:
- kong-database
depends_on:
- kong
- kong-database
# Konga UI (stores its data in the Postgres database)
konga:
container_name: konga
image: pantsel/konga:latest
restart: always
networks:
- kong-net
environment:
DB_ADAPTER: postgres
DB_HOST: kong-database
DB_USER: kong
DB_DATABASE: konga
DB_PASSWORD: kong
depends_on:
- kong
- kong-database
ports:
- "1337:1337"


@ -0,0 +1,41 @@
1. TTS service: https://github.com/remsky/Kokoro-FastAPI
git clone https://github.com/remsky/Kokoro-FastAPI.git
cd Kokoro-FastAPI
cd docker/gpu # For GPU support
# or cd docker/cpu # For CPU support
docker compose up --build
# Models will auto-download, but if needed you can manually download:
python docker/scripts/download_model.py --output api/src/models/v1_0
# Or run directly via UV:
./start-gpu.sh # For GPU support
./start-cpu.sh # For CPU support
2. ASR service: https://github.com/modelscope/FunASR/blob/main/runtime/docs/SDK_advanced_guide_offline_en_zh.md
Start from the image
Pull and start the FunASR runtime-SDK docker image with the following commands:
sudo docker pull \
registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.7
mkdir -p ./funasr-runtime-resources/models
sudo docker run -p 10097:10095 -it --privileged=true \
-v $PWD/funasr-runtime-resources/models:/workspace/models \
registry.cn-hangzhou.aliyuncs.com/funasr_repo/funasr:funasr-runtime-sdk-en-cpu-0.1.7
Start the server
After the container is up, start the funasr-wss-server program:
cd FunASR/runtime
nohup bash run_server.sh \
--download-model-dir /workspace/models \
--vad-dir damo/speech_fsmn_vad_zh-cn-16k-common-onnx \
--model-dir damo/speech_paraformer-large_asr_nat-en-16k-common-vocab10020-onnx \
--punc-dir damo/punc_ct-transformer_cn-en-common-vocab471067-large-onnx > log.txt 2>&1 &
# To disable SSL, add the parameter --certfile 0
For full server parameters, see the detailed server usage guide.
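As a quick check once the TTS container is up, something like the following should return audio. The port (8880) and the OpenAI-style route are assumptions based on the Kokoro-FastAPI README, so adjust to your deployment:

```bash
curl -s http://localhost:8880/v1/audio/speech \
  -H "Content-Type: application/json" \
  -d '{"model": "kokoro", "input": "Hello from Kokoro", "voice": "af_bella"}' \
  -o hello.mp3
```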


@ -0,0 +1,75 @@
# Driver/Vehicle Information Verification API
## 1. API Specification
### 1.1 Information Submission API (Provider → Zhongdao)
**Endpoint**: `/provider/driver-info`
**Method**: POST
**Content-Type**: application/json
#### Request parameters:
| Parameter | Required | Type | Description |
|----------------------|----------|--------|--------------------------------------------------------------------|
| providerCode | Yes | String | Provider code (assigned by Zhongdao) |
| operateType | Yes | String | Operation type: 1 = add, 2 = update, 3 = deactivate |
| rescueNo | Yes | String | Rescue technician ID (unique identifier) |
| rescueName | Yes | String | Technician name |
| rescuePhone | Yes | String | Technician contact phone |
| sex | Yes | String | Gender: 0 = female, 1 = male |
| identity | Yes | String | National ID number |
| nonMotorVehicle | Yes | String | Non-motor-vehicle driver: 1 = yes, 0 = no (if 1, license fields may be left empty) |
| identityPhoto_1 | Yes | String | URL of the ID card front photo |
| identityPhoto_2 | Yes | String | URL of the ID card back photo |
| licenseType | No | String | Driver's license class: A1/A2/A3/B1/B2/C1/C2 |
| licenseStartDay | No | String | License issue date, format yyyy-MM-dd |
| licenseEndDay | No | String | License expiry date, format yyyy-MM-dd |
| LicensePhoto | No | String | URL of the driver's license photo |
| rescuePersonPhoto | No | String | URL of the technician's portrait photo |
| belongType | Yes | String | Affiliation: 1 = in-house technician, 0 = subcontracted technician |
| timestamp | Yes | String | Request timestamp, format yyyy-MM-dd HH:mm:ss |
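A hedged example submission; all field values are illustrative and the host is a placeholder, since the spec only defines the path:

```bash
curl -s -X POST https://zhongdao.example.com/provider/driver-info \
  -H "Content-Type: application/json" \
  -d '{
    "providerCode": "P0001",
    "operateType": "1",
    "rescueNo": "R20250508001",
    "rescueName": "Zhang San",
    "rescuePhone": "13800000000",
    "sex": "1",
    "identity": "110101199001011234",
    "nonMotorVehicle": "0",
    "identityPhoto_1": "https://img.example.com/id-front.jpg",
    "identityPhoto_2": "https://img.example.com/id-back.jpg",
    "licenseType": "C1",
    "licenseStartDay": "2015-06-01",
    "licenseEndDay": "2027-06-01",
    "belongType": "1",
    "timestamp": "2025-05-08 21:00:00"
  }'
```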
### 1.2 Verification Notification API (Zhongdao → Provider)
**Callback URL**: configured in advance by the provider
**Method**: POST
**Content-Type**: application/json
#### Notification parameters:
| Parameter | Required | Type | Description |
|---------------|----------|--------|-------------------------------------------------------|
| providerCode | Yes | String | Provider code |
| rescueNo | Yes | String | Rescue technician ID |
| status | Yes | String | Verification status: certifying/fail/success/expired |
| timestamp | Yes | String | Status change time (same format as above) |
| remark | No | String | Failure reason |
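To exercise a callback handler before going live, the notification can be simulated locally; the callback URL below is a placeholder for whatever the provider registered:

```bash
curl -s -X POST https://provider.example.com/provider/verification-notify \
  -H "Content-Type: application/json" \
  -d '{
    "providerCode": "P0001",
    "rescueNo": "R20250508001",
    "status": "success",
    "timestamp": "2025-05-09 10:00:00"
  }'
```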
## 2. Business Flow
```mermaid
sequenceDiagram
    participant P as Provider System
    participant Z as Zhongdao System
    P->>Z: Submit driver info (/provider/driver-info)
    Z-->>P: Return acceptance result
    alt Data validation fails
        Z-->>P: code=2001
    else Validation passes
        Z->>Z: Status changes to "Certifying"
        loop Verification process
            Z->>Z: Manual review / automated checks
        end
        Z->>P: POST verification notice (/provider/verification-notify)
        alt Verification succeeds
            P->>P: Lock the certified fields
        else Verification fails / expires
            P->>P: Unlock the corresponding fields for editing
        end
    end
```

docker-swarm/.DS_Store vendored (binary file not shown)


@ -0,0 +1,15 @@
Switch the yum repo to Aliyun's mirror (CentOS 7's official yum repos may also be discontinued, so switch manually):
Back up the official yum repo config:
cp /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
Download the Aliyun yum repo config:
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
Clear the cache and build a fresh one:
yum clean all
yum makecache
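To confirm the Aliyun mirror is now being used:

```bash
yum repolist enabled
```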


@ -37,7 +37,19 @@ sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/dock
3. Install
Run the following command to install Docker:
sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
Set a log size limit and a registry mirror:
vim /etc/docker/daemon.json
{
  "log-opts": {"max-size":"1g", "max-file":"3"},
  "registry-mirrors": ["https://dockerproxy.net"]
}
systemctl restart docker   # log-opts changes are not picked up by a live reload
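To confirm the daemon picked up the mirror after the restart (assuming a reasonably recent Docker):

```bash
docker info --format '{{.RegistryConfig.Mirrors}}'
```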
4. Start Docker
After installation, start Docker. First run the following command to reload the configuration:
@ -60,3 +72,16 @@ systemctl enable docker
[root@ZD-CRM1 ~]# systemctl stop firewalld
[root@ZD-CRM1 ~]# systemctl disable firewalld
```
6. ulimit
Raise the open-file limit for the current shell:
ulimit -SHn 65536
Persist it in /etc/security/limits.conf:
vim /etc/security/limits.conf
* soft nofile 65535
* hard nofile 65535
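After logging back in, the effective limits can be confirmed with:

```bash
ulimit -Sn   # soft limit
ulimit -Hn   # hard limit
```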


@ -0,0 +1,191 @@
# For more configuration options see the configuration guide for Kibana in
# https://www.elastic.co/guide/index.html
# =================== System: Kibana Server ===================
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
#server.host: "localhost"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""
# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# Defaults to `false`.
#server.rewriteBasePath: false
# Specifies the public URL at which Kibana is available for end users. If
# `server.basePath` is configured this URL should end with the same basePath.
#server.publicBaseUrl: ""
# The maximum payload size in bytes for incoming server requests.
#server.maxPayload: 1048576
# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"
# =================== System: Kibana Server (Optional) ===================
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# =================== System: Elasticsearch ===================
# The URLs of the Elasticsearch instances to use for all your queries.
#elasticsearch.hosts: ["http://localhost:9200"]
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"
# Kibana can also authenticate to Elasticsearch via "service account tokens".
# Service account tokens are Bearer style tokens that replace the traditional username/password based configuration.
# Use this token instead of a username/password.
# elasticsearch.serviceAccountToken: "my_token"
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# The maximum number of sockets that can be used for communications with elasticsearch.
# Defaults to `Infinity`.
#elasticsearch.maxSockets: 1024
# Specifies whether Kibana should use compression for communications with elasticsearch
# Defaults to `false`.
#elasticsearch.compression: false
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000
# =================== System: Elasticsearch (Optional) ===================
# These files are used to verify the identity of Kibana to Elasticsearch and are required when
# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# =================== System: Logging ===================
# Set the value of this setting to off to suppress all logging output, or to debug to log everything. Defaults to 'info'
#logging.root.level: debug
# Enables you to specify a file where Kibana stores log output.
#logging.appenders.default:
# type: file
# fileName: /var/logs/kibana.log
# layout:
# type: json
# Example with size based log rotation
#logging.appenders.default:
# type: rolling-file
# fileName: /var/logs/kibana.log
# policy:
# type: size-limit
# size: 256mb
# strategy:
# type: numeric
# max: 10
# layout:
# type: json
# Logs queries sent to Elasticsearch.
#logging.loggers:
# - name: elasticsearch.query
# level: debug
# Logs http responses.
#logging.loggers:
# - name: http.server.response
# level: debug
# Logs system usage information.
#logging.loggers:
# - name: metrics.ops
# level: debug
# Enables debug logging on the browser (dev console)
#logging.browser.root:
# level: debug
# =================== System: Other ===================
# The path where Kibana stores persistent data not saved in Elasticsearch. Defaults to data
#path.data: data
# Specifies the path where Kibana creates the process ID file.
#pid.file: /run/kibana/kibana.pid
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000ms.
#ops.interval: 5000
# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English (default) "en", Chinese "zh-CN", Japanese "ja-JP", French "fr-FR".
i18n.locale: "zh-CN"
# =================== Frequently used (Optional)===================
# =================== Saved Objects: Migrations ===================
# Saved object migrations run at startup. If you run into migration-related issues, you might need to adjust these settings.
# The number of documents migrated at a time.
# If Kibana can't start up or upgrade due to an Elasticsearch `circuit_breaking_exception`,
# use a smaller batchSize value to reduce the memory pressure. Defaults to 1000 objects per batch.
#migrations.batchSize: 1000
# The maximum payload size for indexing batches of upgraded saved objects.
# To avoid migrations failing due to a 413 Request Entity Too Large response from Elasticsearch.
# This value should be lower than or equal to your Elasticsearch clusters `http.max_content_length`
# configuration option. Default: 100mb
#migrations.maxBatchSizeBytes: 100mb
# The number of times to retry temporary migration failures. Increase the setting
# if migrations fail frequently with a message such as `Unable to complete the [...] step after
# 15 attempts, terminating`. Defaults to 15
#migrations.retryAttempts: 15
# =================== Search Autocomplete ===================
# Time in milliseconds to wait for autocomplete suggestions from Elasticsearch.
# This value must be a whole number greater than zero. Defaults to 1000ms
#unifiedSearch.autocomplete.valueSuggestions.timeout: 1000
# Maximum number of documents loaded by each shard to generate autocomplete suggestions.
# This value must be a whole number greater than zero. Defaults to 100_000
#unifiedSearch.autocomplete.valueSuggestions.terminateAfter: 100000
path:
data: /bitnami/kibana/data
pid:
file: /opt/bitnami/kibana/tmp/kibana.pid
server:
host: 0.0.0.0
port: 5601
elasticsearch:
hosts: http://prod-es-elasticsearch:9200
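With the Bitnami paths and the 0.0.0.0:5601 binding above, a basic liveness check (the /api/status endpoint is standard Kibana):

```bash
curl -s http://localhost:5601/api/status | head -c 300
```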


@ -0,0 +1,15 @@
## Node command line options
## See `node --help` and `node --v8-options` for available options
## Please note you should specify one option per line
## max size of old space in megabytes
#--max-old-space-size=4096
## do not terminate process on unhandled promise rejection
--unhandled-rejections=warn
## restore < Node 16 default DNS lookup behavior
--dns-result-order=ipv4first
## enable OpenSSL 3 legacy provider
--openssl-legacy-provider


@ -8,3 +8,6 @@ env $(cat ./env_crm1 | xargs) envsubst < ./docker-compose.yml | docker stack dep
#
env $(cat ./env_prod | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - prod_skywalking --with-registry-auth
env $(cat ./env | xargs) envsubst < ./docker-compose.yml | docker stack deploy --compose-file - skywalking --with-registry-auth
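Before deploying, it can help to preview the compose file that envsubst will hand to docker stack deploy (a sanity-check sketch using the same env file):

```bash
env $(cat ./env | xargs) envsubst < ./docker-compose.yml | less
```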

运维/inc.log Normal file (2086 lines; diff suppressed because it is too large)

运维/index.log Normal file (empty)

运维/mysql-backup.md Normal file (empty)