K8S实战笔记——某考试系统部署文档
Exam系统部署文档
Version:1.0
本文档部分内容已做脱敏处理,仅做记录参考,不可直接使用
一、服务器环境
演示环境
# BMC
172.29.2.24 root / ********
# 物理机
172.29.2.34 root / ********
# Zstack 4.4.64-c76
# https://cdn.zstack.io/product_downloads/Cloud_suite/2024/4.4.64-hotfix/y86efjbxgc/ZStack/ZStack-Cloud-x86_64-DVD-4.4.64-c76.iso
http://172.29.2.34:5000/
admin / ********
# 管理节点
172.29.2.40 root / ********
# 计算节点
172.29.2.41 root / ********
172.29.2.42 root / ********
172.29.2.43 root / ********
# 存储节点
172.29.2.44 root / ********
环境说明:
以下所有部署服务器操作系统均使用CentOS7.6,下面是基于该系统的内核调优方案
1.1 内核调优
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
net.ipv6.conf.all.disable_ipv6=0
net.ipv6.conf.default.disable_ipv6=0
net.ipv6.conf.lo.disable_ipv6=0
net.ipv4.neigh.default.gc_stale_time=120
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1
net.ipv4.tcp_max_tw_buckets=6000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.netfilter.nf_conntrack_max=2310720
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
net.core.netdev_max_backlog=16384
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_max_syn_backlog = 8096
net.core.somaxconn = 32768
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=524288
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.max_map_count = 262144
1.2 limit调优
* soft nofile 6553500
* hard nofile 6553500
* soft nproc 6553500
* hard nproc 6553500
* soft stack 10240
* hard stack 32768
* hard memlock unlimited
* soft memlock unlimited
二、Docker-Compose单机部署
2.1 环境准备
注意:开始之前务必优化内核!
# 操作系统版本:CentOS7.6
# 关闭 SELinux
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
# 关闭防火墙
systemctl stop firewalld
systemctl disable firewalld
# 下载Docker-ce源
sudo wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# 安装Docker
sudo yum -y install docker-ce
# 检查Docker
sudo docker -v
# 启动Docker并设置为开机自启
sudo systemctl start docker
sudo systemctl enable docker
# 设置Docker镜像和默认地址池
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors":[
"https://hub.uuuadc.top",
"https://docker.anyhub.us.kg",
"https://dockerhub.jobcher.com",
"https://dockerhub.icu",
"https://docker.ckyl.me",
"https://docker.awsl9527.cn"
],
"default-address-pools":[
{"base":"192.168.0.0/16","size":24}
]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
# 下载Docker-Compose
sudo wget https://github.com/docker/compose/releases/download/v2.23.1/docker-compose-linux-x86_64
sudo mv docker-compose-linux-x86_64 /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# 检查Docker-Compose
docker-compose -v
# docker登录
docker login --username=******** registry.cn-hangzhou.aliyuncs.com
********
2.2 项目部署
# 1 下载解压单机部署包exam_release_XXXX.tar.gz
# 2 编辑docker-compose.yml
# 2.1 修改镜像版本为发布版
# 2.2 修改环境变量IP为生产IP
# 3 移动初始化sql文件到指定目录
# - 首次启动前,选择版本并移动sql文件至init目录
# - exam和generate分别表示考试端和生成端
# - 放置exam_init.sql文件至init目录表示初始化<考试端>
# - 放置generate_init.sql文件至init目录表示初始化<生成端>
# - 运行后不可转换,重新初始化请删除/exam_data/mysql目录并重新compose
# 4 docker-compose up -d 启动项目
# 启动后访问方式
ipaddress:80 # 考生端
ipaddress:8002 # 接收端
ipaddress:8003 # 批阅端
ipaddress:8004 # 生成端
# 项目数据目录
/exam_data/
├── dbdata # 数据库持久化目录,对应2个数据库
│ ├── mysql
│ └── redis
├── nginx_log # nginx日志目录,对应4个前端
│ ├── exam
│ ├── generate
│ ├── marking
│ └── recive
└── uploads # 考试附件保存目录
- docker-compose.yml
注:复制docker-compose文件也可启动项目,不过在首次启动后需要手动连接数据库注入sql文件,方法略
# docker-compose.yml — single-host deployment of the exam system.
# NOTE(review): the pasted original had lost all leading whitespace and was not
# parseable YAML; the structure below is restored from the evident service
# layout. Numeric environment values are quoted so they reach the containers
# as strings, and port mappings stay quoted to avoid YAML's base-60 trap.
version: "3.8"

services:
  redis:
    container_name: redis
    image: redis:alpine3.15
    command: redis-server --requirepass ********
    volumes:
      - /exam_data/dbdata/redis:/data
    ports:
      - "16379:6379"
    restart: always

  mysql:
    image: mysql:8.0.36
    container_name: mysql
    ports:
      - "3306:3306"
    command: --default-authentication-plugin=mysql_native_password
    environment:
      MYSQL_ROOT_PASSWORD: ********
      MYSQL_DATABASE: exam
    volumes:
      - /exam_data/dbdata/mysql:/var/lib/mysql
      # *.sql files placed here are executed only on FIRST start (empty datadir)
      - ./mysql/init:/docker-entrypoint-initdb.d/
    restart: always

  oam:
    container_name: oam
    image: registry.cn-hangzhou.aliyuncs.com/********
    ports:
      - "18002:8080"
    logging:
      driver: "json-file"
      options:
        max-size: "500m"
        max-file: "1"
    environment:
      JAVA_OPTIONS: "-Xmx1G"
      SERVER_PORT: "8080"
      ENVIRONMENT: "PROD"
      DOMAIN_NAME: "http://X.X.X.X"
      LRM_SERVER: "lrm:8080"
      RESOURCE_SERVER: "resource:8080"
      REDIS_TOKEN_DATABASE: "1"
      REDIS_TOKEN_HOST: "redis"
      REDIS_TOKEN_PORT: "6379"
      REDIS_TOKEN_PASSWORD: "********"
      MYSQL_DRIVER: "com.mysql.cj.jdbc.Driver"
      MYSQL_PROTOCOL: "jdbc:mysql"
      MYSQL_HOST: "mysql"
      MYSQL_PORT: "3306"
      MYSQL_DATABASE: "exam"
      MYSQL_USERNAME: "root"
      MYSQL_PASSWORD: "********"
      REDIS_HOST: "redis"
      REDIS_PORT: "6379"
      REDIS_DATABASE: "0"
      REDIS_PASSWORD: "********"
      USER_AUTHENTICATION: "false"
    depends_on:
      - redis
      - resource
      - mysql
    restart: always

  qam:
    container_name: qam
    image: registry.cn-hangzhou.aliyuncs.com/********
    ports:
      - "18003:8080"
    logging:
      driver: "json-file"
      options:
        max-size: "500m"
        max-file: "1"
    environment:
      JAVA_OPTIONS: "-Xmx2G"
      SERVER_PORT: "8080"
      ENVIRONMENT: "PROD"
      DOMAIN_NAME: "http://X.X.X.X"
      OAM_SERVER: "oam:8080"
      RESOURCE_SERVER: "resource:8080"
      REDIS_TOKEN_DATABASE: "1"
      REDIS_TOKEN_HOST: "redis"
      REDIS_TOKEN_PORT: "6379"
      REDIS_TOKEN_PASSWORD: "********"
      MYSQL_DRIVER: "com.mysql.cj.jdbc.Driver"
      MYSQL_PROTOCOL: "jdbc:mysql"
      MYSQL_HOST: "mysql"
      MYSQL_PORT: "3306"
      MYSQL_DATABASE: "exam"
      MYSQL_USERNAME: "root"
      MYSQL_PASSWORD: "********"
      REDIS_HOST: "redis"
      REDIS_PORT: "6379"
      REDIS_DATABASE: "0"
      REDIS_PASSWORD: "********"
    depends_on:
      - redis
      - oam
      - resource
      - mysql
    restart: always

  resource:
    container_name: resource
    image: registry.cn-hangzhou.aliyuncs.com/********
    ports:
      - "18004:8080"
    logging:
      driver: "json-file"
      options:
        max-size: "500m"
        max-file: "1"
    volumes:
      - /exam_data/uploads:/uploads
    environment:
      JAVA_OPTIONS: "-Xmx1G"
      SERVER_PORT: "8080"
      ENVIRONMENT: "PROD"
      DOMAIN_NAME: "http://X.X.X.X"
      REDIS_TOKEN_DATABASE: "1"
      REDIS_TOKEN_HOST: "redis"
      REDIS_TOKEN_PORT: "6379"
      REDIS_TOKEN_PASSWORD: "********"
      MYSQL_DRIVER: "com.mysql.cj.jdbc.Driver"
      MYSQL_PROTOCOL: "jdbc:mysql"
      MYSQL_HOST: "mysql"
      MYSQL_PORT: "3306"
      MYSQL_DATABASE: "exam"
      MYSQL_USERNAME: "root"
      MYSQL_PASSWORD: "********"
      REDIS_HOST: "redis"
      REDIS_PORT: "6379"
      REDIS_DATABASE: "0"
      REDIS_PASSWORD: "********"
      LOCAL_FILE_ROOT: "/uploads"
      FORCE_LOCAL_STORAGE: "true"
    depends_on:
      - redis
      - mysql
    restart: always

  web_exam:
    image: registry.cn-hangzhou.aliyuncs.com/********
    container_name: web_exam
    restart: always
    ports:
      - "80:80"
    volumes:
      - /exam_data/nginx_log/exam:/var/log/nginx
    environment:
      SERVER_IP: X.X.X.X
      RESOURCE_HOST: resource
      RESOURCE_PORT: "8080"
      OAM_HOST: oam
      OAM_PORT: "8080"
      QAM_HOST: qam
      QAM_PORT: "8080"
      # EM traffic is routed to the qam service in every frontend below —
      # presumably intentional (no separate "em" service exists); confirm.
      EM_HOST: qam
      EM_PORT: "8080"
    depends_on:
      - oam
      - qam
      - resource

  web_receive:
    image: registry.cn-hangzhou.aliyuncs.com/********
    # NOTE(review): original container_name was the misspelled "web_recevie";
    # renamed to match the service name. The host log path below keeps the
    # pre-existing "recive" spelling so already-deployed hosts keep their logs.
    container_name: web_receive
    restart: always
    ports:
      - "8002:80"
    volumes:
      - /exam_data/nginx_log/recive:/var/log/nginx
    environment:
      SERVER_IP: X.X.X.X
      RESOURCE_HOST: resource
      RESOURCE_PORT: "8080"
      OAM_HOST: oam
      OAM_PORT: "8080"
      QAM_HOST: qam
      QAM_PORT: "8080"
      EM_HOST: qam
      EM_PORT: "8080"
    depends_on:
      - oam
      - qam
      - resource

  web_marking:
    image: registry.cn-hangzhou.aliyuncs.com/********
    container_name: web_marking
    restart: always
    ports:
      - "8003:80"
    volumes:
      - /exam_data/nginx_log/marking:/var/log/nginx
    environment:
      SERVER_IP: X.X.X.X
      RESOURCE_HOST: resource
      RESOURCE_PORT: "8080"
      OAM_HOST: oam
      OAM_PORT: "8080"
      QAM_HOST: qam
      QAM_PORT: "8080"
      EM_HOST: qam
      EM_PORT: "8080"
    depends_on:
      - oam
      - qam
      - resource

  web_generate:
    image: registry.cn-hangzhou.aliyuncs.com/********
    container_name: web_generate
    restart: always
    ports:
      - "8004:80"
    volumes:
      - /exam_data/nginx_log/generate:/var/log/nginx
    environment:
      SERVER_IP: X.X.X.X
      RESOURCE_HOST: resource
      RESOURCE_PORT: "8080"
      OAM_HOST: oam
      OAM_PORT: "8080"
      QAM_HOST: qam
      QAM_PORT: "8080"
      EM_HOST: qam
      EM_PORT: "8080"
    depends_on:
      - oam
      - qam
      - resource
三、Kubernetes分布式部署
3.1 环境准备
各节点分区情况:
[root@172-29-2-34 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 7.3T 0 disk
├─sda1 8:1 0 200M 0 part /boot/efi
├─sda2 8:2 0 1G 0 part /boot
└─sda3 8:3 0 5.9T 0 part
├─zstack-root 253:0 0 500G 0 lvm /
├─zstack-swap 253:1 0 31.3G 0 lvm [SWAP]
├─zstack-zstack_bs 253:2 0 500G 0 lvm /zstack_bs
└─zstack-zstack_ps 253:3 0 4.9T 0 lvm /zstack_ps
[root@master exam_k8s]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 918M 0 rom
vda 252:0 0 200G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 199G 0 part
└─centos-root 253:0 0 199G 0 lvm /
[root@node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 300G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 199G 0 part
└─centos-root 253:0 0 199G 0 lvm /
[root@storage ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 200G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 199G 0 part
└─centos-root 253:0 0 199G 0 lvm /
vdb 252:16 0 2T 0 disk
└─vdb1 252:17 0 2T 0 part
└─exam-data 253:1 0 2T 0 lvm /exam_data
操作系统:CentOS7.6(K8S要求关闭Swap分区)
注意:开始之前务必优化内核!
# 裸机不能存在swap分区
# 如果已有swap分区,可执行swapoff -a 临时关闭
# 每个节点分别设置对应主机名
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
hostnamectl set-hostname node3
# 所有节点都修改 hosts
vim /etc/hosts
172.16.30.136 master
172.16.30.137 node1
172.16.30.138 node2
172.16.30.139 node3
# 所有节点关闭 SELinux
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
# 所有节点确保防火墙关闭
systemctl stop firewalld
systemctl disable firewalld
# 所有节点添加 k8s 安装源
cat <<EOF > kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
mv kubernetes.repo /etc/yum.repos.d/
# 所有节点添加 Docker 安装源
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# 替换阿里云yum源
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# 所有节点安装组件(指定版本,否则K8s初始化报错)
yum install -y kubelet-1.22.4 kubectl-1.22.4 kubeadm-1.22.4 docker-ce-20.10.24
# 如果存在swap分区,需要手动关闭
swapoff -a
# 所有节点修改docker配置
# kubernetes 官方推荐 docker 等使用 systemd 作为 cgroupdriver,否则 kubelet 启动不了
cat <<EOF > daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors":[
"https://hub.uuuadc.top",
"https://docker.anyhub.us.kg",
"https://dockerhub.jobcher.com",
"https://dockerhub.icu",
"https://docker.ckyl.me",
"https://docker.awsl9527.cn"
]
}
EOF
mkdir /etc/docker
mv daemon.json /etc/docker/
# 所有节点启动服务
systemctl enable kubelet
systemctl start kubelet
systemctl enable docker
systemctl start docker
3.2 K8S初始化
# 管理节点初始化
kubeadm init --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16
# 保存 kubeadm join 部分
# 重新获取:kubeadm token create --print-join-command
# 复制授权文件,以便 kubectl 可以有权限访问集群
# 如果其他节点需要访问集群,需要从主节点复制这个文件过去其他节点
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# 在其他机器上创建 ~/.kube/config 文件也能通过 kubectl 访问到集群
# 工作节点加入集群
kubeadm join 172.16.30.136:6443 --token xxx --discovery-token-ca-cert-hash xxx
# 加入完成后查看nodes状态
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane,master 31m v1.22.4
node1 NotReady <none> 5m48s v1.22.4
node2 NotReady <none> 5m25s v1.22.4
node3 NotReady <none> 5m23s v1.22.4
# 安装网络插件Flannel
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
# 或 kube-flannel.yml离线文件(结尾附)
kubectl apply -f kube-flannel.yml
# 网络插件安装后节点Ready
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 8m40s v1.22.4
node1 Ready <none> 6m41s v1.22.4
node2 Ready <none> 4m29s v1.22.4
node3 Ready <none> 4m12s v1.22.4
# k8s命令tab补全
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
3.3 NFS服务准备
63 mkdir /exam_data
64 lsblk -f
65 lsblk
66 parted /dev/vdb
mklabel gpt
mkpart p 0% 100%
toggle 1 lvm
67 lsblk
68 pvcreate /dev/vdb1
69 vgcreate exam /dev/vdb1
70 lvcreate -l +100%FREE -n data exam
74 mkfs.xfs /dev/mapper/exam-data
75 mount /dev/mapper/exam-data /exam_data/
76 df -h
vim /etc/fstab
/dev/mapper/exam-data /exam_data/ xfs defaults 0 0
# step1 安装yum install nfs-utils -y
yum install nfs-utils -y
# step2 创建NFS共享目录
mkdir /exam_data/{dbdata,uploads,nginx_logs}
mkdir /exam_data/dbdata/{redis,mysql}
# step3 配置NFS共享,编辑/etc/exports文件,将要共享的目录添加到文件中
/exam_data *(rw,sync,no_root_squash) # 这将允许任何客户端以读写模式访问共享目录。
# step4 重新加载NFS配置
exportfs -r
# step5 启动NFS服务
systemctl start nfs-server
systemctl enable nfs-server
# step6 确认NFS服务正在运行
systemctl status nfs-server
# 所有工作节点安装nfs-utils并测试挂载
mount -t nfs 172.29.2.44:/exam_data /mnt/
3.4 应用部署
# 创建阿里云密钥
kubectl create secret docker-registry aliyun-docker-registry \
--docker-username=******** \
--docker-password=******** \
--docker-server=registry.cn-hangzhou.aliyuncs.com
3.4.1 PV&PVC
- pv-pvc.yaml
# pv-pvc.yaml — static NFS-backed PersistentVolumes and their claims.
# NOTE(review): indentation restored (the pasted original was flat and not
# valid YAML). Each PVC is now pinned to its PV via volumeName — without it,
# binding matches only on size/accessModes, so e.g. pvc-redis (10Gi RWO)
# could legally bind pv-mysql (30Gi RWO).
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-redis
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce  # single-node read-write is enough for one redis pod
  nfs:
    path: /exam_data/dbdata/redis  # must exist and be exported on the NFS server
    server: 172.29.2.44
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-redis
spec:
  volumeName: pv-redis
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-mysql
spec:
  capacity:
    storage: 30Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /exam_data/dbdata/mysql
    server: 172.29.2.44
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-mysql
spec:
  volumeName: pv-mysql
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 30Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-uploads
spec:
  capacity:
    storage: 1000Gi
  accessModes:
    - ReadWriteMany  # shared by all resource replicas
  nfs:
    path: /exam_data/uploads
    server: 172.29.2.44
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-uploads
spec:
  volumeName: pv-uploads
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1000Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nginx-logs
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  nfs:
    path: /exam_data/nginx_logs
    server: 172.29.2.44
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nginx-logs
spec:
  volumeName: pv-nginx-logs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
kubectl apply -f pv-pvc.yaml
# 查看pv、pvc
[root@master]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv-mysql 30Gi RWO Retain Bound default/pvc-mysql 34h
pv-redis 10Gi RWO Retain Bound default/pvc-redis 34h
pv-uploads 1000Gi RWX Retain Bound default/pvc-uploads 41h
[root@master]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-mysql Bound pv-mysql 30Gi RWO 34h
pvc-redis Bound pv-redis 10Gi RWO 34h
pvc-uploads Bound pv-uploads 1000Gi RWX 41h
3.4.2 Deployment
- mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql-config
data:
mysqld.cnf: |
[mysqld]
# NOTE(review): max_connections is set again further down in this file
# (max_connections=2520). MySQL keeps the LAST occurrence, so this 5000
# value is silently overridden — keep only one of the two.
max_connections = 5000
default-time_zone = '+8:00'
innodb_flush_log_at_trx_commit=2
loose_recycle_scheduler=OFF
innodb_buffer_pool_load_at_startup=ON
loose_performance_schema_max_index_stat=0
bulk_insert_buffer_size=4194304
show_old_temporals=OFF
ft_query_expansion_limit=20
innodb_old_blocks_time=1000
loose_ccl_queue_hot_delete=OFF
loose_rds_audit_log_event_buffer_size=8192
thread_stack=1048576
loose_performance_schema_max_digest_sample_age=0
innodb_thread_concurrency=0
loose_innodb_rds_flashback_task_enabled=OFF
# NOTE(review): duplicates "default-time_zone = '+8:00'" set near the top of
# this file (MySQL treats '-' and '_' in option names as equivalent). The last
# occurrence wins; keep only one.
default_time_zone=+8:00
loose_performance_schema_max_digest_length=0
loose_recycle_bin=OFF
optimizer_search_depth=62
max_sort_length=1024
max_binlog_cache_size=18446744073709547520
schema_definition_cache=256
init_connect=''
slave_transaction_retries=10
innodb_adaptive_max_sleep_delay=150000
innodb_purge_rseg_truncate_frequency=128
innodb_lock_wait_timeout=50
loose_json_document_max_depth=100
innodb_compression_pad_pct_max=50
max_connections=2520
temptable_max_mmap=1073741824
max_execution_time=0
innodb_log_wait_for_flush_spin_hwm=400
event_scheduler=OFF
innodb_flush_method=O_DIRECT
loose_performance_schema_accounts_size=0
loose_optimizer_trace_features=greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on
innodb_purge_batch_size=300
loose_performance_schema_events_statements_history_size=0
avoid_temporal_upgrade=OFF
loose_group_replication_flow_control_member_quota_percent=0
innodb_sync_array_size=1
binlog_transaction_dependency_history_size=500000
net_read_timeout=30
end_markers_in_json=OFF
loose_performance_schema_hosts_size=0
loose_innodb_numa_interleave=ON
loose_performance_schema_max_cond_instances=0
max_binlog_stmt_cache_size=18446744073709547520
skip_show_database=OFF
innodb_checksum_algorithm=crc32
innodb_log_spin_cpu_pct_hwm=50
loose_performance_schema_events_waits_history_long_size=0
innodb_ft_enable_stopword=ON
loose_innodb_undo_retention=0
disconnect_on_expired_password=ON
default_storage_engine=InnoDB
loose_group_replication_flow_control_min_quota=0
loose_performance_schema_session_connect_attrs_size=0
stored_program_definition_cache=256
innodb_ft_result_cache_limit=2000000000
innodb_log_buffer_size=8M
explicit_defaults_for_timestamp=OFF
ft_max_word_len=84
innodb_autoextend_increment=64
sql_mode=ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
innodb_stats_transient_sample_pages=8
table_open_cache=2048
loose_performance_schema_max_rwlock_classes=0
range_optimizer_max_mem_size=8388608
loose_innodb_rds_faster_ddl=ON
innodb_status_output=OFF
innodb_log_compressed_pages=OFF
slave_net_timeout=60
max_points_in_geometry=65536
max_prepared_stmt_count=16382
innodb_fill_factor=100
wait_timeout=86400
loose_group_replication_flow_control_mode=DISABLED
innodb_print_all_deadlocks=OFF
loose_thread_pool_size=1
binlog_stmt_cache_size=32768
transaction_isolation=READ-COMMITTED
optimizer_trace_limit=1
loose_innodb_log_writer_threads=ON
innodb_max_purge_lag=0
innodb_buffer_pool_dump_pct=25
max_sp_recursion_depth=0
updatable_views_with_limit=YES
local_infile=ON
loose_opt_rds_last_error_gtid=ON
innodb_ft_max_token_size=84
loose_thread_pool_enabled=ON
innodb_adaptive_hash_index=OFF
net_write_timeout=60
innodb_print_ddl_logs=OFF
flush_time=0
character_set_filesystem=binary
loose_performance_schema_max_statement_classes=0
key_cache_division_limit=100
innodb_read_ahead_threshold=56
loose_optimizer_switch=index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on
show_create_table_verbosity=OFF
loose_performance_schema_max_socket_classes=0
innodb_log_spin_cpu_abs_lwm=80
innodb_monitor_disable=
loose_performance_schema_max_program_instances=0
innodb_adaptive_flushing_lwm=10
innodb_log_checksums=ON
innodb_ft_sort_pll_degree=2
log_slow_admin_statements=OFF
innodb_stats_on_metadata=OFF
stored_program_cache=256
group_concat_max_len=1024
innodb_rollback_segments=128
loose_information_schema_stats_expiry=86400
innodb_commit_concurrency=0
table_definition_cache=2048
auto_increment_increment=1
max_seeks_for_key=18446744073709500000
binlog_group_commit_sync_no_delay_count=0
loose_kill_idle_transaction_timeout=0
loose_persist_binlog_to_redo=OFF
loose_ccl_queue_hot_update=OFF
back_log=3000
binlog_transaction_dependency_tracking=WRITESET
loose_recycle_bin_retention=604800
innodb_io_capacity_max=40000
loose_performance_schema_events_transactions_history_size=0
min_examined_row_limit=0
loose_performance_schema_events_transactions_history_long_size=0
sync_relay_log_info=10000
binlog_group_commit_sync_delay=0
innodb_stats_auto_recalc=ON
max_connect_errors=100
loose_performance_schema_max_file_classes=0
innodb_change_buffering=all
loose_opt_rds_enable_show_slave_lag=ON
histogram_generation_max_mem_size=20000000
loose_group_replication_flow_control_min_recovery_quota=0
loose_performance_schema_max_statement_stack=0
max_join_size=18446744073709551615
loose_validate_password_length=8
innodb_max_purge_lag_delay=0
loose_optimizer_trace=enabled=off,one_line=off
default_week_format=0
innodb_cmp_per_index_enabled=OFF
loose_slave_parallel_workers=8
host_cache_size=644
auto_increment_offset=1
ft_min_word_len=4
default_authentication_plugin=mysql_native_password
loose_performance_schema_max_sql_text_length=0
slave_type_conversions=
loose_group_replication_flow_control_certifier_threshold=25000
optimizer_trace_offset=-1
loose_force_memory_to_innodb=OFF
character_set_server=utf8
innodb_adaptive_flushing=ON
binlog_max_flush_queue_time=0
regexp_time_limit=32
innodb_monitor_enable=
loose_group_replication_flow_control_applier_threshold=25000
table_open_cache_instances=16
tablespace_definition_cache=256
loose_rds_audit_log_version=MYSQL_V1
innodb_buffer_pool_instances=8
loose_multi_blocks_ddl_count=0
loose_performance_schema_max_table_instances=0
loose_group_replication_flow_control_release_percent=50
loose_innodb_undo_space_reserved_size=0
innodb_log_file_size=1000M
lc_time_names=en_US
sync_master_info=10000
innodb_compression_level=6
loose_innodb_log_optimize_ddl=OFF
loose_performance_schema_max_prepared_statements_instances=0
loose_innodb_log_write_ahead_size=4096
loose_performance_schema_max_mutex_classes=0
cte_max_recursion_depth=1000
key_buffer_size=16M
innodb_online_alter_log_max_size=134217728
key_cache_block_size=1024
mysql_native_password_proxy_users=OFF
loose_innodb_rds_chunk_flush_interval=100
query_alloc_block_size=8192
loose_performance_schema_max_socket_instances=0
innodb_purge_threads=4
loose_group_replication_transaction_size_limit=150000000
innodb_compression_failure_threshold_pct=5
loose_performance_schema_error_size=0
binlog_rows_query_log_events=OFF
loose_innodb_undo_space_supremum_size=10240
innodb_stats_persistent_sample_pages=20
innodb_ft_total_cache_size=640000000
eq_range_index_dive_limit=100
loose_sql_safe_updates=OFF
loose_performance_schema_events_stages_history_long_size=0
connect_timeout=10
div_precision_increment=4
sync_binlog=1000
rpl_read_size=8192
innodb_stats_method=nulls_equal
lock_wait_timeout=31536000
innodb_deadlock_detect=ON
innodb_write_io_threads=4
loose_ccl_queue_bucket_count=4
ngram_token_size=2
loose_performance_schema_max_table_lock_stat=0
loose_performance_schema_max_table_handles=0
loose_performance_schema_max_memory_classes=0
loose_ignore_index_hint_error=OFF
loose_innodb_rds_free_resize=ON
loose_log_error_suppression_list=MY-010520
innodb_ft_enable_diag_print=OFF
innodb_io_capacity=20000
slow_launch_time=2
innodb_table_locks=ON
loose_performance_schema_events_stages_history_size=0
innodb_stats_persistent=ON
tmp_table_size=2097152
loose_performance_schema_max_thread_classes=0
net_retry_count=10
innodb_ft_cache_size=8000000
binlog_cache_size=1M
innodb_max_dirty_pages_pct=75
innodb_disable_sort_file_cache=OFF
innodb_lru_scan_depth=512
loose_performance_schema_max_mutex_instances=0
long_query_time=1
innodb_default_row_format=DYNAMIC
interactive_timeout=7200
innodb_read_io_threads=4
transaction_prealloc_size=4096
open_files_limit=65535
collation_server=utf8_general_ci
loose_performance_schema_max_metadata_locks=0
temptable_max_ram=1073741824
innodb_open_files=1000
max_heap_table_size=67108864
completion_type=0
loose_performance_schema_digests_size=0
automatic_sp_privileges=ON
max_user_connections=2000
innodb_random_read_ahead=OFF
regexp_stack_limit=8000000
loose_group_replication_flow_control_max_commit_quota=0
delay_key_write=ON
general_log=OFF
log_bin_use_v1_row_events=1
loose_performance_schema_setup_actors_size=0
innodb_buffer_pool_dump_at_shutdown=ON
query_prealloc_size=8192
key_cache_age_threshold=300
loose_performance_schema_setup_objects_size=0
transaction_alloc_block_size=8192
optimizer_prune_level=1
loose_performance_schema_max_file_instances=0
max_digest_length=1024
innodb_max_dirty_pages_pct_lwm=0
innodb_status_output_locks=OFF
binlog_row_image=full
innodb_change_buffer_max_size=25
innodb_optimize_fulltext_only=OFF
loose_performance_schema_max_file_handles=0
loose_performance_schema_users_size=0
innodb_max_undo_log_size=1073741824
slave_parallel_type=LOGICAL_CLOCK
innodb_sync_spin_loops=30
loose_group_replication_flow_control_period=1
loose_internal_tmp_mem_storage_engine=MEMORY
sha256_password_proxy_users=OFF
innodb_flush_sync=ON
loose_performance_schema_max_rwlock_instances=0
delayed_insert_timeout=300
preload_buffer_size=32768
concurrent_insert=1
block_encryption_mode="aes-128-ecb"
slow_query_log=ON
net_buffer_length=16384
innodb_buffer_pool_size=3221225472
delayed_insert_limit=100
delayed_queue_size=1000
session_track_gtids=OFF
innodb_thread_sleep_delay=10000
sql_require_primary_key=OFF
innodb_old_blocks_pct=37
innodb_sort_buffer_size=1048576
innodb_page_cleaners=8
loose_innodb_parallel_read_threads=1
innodb_spin_wait_delay=6
myisam_sort_buffer_size=262144
innodb_concurrency_tickets=5000
loose_performance_schema_max_cond_classes=0
loose_innodb_doublewrite_pages=64
transaction_write_set_extraction=XXHASH64
binlog_checksum=CRC32
loose_performance_schema_max_stage_classes=0
loose_performance_schema_events_statements_history_long_size=0
sync_relay_log=10000
loose_ccl_queue_bucket_size=64
max_length_for_sort_data=1024
max_error_count=64
innodb_stats_include_delete_marked=OFF
innodb_strict_mode=OFF
binlog_order_commits=OFF
performance_schema=1
innodb_ft_min_token_size=3
join_buffer_size=1M
optimizer_trace_max_mem_size=16384
innodb_autoinc_lock_mode=2
innodb_rollback_on_timeout=OFF
loose_performance_schema_max_thread_instances=0
max_write_lock_count=102400
loose_innodb_trx_resurrect_table_lock_accelerate=OFF
master_verify_checksum=OFF
innodb_ft_num_word_optimize=2000
log_error_verbosity=3
log_throttle_queries_not_using_indexes=0
loose_group_replication_flow_control_hold_percent=10
low_priority_updates=0
range_alloc_block_size=4096
sort_buffer_size=2M
max_allowed_packet=1073741824
read_buffer_size=1M
thread_cache_size=100
loose_performance_schema_events_waits_history_size=0
loose_thread_pool_oversubscribe=32
windowing_use_high_precision=ON
log_queries_not_using_indexes=OFF
innodb_flush_neighbors=0
- mysql.yaml
# mysql.yaml — MySQL Deployment + NodePort Service + root-password Secret.
# NOTE(review): indentation restored (the pasted original was flat and not
# valid YAML).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  replicas: 1  # single instance; NFS-backed datadir must not be shared
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          # NOTE(review): compose deployment pins mysql:8.0.36 — consider
          # pinning the same patch version here for reproducibility.
          image: mysql:8.0
          args: ["--default-authentication-plugin=mysql_native_password"]
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: root-password
            - name: MYSQL_DATABASE
              value: "exam"
          ports:
            - containerPort: 3306
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-storage
            # subPath mounts just mysqld.cnf into conf.d instead of shadowing
            # the whole directory
            - mountPath: /etc/mysql/conf.d/mysqld.cnf
              name: mysql-config
              subPath: mysqld.cnf
      volumes:
        - name: mysql-storage
          persistentVolumeClaim:
            claimName: pvc-mysql
        - name: mysql-config
          configMap:
            name: mysql-config
---
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  selector:
    app: mysql
  ports:
    - port: 3306
      targetPort: 3306
      nodePort: 31306  # external access: <any-node-ip>:31306
  type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
  name: mysql-secret
type: Opaque
data:
  root-password: ********  # base64编码,命令:echo -n '{password}' | base64
- redis.yaml
# redis.yaml — Redis Deployment + NodePort Service + password Secret.
# NOTE(review): indentation restored (the pasted original was flat and not
# valid YAML).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:alpine3.15
          # $(REDIS_PASSWORD) is expanded by the kubelet from the env entry below
          command: ["redis-server", "--requirepass", "$(REDIS_PASSWORD)"]
          ports:
            - containerPort: 6379
          env:
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
          volumeMounts:
            - mountPath: /data
              name: redis-storage
      volumes:
        - name: redis-storage
          persistentVolumeClaim:
            claimName: pvc-redis
---
apiVersion: v1
kind: Service
metadata:
  name: redis
spec:
  ports:
    - port: 6379
      targetPort: 6379
      nodePort: 31379
  selector:
    app: redis
  type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
  name: redis-secret
type: Opaque
data:
  redis-password: ********  # base64编码,命令:echo -n '{password}' | base64
- qam.yaml
# qam.yaml — 批阅端 backend Deployment (3 replicas) + NodePort Service.
# NOTE(review): indentation restored (the pasted original was flat and not
# valid YAML). Passwords come from the mysql-secret / redis-secret objects.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: qam
spec:
  replicas: 3
  selector:
    matchLabels:
      app: qam
  template:
    metadata:
      labels:
        app: qam
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: qam
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 8080
          env:
            - name: JAVA_OPTIONS
              value: "-Xmx2G"
            - name: SERVER_PORT
              value: "8080"
            - name: ENVIRONMENT
              value: "PROD"
            - name: DOMAIN_NAME
              value: "http://172.29.2.40"
            # sibling services are addressed by their in-cluster Service names
            - name: OAM_SERVER
              value: "oam:8080"
            - name: RESOURCE_SERVER
              value: "resource:8080"
            - name: REDIS_TOKEN_DATABASE
              value: "1"
            - name: REDIS_TOKEN_HOST
              value: "redis"
            - name: REDIS_TOKEN_PORT
              value: "6379"
            - name: REDIS_TOKEN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
            - name: MYSQL_DRIVER
              value: "com.mysql.cj.jdbc.Driver"
            - name: MYSQL_PROTOCOL
              value: "jdbc:mysql"
            - name: MYSQL_HOST
              value: "mysql"
            - name: MYSQL_PORT
              value: "3306"
            - name: MYSQL_DATABASE
              value: "exam"
            - name: MYSQL_USERNAME
              value: "root"
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: root-password
            - name: REDIS_HOST
              value: "redis"
            - name: REDIS_PORT
              value: "6379"
            - name: REDIS_DATABASE
              value: "0"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
---
apiVersion: v1
kind: Service
metadata:
  name: qam
spec:
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 31881
  selector:
    app: qam
  type: NodePort
- oam.yaml
# oam.yaml — 考试端 backend Deployment (3 replicas) + NodePort Service.
# NOTE(review): indentation restored (the pasted original was flat and not
# valid YAML). Passwords come from the mysql-secret / redis-secret objects.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: oam
spec:
  replicas: 3
  selector:
    matchLabels:
      app: oam
  template:
    metadata:
      labels:
        app: oam
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: oam
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 8080
          env:
            - name: JAVA_OPTIONS
              value: "-Xmx1G"
            - name: SERVER_PORT
              value: "8080"
            - name: ENVIRONMENT
              value: "PROD"
            - name: DOMAIN_NAME
              value: "http://172.29.2.40"
            # NOTE(review): no "lrm" service is defined in this document set —
            # presumably provided elsewhere or unused; verify.
            - name: LRM_SERVER
              value: "lrm:8080"
            - name: RESOURCE_SERVER
              value: "resource:8080"
            - name: REDIS_TOKEN_DATABASE
              value: "1"
            - name: REDIS_TOKEN_HOST
              value: "redis"
            - name: REDIS_TOKEN_PORT
              value: "6379"
            - name: REDIS_TOKEN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
            - name: MYSQL_DRIVER
              value: "com.mysql.cj.jdbc.Driver"
            - name: MYSQL_PROTOCOL
              value: "jdbc:mysql"
            - name: MYSQL_HOST
              value: "mysql"
            - name: MYSQL_PORT
              value: "3306"
            - name: MYSQL_DATABASE
              value: "exam"
            - name: MYSQL_USERNAME
              value: "root"
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: root-password
            - name: REDIS_HOST
              value: "redis"
            - name: REDIS_PORT
              value: "6379"
            - name: REDIS_DATABASE
              value: "0"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
            - name: USER_AUTHENTICATION
              value: "false"
---
apiVersion: v1
kind: Service
metadata:
  name: oam
spec:
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 31880
  selector:
    app: oam
  type: NodePort
- resource.yaml
# resource.yaml — file/resource backend Deployment (3 replicas, shared NFS
# uploads volume) + NodePort Service.
# NOTE(review): indentation restored (the pasted original was flat and not
# valid YAML). All 3 replicas share pvc-uploads (ReadWriteMany).
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: resource
spec:
  replicas: 3
  selector:
    matchLabels:
      app: resource
  template:
    metadata:
      labels:
        app: resource
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: resource
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 8080
          env:
            - name: JAVA_OPTIONS
              value: "-Xmx1G"
            - name: SERVER_PORT
              value: "8080"
            - name: ENVIRONMENT
              value: "PROD"
            - name: DOMAIN_NAME
              value: "http://172.29.2.40"
            - name: REDIS_TOKEN_DATABASE
              value: "1"
            - name: REDIS_TOKEN_HOST
              value: "redis"
            - name: REDIS_TOKEN_PORT
              value: "6379"
            - name: REDIS_TOKEN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
            - name: MYSQL_DRIVER
              value: "com.mysql.cj.jdbc.Driver"
            - name: MYSQL_PROTOCOL
              value: "jdbc:mysql"
            - name: MYSQL_HOST
              value: "mysql"
            - name: MYSQL_PORT
              value: "3306"
            - name: MYSQL_DATABASE
              value: "exam"
            - name: MYSQL_USERNAME
              value: "root"
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: root-password
            - name: REDIS_HOST
              value: "redis"
            - name: REDIS_PORT
              value: "6379"
            - name: REDIS_DATABASE
              value: "0"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: redis-secret
                  key: redis-password
            - name: LOCAL_FILE_ROOT
              value: "/uploads"
            - name: FORCE_LOCAL_STORAGE
              value: "true"
          volumeMounts:
            - mountPath: /uploads
              name: uploads-storage
      volumes:
        - name: uploads-storage
          persistentVolumeClaim:
            claimName: pvc-uploads
---
apiVersion: v1
kind: Service
metadata:
  name: resource
spec:
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 31882
  selector:
    app: resource
  type: NodePort
- web-exam.yaml
---
# web-exam.yaml — exam front-end (nginx on port 80), 3 replicas, NodePort 30001.
# nginx access/error logs are persisted on the shared PVC pvc-nginx-logs.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-exam
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-exam
  template:
    metadata:
      labels:
        app: web-exam
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: web-exam
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 80
          env:
            - name: SERVER_IP
              value: "114.55.86.3"
            # Backend services resolved by in-cluster Service name
            - name: RESOURCE_HOST
              value: "resource"
            - name: RESOURCE_PORT
              value: "8080"
            - name: OAM_HOST
              value: "oam"
            - name: OAM_PORT
              value: "8080"
            - name: QAM_HOST
              value: "qam"
            - name: QAM_PORT
              value: "8080"
            # NOTE(review): EM_HOST points at the qam service — confirm EM is
            # intentionally served by qam rather than a dedicated "em" service.
            - name: EM_HOST
              value: "qam"
            - name: EM_PORT
              value: "8080"
          volumeMounts:
            - name: nginx-log
              mountPath: /var/log/nginx
      volumes:
        - name: nginx-log
          persistentVolumeClaim:
            claimName: pvc-nginx-logs
---
apiVersion: v1
kind: Service
metadata:
  name: web-exam
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30001
  selector:
    app: web-exam
- web-receive.yaml
---
# web-receive.yaml — "receive" front-end (nginx on port 80), 3 replicas, NodePort 30002.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-receive
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-receive
  template:
    metadata:
      labels:
        app: web-receive
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: web-receive
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 80
          env:
            - name: SERVER_IP
              value: "114.55.86.3"
            # Backend services resolved by in-cluster Service name
            - name: RESOURCE_HOST
              value: "resource"
            - name: RESOURCE_PORT
              value: "8080"
            - name: OAM_HOST
              value: "oam"
            - name: OAM_PORT
              value: "8080"
            - name: QAM_HOST
              value: "qam"
            - name: QAM_PORT
              value: "8080"
            # NOTE(review): EM_HOST points at the qam service — confirm intended.
            - name: EM_HOST
              value: "qam"
            - name: EM_PORT
              value: "8080"
          volumeMounts:
            - name: nginx-log
              mountPath: /var/log/nginx
      volumes:
        - name: nginx-log
          persistentVolumeClaim:
            claimName: pvc-nginx-logs
---
apiVersion: v1
kind: Service
metadata:
  name: web-receive
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30002
  selector:
    app: web-receive
- web-marking.yaml
---
# web-marking.yaml — "marking" front-end (nginx on port 80), 3 replicas, NodePort 30003.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-marking
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-marking
  template:
    metadata:
      labels:
        app: web-marking
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: web-marking
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 80
          env:
            - name: SERVER_IP
              value: "114.55.86.3"
            # Backend services resolved by in-cluster Service name
            - name: RESOURCE_HOST
              value: "resource"
            - name: RESOURCE_PORT
              value: "8080"
            - name: OAM_HOST
              value: "oam"
            - name: OAM_PORT
              value: "8080"
            - name: QAM_HOST
              value: "qam"
            - name: QAM_PORT
              value: "8080"
            # NOTE(review): EM_HOST points at the qam service — confirm intended.
            - name: EM_HOST
              value: "qam"
            - name: EM_PORT
              value: "8080"
          volumeMounts:
            - name: nginx-log
              mountPath: /var/log/nginx
      volumes:
        - name: nginx-log
          persistentVolumeClaim:
            claimName: pvc-nginx-logs
---
apiVersion: v1
kind: Service
metadata:
  name: web-marking
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30003
  selector:
    app: web-marking
- web-generate.yaml
---
# web-generate.yaml — "generate" front-end (nginx on port 80), 3 replicas, NodePort 30004.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-generate
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-generate
  template:
    metadata:
      labels:
        app: web-generate
    spec:
      imagePullSecrets:
        - name: aliyun-docker-registry
      containers:
        - name: web-generate
          image: registry.cn-hangzhou.aliyuncs.com/********
          ports:
            - containerPort: 80
          env:
            - name: SERVER_IP
              value: "114.55.86.3"
            # Backend services resolved by in-cluster Service name
            - name: RESOURCE_HOST
              value: "resource"
            - name: RESOURCE_PORT
              value: "8080"
            - name: OAM_HOST
              value: "oam"
            - name: OAM_PORT
              value: "8080"
            - name: QAM_HOST
              value: "qam"
            - name: QAM_PORT
              value: "8080"
            # NOTE(review): EM_HOST points at the qam service — confirm intended.
            - name: EM_HOST
              value: "qam"
            - name: EM_PORT
              value: "8080"
          volumeMounts:
            - name: nginx-log
              mountPath: /var/log/nginx
      volumes:
        - name: nginx-log
          persistentVolumeClaim:
            claimName: pvc-nginx-logs
---
apiVersion: v1
kind: Service
metadata:
  name: web-generate
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30004
  selector:
    app: web-generate
3.4.3 部署过程
# 下载或创建上面所有yaml文件,并根据实际情况修改环境变量,例如SERVER_IP和DOMAIN_NAME
# 首先创建pv、pvc
kubectl apply -f pv-pvc.yaml
# 然后部署mysql
kubectl apply -f mysql.yaml
[root@master exam]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mysql-6d66cc75f9-j8p8t 1/1 Running 0 94m
# mysql Pod启动后,使用dbeaver连接数据库,导入初始化sql文件
# 依次部署redis、qam、oam、resource、web-exam、web-receive、web-marking、web-generate
# 待所有pod启动后即可访问服务
[root@master exam]# kubectl get pod
NAME READY STATUS RESTARTS AGE
mysql-6d66cc75f9-j8p8t 1/1 Running 0 98m
oam-85b5fc764c-4qx5n 1/1 Running 0 73m
oam-85b5fc764c-lh778 1/1 Running 0 73m
oam-85b5fc764c-rtdgv 1/1 Running 0 73m
qam-67bdcf477b-fm4nw 1/1 Running 0 75m
qam-67bdcf477b-qn8fm 1/1 Running 0 75m
qam-67bdcf477b-s9zgm 1/1 Running 0 75m
redis-75cf7577c9-rx66r 1/1 Running 0 93m
resource-6d847c99c-6lqvv 1/1 Running 0 40m
resource-6d847c99c-6m96l 1/1 Running 0 40m
resource-6d847c99c-bfl4z 1/1 Running 0 40m
web-exam-655b9cb99-b44c6 1/1 Running 0 8s
web-exam-655b9cb99-l9xlr 1/1 Running 0 33m
web-exam-655b9cb99-xthd5 1/1 Running 0 8s
web-generate-694698c94f-7l2dk 1/1 Running 0 16m
web-generate-694698c94f-czm8g 1/1 Running 0 16m
web-generate-694698c94f-kpd72 1/1 Running 0 16m
web-marking-588dcc9944-8rmlx 1/1 Running 0 17m
web-marking-588dcc9944-k56fb 1/1 Running 0 17m
web-marking-588dcc9944-tf24p 1/1 Running 0 17m
web-receive-76fcd69d9c-cgwjr 1/1 Running 0 24m
web-receive-76fcd69d9c-dxxc5 1/1 Running 0 24m
web-receive-76fcd69d9c-z6gpw 1/1 Running 0 24m
[root@master exam]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14d
mysql NodePort 10.100.41.234 <none> 3306:31306/TCP 3h5m
oam NodePort 10.101.132.80 <none> 8080:31880/TCP 131m
qam NodePort 10.101.250.191 <none> 8080:31881/TCP 149m
redis NodePort 10.104.221.73 <none> 6379:31379/TCP 150m
resource NodePort 10.101.56.111 <none> 8080:31882/TCP 98m
web-exam NodePort 10.104.187.202 <none> 80:30001/TCP 95m
web-generate NodePort 10.106.85.170 <none> 80:30004/TCP 74m
web-marking NodePort 10.106.149.230 <none> 80:30003/TCP 75m
web-receive NodePort 10.106.49.54 <none> 80:30002/TCP 82m
3.5 应用运维
# 更新文件
sed -i 's/2024.06.18.1/2024.08.05.1/g' web-receive.yaml
# 更新qam镜像
kubectl set image deployment/qam qam=registry.cn-hangzhou.aliyuncs.com/********
# 查看滚动更新状态
kubectl rollout status deployment qam
# 重新deployment
kubectl rollout restart deployment qam
# 伸缩副本数量
kubectl scale deployment qam --replicas=5
# 进入 Pod 容器
kubectl exec -it pod-name -- bash
# 查看历史
kubectl rollout history deployment qam
# 回到上个版本
kubectl rollout undo deployment qam
# 回到指定版本
kubectl rollout undo deployment qam --to-revision=2
容器报错Fatal glibc error: CPU does not support x86-64-v2,解决方法:
# 检测cpu指令集脚本
#!/bin/sh
# Check whether this CPU supports the x86-64-v2 microarchitecture level by
# testing the feature flags in /proc/cpuinfo (cx16, lahf, popcnt, sse4_1,
# sse4_2, ssse3). Prints a support/no-support message.
# NOTE(review): strict x86-64-v2 also requires sse3; in practice ssse3 implies
# it on real hardware — confirm if exactness matters.
flags=$(grep -m1 '^flags' /proc/cpuinfo | cut -d: -f2)
# Run the flag test directly instead of via `eval` on a quoted command string,
# which was fragile and hard to read; awk exits 0 only if every flag matched.
if echo "$flags" | awk '/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}'; then
  echo "CPU supports x86-64-v2"
else
  echo "CPU doesn't support x86-64-v2"
fi
# 修改虚拟机CPU模式为直通(host-passthrough)即可解决
3.6 停机
kubectl scale deployment *** --replicas=0
四、Kubesphere可观测平台
4.1 配置默认StorageClasses
安装kubesphere前需要保证k8s集群中存在默认sc,这里以nfs存储作为示例
# 在NFS存储节点创建存储目录,确保NFS可挂载
[root@storage ~]# mkdir /exam_data/nfs_provisioner
# 在Master节点确认当前SC状态
[root@master ~]# kubectl get sc
No resources found  # 不存在StorageClass
# 安装nfs-provisioner-rbac组件
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes-incubator/external-storage/master/nfs-client/deploy/rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
# 下载nfs-provisioner-deployment.yaml,然后修改配置
wget https://raw.githubusercontent.com/Kubernetes-incubator/external-storage/master/nfs-client/deploy/deployment.yaml
# 修改以下几处配置
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-provisioner  # provisioner名称,需要和后面StorageClass文件保持一致
            - name: NFS_SERVER
              value: 172.16.30.135  # NFS服务器地址
            - name: NFS_PATH
              value: /exam_data/nfs_provisioner  # NFS挂载目录
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.16.30.135  # NFS服务器地址
            path: /exam_data/nfs_provisioner  # NFS挂载目录
# 执行创建
$ kubectl apply -f deployment.yaml
deployment.apps/nfs-client-provisioner created
# 创建SC配置storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    "storageclass.kubernetes.io/is-default-class": "true"  # 设置为默认sc
provisioner: nfs-provisioner  # 需要和上面PROVISIONER_NAME保持一致
parameters:
  archiveOnDelete: "false"
# 执行创建
$ kubectl apply -f storageclass.yaml
创建完成后检查当前sc,应该输出如下:
[root@master kubesphere]# kubectl get sc
NAME                            PROVISIONER       RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage (default)   nfs-provisioner   Delete          Immediate           false                  18h
此时还需要修改kube-apiserver.yaml配置文件,否则创建pvc时会报错selfLink was empty, can’t make reference
在 /etc/kubernetes/manifests/kube-apiserver.yaml 文件中添加参数 `- --feature-gates=RemoveSelfLink=false`
....
spec:
  containers:
    - command:
        - kube-apiserver
        - --feature-gates=RemoveSelfLink=false  # 添加此行配置
        - --advertise-address=172.29.2.40
        - --allow-privileged=true
        - --authorization-mode=Node,RBAC
......
# 应用更改:kube-apiserver是静态Pod,无需(也不应)执行kubectl apply;
# 保存该manifest文件后,kubelet会自动检测到变化并重建kube-apiserver容器
4.2 安装kubesphere
执行以下命令开始安装
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.4.1/kubesphere-installer.yaml kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.4.1/cluster-configuration.yaml
查看安装日志
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
使用
kubectl get pod --all-namespaces
查看所有 Pod 是否在 KubeSphere 的相关命名空间中正常运行。如果是,请通过以下命令检查控制台的端口(默认为 30880):
kubectl get svc/ks-console -n kubesphere-system
确保在安全组中打开了端口 30880,并通过 NodePort(IP:30880)使用默认帐户和密码(admin/P@88w0rd)访问 Web 控制台。
五、Helm部署
5.1 Helm安装
# 下载
https://github.com/helm/helm/releases
# 解压安装
tar -zxvf helm-v3.15.2-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
# 验证版本
helm version
附录:
K8S环境重置
# 管理节点
kubeadm reset
rm -rf /root/.kube/
rm -rf /etc/cni/net.d/
yum install -y ipvsadm
ipvsadm -C
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# 工作节点
kubeadm reset
rm -rf /root/.kube
rm -rf /etc/cni/net.d
rm -rf /etc/kubernetes/*
yum install -y ipvsadm
ipvsadm -C
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
离线kube-flannel.yaml
---
# Offline kube-flannel.yaml (flannel v0.25.1): Namespace, RBAC, ConfigMap with
# CNI configuration, and the flannel DaemonSet running on every Linux node.
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    # Flannel needs privileged pod-security to manage host networking
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "EnableNFTables": false,
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        # Copies the flannel CNI plugin binary onto the host
        - name: install-cni-plugin
          image: docker.io/flannel/flannel-cni-plugin:v1.4.1-flannel1
          command:
            - cp
          args:
            - -f
            - /flannel
            - /opt/cni/bin/flannel
          volumeMounts:
            - name: cni-plugin
              mountPath: /opt/cni/bin
        # Installs the CNI network config into /etc/cni/net.d
        - name: install-cni
          image: docker.io/flannel/flannel:v0.25.1
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: docker.io/flannel/flannel:v0.25.1
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: EVENT_QUEUE_DEPTH
              value: "5000"
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni-plugin
          hostPath:
            path: /opt/cni/bin
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate