Platform Environment
Software
- CentOS Linux release 7.7.1908 (Kernel 3.10.0-1062.18.1.el7.x86_64)
- Docker CE 18.09.9
- Kubernetes v1.18.2
- Calico v3.8
- Keepalived v1.3.5
- HAproxy v1.5.18
Hardware

| Hostname | IP            |
|----------|---------------|
| master01 | 192.168.10.12 |
| master02 | 192.168.10.13 |
| master03 | 192.168.10.14 |
| work01   | 192.168.10.15 |
| work02   | 192.168.10.16 |
| work03   | 192.168.10.17 |
| VIP      | 192.168.10.19 |
Cluster Setup
Initialization
Run on all master and worker nodes:
cat >> /etc/hosts << EOF
192.168.10.12 master01
192.168.10.13 master02
192.168.10.14 master03
192.168.10.15 work01
192.168.10.16 work02
192.168.10.17 work03
EOF
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
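As an optional sanity check, free should now report zero swap:
# Optional check: the Swap line should read all zeros
free -h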
# Install wget
yum install wget -y
# Back up the default repo file
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
# Switch to the Aliyun base repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Switch to the Aliyun EPEL repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# Clean the yum cache and rebuild it
yum clean all && yum makecache
# Update all packages
yum update -y
# Sync system time
timedatectl
timedatectl set-ntp true
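To verify that time synchronization is actually active, check the status output:
# "NTP synchronized" should report yes once chronyd has synced
timedatectl status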
Install Docker
Install on all master and worker nodes.
# Install Docker CE
# Set up the repository
# Install required packages
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the Docker repo
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker CE
yum install -y containerd.io \
docker-ce-18.09.9 \
docker-ce-cli-18.09.9
# Start Docker and enable it at boot
systemctl start docker
systemctl enable docker
# Change Docker's Cgroup Driver to systemd
# and configure domestic registry mirrors
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "registry-mirrors": [
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://registry.docker-cn.com"
  ]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Restart docker.
systemctl daemon-reload
systemctl restart docker
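To confirm the daemon.json changes took effect:
# Optional check: expect "Cgroup Driver: systemd"
docker info | grep -i "cgroup driver"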
Install kubeadm, kubelet, and kubectl
Run on all master and worker nodes.
# Configure the Kubernetes yum repo (the official repo is preferred where reachable)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Add kernel parameters required by Kubernetes
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the settings
sysctl --system
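If sysctl reports that the net.bridge.* keys do not exist, the br_netfilter kernel module is probably not loaded yet; loading it first and re-applying usually resolves this (a common CentOS 7 pitfall, not something the original steps hit):
# Load the bridge netfilter module, then re-apply the settings
modprobe br_netfilter
sysctl --system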
# Install the latest stable release as of this writing (v1.18.2) of kubelet, kubeadm, and kubectl
yum install -y kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2 --disableexcludes=kubernetes
# Start kubelet and enable it at boot
systemctl start kubelet
systemctl enable kubelet
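At this point kubelet will keep restarting in a loop, because it has no configuration until kubeadm init or kubeadm join runs; that is expected and safe to ignore for now:
# Expect an activating/auto-restart state until the node is initialized or joined
systemctl status kubelet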
Load-Balancing the apiserver with HAProxy
Run on all master nodes:
yum install haproxy-1.5.18 -y
cat > /etc/haproxy/haproxy.cfg <<EOF
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend k8s-api
    mode tcp
    option tcplog
    bind *:16443
    default_backend k8s-api

backend k8s-api
    mode tcp
    balance roundrobin
    server master01 192.168.10.12:6443 check
    server master02 192.168.10.13:6443 check
    server master03 192.168.10.14:6443 check
EOF
Start HAProxy on all master nodes:
systemctl start haproxy
systemctl enable haproxy
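A quick way to confirm each node is accepting connections on the frontend port:
# Optional check: HAProxy should be listening on 16443
ss -lntp | grep 16443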
High Availability for the apiserver with Keepalived
Run on all master nodes:
yum -y install keepalived psmisc
Keepalived configuration on master01:
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id master01
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    # killall -0 exits 0 while an haproxy process is alive
    script "killall -0 haproxy"
    interval 2
    weight 10
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 50
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.19
    }
    track_script {
        check_haproxy
    }
}
EOF
Keepalived configuration on master02:
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id master02
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 10
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 50
    priority 98
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.19
    }
    track_script {
        check_haproxy
    }
}
EOF
Keepalived configuration on master03:
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id master03
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 10
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 50
    priority 96
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.19
    }
    track_script {
        check_haproxy
    }
}
EOF
Run on all master nodes:
systemctl start keepalived
systemctl enable keepalived
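The VIP should now be bound on master01, the node with the highest priority. Assuming the interface is ens192 as in the configurations above:
# The VIP should appear on master01 only
ip addr show ens192 | grep 192.168.10.19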
Notes
- The weight in vrrp_script must be greater than the priority gap between MASTER and BACKUP; here a passing check adds 10, so when haproxy dies on the master its priority (100) falls below a healthy backup's (98 + 10) and the VIP moves.
- Adding nopreempt to vrrp_instance keeps a recovered master from automatically taking the VIP back (see the sketch below).
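As a sketch of the second note, here is where nopreempt would go, adapted from the master01 instance above. Keepalived honors nopreempt only when the instance's initial state is BACKUP, so that line changes as well:
vrrp_instance VI_1 {
    state BACKUP          # nopreempt requires an initial state of BACKUP
    nopreempt             # do not reclaim the VIP after recovering
    interface ens192
    virtual_router_id 50
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.19
    }
    track_script {
        check_haproxy
    }
}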
Create the K8S Cluster
Before initializing, add a hosts entry so that APISERVER_NAME (the VIP's domain name) resolves to MASTER_IP (the VIP address):
export MASTER_IP=192.168.10.19
export APISERVER_NAME=k8s.api
echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts
Run kubeadm init on master01 to initialize the cluster:
kubeadm init \
--apiserver-advertise-address 0.0.0.0 \
--apiserver-bind-port 6443 \
--cert-dir /etc/kubernetes/pki \
--control-plane-endpoint k8s.api \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version 1.18.2 \
--pod-network-cidr 192.10.0.0/16 \
--service-cidr 192.20.0.0/16 \
--service-dns-domain cluster.local \
--upload-certs
Load Environment Variables
Run on master01; these settings let you administer the cluster with kubectl.
If running as root:
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
If running as a non-root user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
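Either way, verify that kubectl can now reach the apiserver:
# Both commands should answer without connection or certificate errors
kubectl cluster-info
kubectl get nodes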
Install the Pod Network Add-on
Run on master01.
# Fetch the manifest
mkdir calico && cd calico
wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
# 修改装备文件
vi calico.yaml
# Find 192.168.0.0/16 and change it to 192.10.0.0/16 (the --pod-network-cidr used above)
# Deploy the Pod network add-on
kubectl apply -f calico.yaml
Watch the pod status in real time:
watch kubectl get pods --all-namespaces -o wide
Add the Remaining Master Nodes to the Cluster
Run on the other master nodes.
Use the join command printed by kubeadm init on master01,
changing the port from 6443 to 16443 so traffic goes through HAProxy.
export MASTER_IP=192.168.10.19
export APISERVER_NAME=k8s.api
echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts
kubeadm join k8s.api:16443 --token ztjux9.2tau56zck212j9ra \
--discovery-token-ca-cert-hash sha256:a2b552266902fb5f6620330fc1a6638a9cdd6fec3408edba1082e6c8389ac517 \
--control-plane --certificate-key 961494e7d0a9de0219e2b0dc8bdaa9ca334ecf093a6c5f648aa34040ad39b61a
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
Add All Worker Nodes to the Cluster
Run on the worker nodes.
Use the join command printed by kubeadm init on master01,
changing the port from 6443 to 16443.
export MASTER_IP=192.168.10.19
export APISERVER_NAME=k8s.api
echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts
kubeadm join k8s.api:16443 --token ztjux9.2tau56zck212j9ra \
--discovery-token-ca-cert-hash sha256:a2b552266902fb5f6620330fc1a6638a9cdd6fec3408edba1082e6c8389ac517
Check the cluster on master01:
watch kubectl get nodes -o wide
When every node shows Ready, the cluster has been set up successfully.
Failover Testing
- Stop haproxy on master01
- Power off master01
In both cases you should see the VIP fail over to master02.
Then repeat the same steps on master02 and watch whether the VIP moves on to master03.
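One way to watch the failover, assuming the same interface and VIP as above:
# Run on master02: the VIP should appear here once master01 goes down
ip addr show ens192 | grep 192.168.10.19
# The apiserver should remain reachable through the VIP throughout
kubectl get nodes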
Conclusion
This post was a hands-on build of a highly available, load-balanced K8S cluster, recorded from my actual steps.
Did you notice the one place that could still be optimized further?