Server preparation
Server deployment overview
Host IP (spec)                          Role
192.168.1.110 (4C / 20GB RAM / 200GB)   k8s-master-01
192.168.1.111 (4C / 20GB RAM / 200GB)   k8s-master-02
192.168.1.112 (4C / 20GB RAM / 200GB)   k8s-master-03
192.168.1.113 (4C / 20GB RAM / 200GB)   k8s-node-01
192.168.1.114 (4C / 20GB RAM / 200GB)   k8s-node-02
192.168.1.200                           VIP
Pre-deployment preparation
Set the hostname on each node
hostnamectl set-hostname k8s-master-01
hostnamectl set-hostname k8s-master-02
hostnamectl set-hostname k8s-master-03
hostnamectl set-hostname k8s-node-01
hostnamectl set-hostname k8s-node-02
Add name resolution to /etc/hosts
sudo tee -a /etc/hosts << EOF
192.168.1.110 k8s-master-01
192.168.1.111 k8s-master-02
192.168.1.112 k8s-master-03
192.168.1.113 k8s-node-01
192.168.1.114 k8s-node-02
EOF
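To confirm that name resolution works on every node, a quick check loop can be run (a minimal sketch; it only assumes the hostnames above are present in /etc/hosts):
for h in k8s-master-01 k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02; do
    # getent resolves through /etc/hosts, so this verifies the entries just added
    getent hosts $h || echo "resolution failed for $h"
done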
Configure a static host IP
ip a s ens33
sudo vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
IPADDR=192.168.1.110
NETMASK=255.255.255.0
GATEWAY=192.168.1.1
DNS1=192.168.1.1
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=63acc257-a602-4895-b5de-ef4da082a4de
DEVICE=ens33
ONBOOT=yes
HWADDR=00:0C:29:BB:18:A8
sudo systemctl restart network.service
Disable the firewall
sudo systemctl stop firewalld.service
sudo systemctl disable firewalld.service
sudo systemctl status firewalld.service
Disable SELinux
[admin@k8s-master-01 ~]$ sudo setenforce 0
[admin@k8s-master-01 ~]$ sudo sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[admin@k8s-master-01 ~]$ sudo sestatus
SELinux status: disabled
Configure time synchronization
crontab -l
crontab -e
0 */1 * * * /usr/sbin/ntpdate time.aliyun.com
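The cron job only runs hourly; to sync the clock immediately after setup and confirm it works, ntpdate can be run once by hand (installing it first if it is not already present):
sudo yum -y install ntpdate
sudo /usr/sbin/ntpdate time.aliyun.com
# the nodes should now agree on the time
date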
Upgrade the operating system kernel
[admin@k8s-master-01 ~]$ uname -r
3.10.0-1160.el7.x86_64
sudo rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
sudo yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
sudo yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
sudo grub2-set-default 0
sudo grub2-mkconfig -o /boot/grub2/grub.cfg
sudo reboot
[admin@k8s-master-01 ~]$ uname -r
5.4.267-1.el7.elrepo.x86_64
Enable kernel IP forwarding and bridge filtering
sudo tee /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
vm.swappiness=0
EOF
sudo modprobe br_netfilter
[admin@k8s-master-01 ~]$ lsmod | grep br_netfilter
br_netfilter 28672 0
sudo sysctl --system
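To confirm the settings took effect, the values can be read back, and br_netfilter can be declared for systemd-modules-load so it is loaded again after a reboot (a small sketch using the standard /etc/modules-load.d mechanism):
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
echo br_netfilter | sudo tee /etc/modules-load.d/br_netfilter.conf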
Install ipset and ipvsadm
sudo yum -y install ipset ipvsadm
sudo mkdir -p /etc/sysconfig/ipvsadm
sudo tee /etc/sysconfig/ipvsadm/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
sudo chmod 755 /etc/sysconfig/ipvsadm/ipvs.modules
sudo bash /etc/sysconfig/ipvsadm/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
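The script above loads the IPVS modules for the current boot only; to have them loaded automatically on every boot they can also be declared for systemd-modules-load (a minimal sketch, same module list as above):
cat << EOF | sudo tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF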
Disable the swap partition
free -m
sudo swapoff -a
sudo sed -ri 's/.*swap.*/#&/' /etc/fstab
grep swap /etc/fstab
Configure passwordless SSH login
[admin@k8s-master-01 ~]$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/admin/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/admin/.ssh/id_rsa.
Your public key has been saved in /home/admin/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:1gneqsEe9sALBeKJ/tti6LMjiPc5+rHonYSp//Kx2IM admin@k8s-master-01
The key's randomart image is:
+---[RSA 2048]----+
| |
| |
| . . . |
| o o . . + . |
|. o . S + |
|. o + . . |
|o.+.= B . |
|+=EO+@ B |
|+*@X#++ . |
+----[SHA256]-----+
[admin@k8s-master-01 ~]$ cd /home/admin/.ssh
[admin@k8s-master-01 .ssh]$ ll
total 8
-rw------- 1 admin admin 1766 Jan 21 00:11 id_rsa
-rw-r--r-- 1 admin admin 401 Jan 21 00:11 id_rsa.pub
[admin@k8s-master-01 .ssh]$ cp id_rsa.pub authorized_keys
[admin@k8s-master-01 .ssh]$ ll
total 12
-rw-r--r-- 1 admin admin 401 Jan 21 00:22 authorized_keys
-rw------- 1 admin admin 1766 Jan 21 00:21 id_rsa
-rw-r--r-- 1 admin admin 401 Jan 21 00:21 id_rsa.pub
scp -r /home/admin/.ssh/* 192.168.1.111:/home/admin/.ssh/
scp -r /home/admin/.ssh/* 192.168.1.112:/home/admin/.ssh/
scp -r /home/admin/.ssh/* 192.168.1.113:/home/admin/.ssh/
scp -r /home/admin/.ssh/* 192.168.1.114:/home/admin/.ssh/
ssh username@remote_host
[admin@k8s-node-01 ~]$ ssh admin@192.168.1.110
The authenticity of host '192.168.1.110 (192.168.1.110)' can't be established.
ECDSA key fingerprint is SHA256:8KouzgTNv7YwgKZqxi9+pMd2XCHErkIx3COxReKqu8A.
ECDSA key fingerprint is MD5:63:2a:20:10:03:75:fe:24:90:b5:89:55:b5:1d:6c:83.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.1.110' (ECDSA) to the list of known hosts.
Enter passphrase for key '/home/admin/.ssh/id_rsa':
Last login: Sat Jan 20 23:42:50 2024 from 192.168.1.24
[admin@k8s-node-01 ~]$ exit
logout
Connection to 192.168.1.113 closed.
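With the keys distributed, a quick loop verifies that every node can be reached without a password prompt (run from k8s-master-01; BatchMode makes ssh fail instead of asking for a password, and if the key was created with a passphrase an ssh-agent session is needed first):
for ip in 192.168.1.110 192.168.1.111 192.168.1.112 192.168.1.113 192.168.1.114; do
    ssh -o BatchMode=yes admin@$ip hostname
done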
Deploy the etcd cluster
wget https://github.com/coreos/etcd/releases/download/v3.5.11/etcd-v3.5.11-linux-amd64.tar.gz
tar xzvf etcd-v3.5.11-linux-amd64.tar.gz
cd etcd-v3.5.11-linux-amd64/
sudo mv etcd* /usr/local/bin
sudo tee /usr/lib/systemd/system/etcd.service <<- 'EOF'
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-01 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.1.110:2380 \
--listen-client-urls=http://192.168.1.110:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.1.110:2379 \
--initial-advertise-peer-urls=http://192.168.1.110:2380 \
--initial-cluster=k8s-master-01=http://192.168.1.110:2380,k8s-master-02=http://192.168.1.111:2380,k8s-master-03=http://192.168.1.112:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
sudo tee /usr/lib/systemd/system/etcd.service <<- 'EOF'
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-02 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.1.111:2380 \
--listen-client-urls=http://192.168.1.111:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.1.111:2379 \
--initial-advertise-peer-urls=http://192.168.1.111:2380 \
--initial-cluster=k8s-master-01=http://192.168.1.110:2380,k8s-master-02=http://192.168.1.111:2380,k8s-master-03=http://192.168.1.112:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
sudo tee /usr/lib/systemd/system/etcd.service <<- 'EOF'
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-03 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.1.112:2380 \
--listen-client-urls=http://192.168.1.112:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.1.112:2379 \
--initial-advertise-peer-urls=http://192.168.1.112:2380 \
--initial-cluster=k8s-master-01=http://192.168.1.110:2380,k8s-master-02=http://192.168.1.111:2380,k8s-master-03=http://192.168.1.112:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl enable --now etcd
[admin@k8s-master-01 ~]$ etcd --version
etcd Version: 3.5.11
Git SHA: 3b252db4f
Go Version: go1.20.12
Go OS/Arch: linux/amd64
[admin@k8s-master-01 etcd-v3.5.11-linux-amd64]$ systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2024-01-23 23:07:35 CST; 16s ago
 Main PID: 6494 (etcd)
    Tasks: 9
   Memory: 26.1M
   CGroup: /system.slice/etcd.service
           └─6494 /usr/local/bin/etcd --name=k8s-master-01 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=http://192.168. ......
[admin@k8s-master-01 ~]$ etcdctl member list
629900a511866cc5, started, k8s-master-02, http://192.168.1.111:2380, http://192.168.1.111:2379, false
f4525f168244cf0e, started, k8s-master-03, http://192.168.1.112:2380, http://192.168.1.112:2379, false
fa5c82427d33e98c, started, k8s-master-01, http://192.168.1.110:2380, http://192.168.1.110:2379, false
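Beyond the member list, etcdctl can confirm that every endpoint is healthy and show which member currently leads (the endpoints below are the client URLs configured in the unit files above):
etcdctl --endpoints=http://192.168.1.110:2379,http://192.168.1.111:2379,http://192.168.1.112:2379 endpoint health
etcdctl --endpoints=http://192.168.1.110:2379,http://192.168.1.111:2379,http://192.168.1.112:2379 -w table endpoint status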
Deploy the load balancer: Nginx + Keepalived
192.168.1.110 k8s-master-01, etcd01, keepalived + nginx (VIP: 192.168.1.200)
192.168.1.111 k8s-master-02, etcd02, keepalived + nginx (VIP: 192.168.1.200)
192.168.1.112 k8s-master-03, etcd03, keepalived + nginx (VIP: 192.168.1.200)
192.168.1.113 k8s-node-01
192.168.1.114 k8s-node-02
Keepalived + nginx provides high availability plus a reverse proxy. To save servers, both are deployed directly on the master nodes.
Keepalived manages a virtual IP (192.168.1.200) that is bound to one master node at a time, and nginx reverse-proxies requests to all three masters.
When the k8s cluster is initialized, the VIP is used as the control-plane address, so once the cluster is up, kubectl clients connect to VIP:16443.
Port 16443 is the port nginx listens on; nginx proxies that traffic to port 6443 on the three master nodes.
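Once the control plane is running (later in this guide), the path through the load balancer can be checked directly; a minimal sketch, assuming the apiserver is already up behind the VIP:
# -k skips certificate verification; /version is served to unauthenticated clients on default apiserver settings
curl -k https://192.168.1.200:16443/version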
Installation and deployment
Install nginx
mkdir -p /home/admin/software/docker/nginx/{conf/conf.d,html,cert,logs}
[admin@k8s-master-01 ~]$ echo '192.168.1.110' > /home/admin/software/docker/nginx/html/index.html
[admin@k8s-master-02 ~]$ echo '192.168.1.111' > /home/admin/software/docker/nginx/html/index.html
[admin@k8s-master-03 ~]$ echo '192.168.1.112' > /home/admin/software/docker/nginx/html/index.html
vim /home/admin/software/docker/nginx/conf/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
    worker_connections 1024;
}
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.1.110:6443;
        server 192.168.1.111:6443;
        server 192.168.1.112:6443;
    }
    server {
        listen 16443;
        proxy_pass k8s-apiserver;
    }
}
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    keepalive_timeout 65;
    include /etc/nginx/conf.d/*.conf;
}
Create docker-compose.yaml. Port 16443 must be published and the nginx.conf above mounted so the stream proxy takes effect:
version: '3.6'
services:
  nginx:
    image: nginx
    restart: always
    hostname: nginx
    container_name: nginx
    privileged: true
    ports:
      - 80:80
      - 443:443
      - 16443:16443
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /home/admin/software/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
      - /home/admin/software/docker/nginx/conf/conf.d:/etc/nginx/conf.d
      - /home/admin/software/docker/nginx/html/:/usr/share/nginx/html/
      - /home/admin/software/docker/nginx/logs/:/var/log/nginx/
docker-compose up -d
docker restart nginx
docker logs nginx
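Before testing through the proxy, the configuration syntax can be validated inside the running container:
docker exec nginx nginx -t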
[admin@k8s-master-01 conf]$ curl 127.0.0.1
192.168.1.110
[admin@k8s-master-02 nginx]$ curl 127.0.0.1
192.168.1.111
[admin@k8s-master-03 conf]$ curl 127.0.0.1
192.168.1.112
Install keepalived
Download the source package
wget --no-check-certificate https://www.keepalived.org/software/keepalived-2.2.8.tar.gz
# extract the package and enter the source directory
tar -zxvf keepalived-2.2.8.tar.gz
cd keepalived-2.2.8
./configure --prefix=/home/admin/software/keepalived --sysconf=/etc
sudo make
sudo make install
sudo vim /etc/keepalived/check_apiserver.sh
if [ "$( ps -ef | grep "nginx: master process" | grep -v grep ) " == "" ] ; then
docker restart nginx
sleep 5
if [ "$( ps -ef | grep "nginx: master process" | grep -v grep ) " == "" ] ; then
docker stop keepalived
fi
fi
sudo chmod +x /etc/keepalived/check_apiserver.sh
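The script can be exercised by hand before wiring it into keepalived; with nginx running it should exit silently:
sudo bash /etc/keepalived/check_apiserver.sh && echo "check passed"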
cd /etc/keepalived
sudo cp keepalived.conf.sample keepalived.conf
sudo vim keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
your_mailbox@163.com
}
notification_email_from your_mailbox@163.com
smtp_server smtp.163.com
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    mcast_src_ip 192.168.1.110
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.1.200
    }
    track_script {
        chk_apiserver
    }
}
On k8s-master-02 and k8s-master-03 use the same file, but set state BACKUP, change mcast_src_ip to the node's own IP, and lower priority (for example 90 and 80) so the VIP prefers k8s-master-01.
sudo systemctl daemon-reload
sudo systemctl enable --now keepalived
ip address show
[admin@k8s-master-01 ~]$ ss -anput | grep ":16443"
tcp LISTEN 0 2000 127.0.0.1:16443 *:*
tcp LISTEN 0 2000 *:16443 *:*
[admin@k8s-master-02 ~]$ ss -anput | grep ":16443"
tcp LISTEN 0 2000 127.0.0.1:16443 *:*
tcp LISTEN 0 2000 *:16443 *:*
[admin@k8s-master-03 ~]$ ss -anput | grep ":16443"
tcp LISTEN 0 2000 127.0.0.1:16443 *:*
tcp LISTEN 0 2000 *:16443 *:*
[admin@k8s-master-01 ~]$ sudo systemctl stop keepalived.service
sudo systemctl start keepalived.service
sudo systemctl enable keepalived.service
[admin@k8s-master-01 keepalived]$ ip address show
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:71:20:2c brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.110/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.200/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::fa46:8575:c903:ffe8/64 scope link tentative noprefixroute dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::99e:8f40:322e:88a7/64 scope link tentative noprefixroute dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::805c:cc25:201d:c671/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
[admin@k8s-master-01 ~]$ curl 192.168.1.200
192.168.1.110
[admin@k8s-master-02 ~]$ curl 192.168.1.200
192.168.1.110
[admin@k8s-master-03 ~]$ curl 192.168.1.200
192.168.1.110
Test failover: stop keepalived on k8s-master-01 and the VIP moves to k8s-master-02.
sudo systemctl stop keepalived.service
[admin@k8s-master-02 ~]$ ip address show
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:cc:94:57 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.111/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.200/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::99e:8f40:322e:88a7/64 scope link tentative noprefixroute dadfailed
       valid_lft forever preferred_lft forever
    inet6 fe80::fa46:8575:c903:ffe8/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
[admin@k8s-master-01 ~]$ curl 192.168.1.200
192.168.1.111
[admin@k8s-master-02 ~]$ curl 192.168.1.200
192.168.1.111
[admin@k8s-master-03 ~]$ curl 192.168.1.200
192.168.1.111
Restart keepalived on k8s-master-01 afterwards to reclaim the VIP:
sudo systemctl restart keepalived.service
Install docker-ce and cri-dockerd
Install docker-ce
sudo wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
sudo yum -y install docker-ce
sudo systemctl enable --now docker
[admin@k8s-master-01 ~]$ docker -v
Docker version 25.0.0, build e758fe5
[admin@k8s-master-01 ~]$ sudo tee /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://4bsnyw1n.mirror.aliyuncs.com",
"https://registry.docker-cn.com",
"https://docker.mirrors.ustc.edu.cn",
"https://dockerhub.azk8s.cn",
"http://hub-mirror.c.163.com"
]
}
EOF
[admin@k8s-master-01 ~]$ sudo systemctl restart docker
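After the restart, confirm that Docker picked up the systemd cgroup driver (it must match the kubelet's cgroupDriver configured later):
sudo docker info | grep -i "cgroup driver"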
Install cri-dockerd
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8-3.el7.x86_64.rpm
sudo yum -y install cri-dockerd-0.3.8-3.el7.x86_64.rpm
sudo vim /usr/lib/systemd/system/cri-docker.service
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9 --container-runtime-endpoint fd://
sudo systemctl daemon-reload
sudo systemctl enable --now cri-docker
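A quick check that the service is up and the CRI socket exists before moving on (the socket path is the one passed to kubeadm later):
systemctl is-active cri-docker
ls -l /var/run/cri-dockerd.sock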
Install the Kubernetes cluster
Prepare the package repository
cat << EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
# exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
Install and configure the cluster tools
sudo yum install -y kubelet kubeadm kubectl
sudo yum list kubeadm.x86_64 --showduplicates | sort -r
sudo yum list kubelet.x86_64 --showduplicates | sort -r
sudo yum list kubectl.x86_64 --showduplicates | sort -r
sudo yum -y install kubeadm-1.29.0-150500.1.1 kubelet-1.29.0-150500.1.1 kubectl-1.29.0-150500.1.1
sudo cp /etc/sysconfig/kubelet{,.bak}
sudo vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
sudo systemctl enable kubelet
sudo yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
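Verify that the pinned versions were installed before proceeding:
kubeadm version -o short
kubelet --version
kubectl version --client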
Download the cluster images
[admin@k8s-master-01 sysconfig]$ kubeadm config images list
registry.k8s.io/kube-apiserver:v1.29.1
registry.k8s.io/kube-controller-manager:v1.29.1
registry.k8s.io/kube-scheduler:v1.29.1
registry.k8s.io/kube-proxy:v1.29.1
registry.k8s.io/coredns/coredns:v1.11.1
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.10-0
kubeadm config images pull --cri-socket unix:///var/run/cri-dockerd.sock --kubernetes-version=v1.29.0
docker save -o k8s-1-29-0.tar $images_list
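The $images_list variable used above is not defined anywhere in this guide; a minimal sketch that fills it from kubeadm's own image list before running docker save (the variable name is only illustrative):
images_list=$(kubeadm config images list --kubernetes-version=v1.29.0)
echo $images_list
Alternatively, the pre-packaged tarball below can be downloaded and loaded instead.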
wget http://mirrors.oby.ink/docker-images/k8s-1.29.0.tar
docker load -i k8s-1.29.0.tar
[admin@k8s-master-01 software]$ docker images
REPOSITORY                                TAG        IMAGE ID       CREATED         SIZE
registry.k8s.io/kube-apiserver            v1.29.0    1443a367b16d   5 weeks ago     127MB
registry.k8s.io/kube-controller-manager   v1.29.0    0824682bcdc8   5 weeks ago     122MB
registry.k8s.io/kube-scheduler            v1.29.0    7ace497ddb8e   5 weeks ago     59.5MB
registry.k8s.io/kube-proxy                v1.29.0    98262743b26f   5 weeks ago     82.2MB
registry.k8s.io/etcd                      3.5.10-0   a0eed15eed44   2 months ago    148MB
registry.k8s.io/coredns/coredns           v1.11.1    cbb01a7bd410   5 months ago    59.8MB
registry.k8s.io/pause                     3.9        e6f181688397   15 months ago   744kB
Modify the initialization file
kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm-config.yaml
kubeadm config print init-defaults --component-configs InitConfiguration
kubeadm config print init-defaults --component-configs ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.110
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master-01
  taints: null
---
apiServer:
  certSANs:
  - 192.168.1.200
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  external:
    endpoints:
    - http://192.168.1.110:2379
    - http://192.168.1.111:2379
    - http://192.168.1.112:2379
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.29.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
controlPlaneEndpoint: "192.168.1.200:16443"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerRuntimeEndpoint: ""
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMaximumGCAge: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
    verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
Cluster initialization
Generate the configuration file kubeadm-config.yaml
[root@k8s-master-01 ~]# cat > kubeadm-config.yaml << EOF
---
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.110
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.29.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
apiServer:
  certSANs:
  - 192.168.1.200
controlPlaneEndpoint: "192.168.1.200:16443"
etcd:
  external:
    endpoints:
    - http://192.168.1.110:2379
    - http://192.168.1.111:2379
    - http://192.168.1.112:2379
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
    verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
EOF
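Before running the real initialization, the configuration can be exercised with kubeadm's dry-run mode, which renders the manifests and runs preflight checks without changing the host:
sudo kubeadm init --config kubeadm-config.yaml --dry-run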
Initialize the cluster
[admin@k8s-master-01 ~]$ sudo kubeadm init --config kubeadm-config.yaml --upload-certs --v=9
[init] Using Kubernetes version: v1.29.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.110]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [192.168.1.110 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [192.168.1.110 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 4.002323 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 6euw6m.my26a85hjre4vk2t
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
  kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
    --control-plane --certificate-key 98e4dd27ef8d524ef156c0af7867a51ddd99cb8f32e4af9cf071dec1b553b044
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85
Notes:
– kubernetes-version: the K8s version; must match what was installed above.
– pod-network-cidr: the Pod network; must match the CNI network component YAML deployed below.
– apiserver-advertise-address: the address nodes use to reach the control plane. In a highly available setup like this one, the control-plane endpoint must be the VIP (192.168.1.200:16443); in a single-master setup the default master address is sufficient.
– cri-socket: selects the cri-dockerd socket; for containerd use --cri-socket unix:///run/containerd/containerd.sock
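If the bootstrap token (24h TTL) or the uploaded certificate key (deleted after two hours) expires before the remaining nodes join, fresh values can be generated on the first master:
# prints a ready-to-use worker join command with a new token
kubeadm token create --print-join-command
# re-uploads the control-plane certificates and prints a new certificate key
sudo kubeadm init phase upload-certs --upload-certs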
kubeadm join 192.168.1.113:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:d7cd6b10f262d654889574b079ea81bcabc926f1b2b45b9facc62918135a10e3 \
    --cri-socket unix:///var/run/cri-dockerd.sock
[admin@k8s-master-01 kubernetes]$ mkdir -p $HOME/.kube
[admin@k8s-master-01 kubernetes]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[admin@k8s-master-01 kubernetes]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
[admin@k8s-master-01 kubernetes]$ kubectl get nodes
NAME            STATUS     ROLES           AGE   VERSION
k8s-master-01   NotReady   control-plane   95s   v1.29.1
Join the other master nodes to the cluster
[admin@k8s-master-02 ~]$ sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
    --control-plane --certificate-key 98e4dd27ef8d524ef156c0af7867a51ddd99cb8f32e4af9cf071dec1b553b044 \
    --cri-socket unix:///var/run/cri-dockerd.sock
[admin@k8s-master-02 ~]$ mkdir -p $HOME/.kube
[admin@k8s-master-02 ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[admin@k8s-master-02 ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
[admin@k8s-master-03 ~]$ sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
    --control-plane --certificate-key 98e4dd27ef8d524ef156c0af7867a51ddd99cb8f32e4af9cf071dec1b553b044 \
    --cri-socket unix:///var/run/cri-dockerd.sock
[admin@k8s-master-03 ~]$ mkdir -p $HOME/.kube
[admin@k8s-master-03 ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[admin@k8s-master-03 ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
[admin@k8s-master-01 ~]$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-01 NotReady control-plane 8m11s v1.29.1
k8s-master-02 NotReady control-plane 2m49s v1.29.1
k8s-master-03 NotReady control-plane 114s v1.29.1
Join the worker nodes
[admin@k8s-node-01 ~]$ sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
    --cri-socket unix:///var/run/cri-dockerd.sock
[admin@k8s-node-02 ~]$ sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
    --cri-socket unix:///var/run/cri-dockerd.sock
[admin@k8s-master-01 ~]$ kubectl get nodes
NAME            STATUS     ROLES           AGE     VERSION
k8s-master-01   NotReady   control-plane   11m     v1.29.1
k8s-master-02   NotReady   control-plane   6m36s   v1.29.1
k8s-master-03   NotReady   control-plane   5m41s   v1.29.1
k8s-node-01     NotReady   <none>          97s     v1.29.1
k8s-node-02     NotReady   <none>          75s     v1.29.1
Install the Calico network plugin
Method 1: install via the official manifests
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/tigera-operator.yaml
[admin@k8s-master-01 software]$ kubectl get pods -n tigera-operator
NAME                               READY   STATUS    RESTARTS   AGE
tigera-operator-8547bd6cc6-l7lnh   1/1     Running   0          3m58s
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/custom-resources.yaml
Edit custom-resources.yaml and change the default IP pool CIDR to match the cluster podSubnet:
[root@k8s-master01 ~]# vim custom-resources.yaml
......
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
[admin@k8s-master-01 software]$ kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
[admin@k8s-master-01 software]$ watch kubectl get pods -n calico-system
Every 2.0s: kubectl get pods -n calico-system                  Sun Jan 28 00:38:46 2024
NAME                                       READY   STATUS                  RESTARTS   AGE
calico-kube-controllers-685f7c9b88-9kxk4   0/1     Pending                 0          2m19s
calico-node-2c5d8                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-7r6cf                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-bmgn5                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-d74f4                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-xsxz5                          0/1     Init:ImagePullBackOff   0          2m20s
calico-typha-7dd5648769-6m4w7              0/1     ImagePullBackOff        0          2m13s
calico-typha-7dd5648769-lq966              0/1     ImagePullBackOff        0          2m20s
calico-typha-7dd5648769-wpk7r              0/1     ImagePullBackOff        0          2m13s
The Calico images cannot be pulled from the default registries, so remove the operator deployment and fall back to pre-downloaded offline images with the calico.yaml manifest instead.
[admin@k8s-master-01 software]$ kubectl delete -f custom-resources.yaml
installation.operator.tigera.io "default" deleted
apiserver.operator.tigera.io "default" deleted
[admin@k8s-master-01 software]$ kubectl delete -f tigera-operator.yaml
namespace "tigera-operator" deleted
wget http://mirrors.oby.ink/docker-images/k8s-calico-3.27.0.tar
docker load -i k8s-calico-3.27.0.tar
[admin@k8s-master-01 software]$ docker images
REPOSITORY                                TAG        IMAGE ID       CREATED         SIZE
calico/kube-controllers                   v3.27.0    4e87edec0297   6 weeks ago     75.5MB
calico/cni                                v3.27.0    8e8d96a874c0   6 weeks ago     211MB
calico/node                               v3.27.0    1843802b91be   6 weeks ago     340MB
registry.k8s.io/kube-apiserver            v1.29.0    1443a367b16d   6 weeks ago     127MB
registry.k8s.io/kube-scheduler            v1.29.0    7ace497ddb8e   6 weeks ago     59.5MB
registry.k8s.io/kube-controller-manager   v1.29.0    0824682bcdc8   6 weeks ago     122MB
registry.k8s.io/kube-proxy                v1.29.0    98262743b26f   6 weeks ago     82.2MB
registry.k8s.io/etcd                      3.5.10-0   a0eed15eed44   2 months ago    148MB
registry.k8s.io/coredns/coredns           v1.11.1    cbb01a7bd410   5 months ago    59.8MB
registry.k8s.io/pause                     3.9        e6f181688397   15 months ago   744kB
nginx                                     latest     605c77e624dd   2 years ago     141MB
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/calico.yaml
Edit calico.yaml and set CALICO_IPV4POOL_CIDR (commented out by default) to the cluster's podSubnet:
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
[admin@k8s-master-01 software]$ kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created
[admin@k8s-master-01 software]$ kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5fc7d6cf67-fvgjx   1/1     Running   0          18s
calico-node-cfr6w                          0/1     Running   0          18s
calico-node-hnxmf                          0/1     Running   0          18s
calico-node-qvknm                          0/1     Running   0          18s
calico-node-rnnjs                          0/1     Running   0          18s
calico-node-vvdsd                          0/1     Running   0          18s
coredns-76f75df574-q822k                   1/1     Running   0          52m
coredns-76f75df574-sjwps                   1/1     Running   0          52m
kube-apiserver-k8s-master-01               1/1     Running   0          52m
kube-apiserver-k8s-master-02               1/1     Running   0          47m
kube-apiserver-k8s-master-03               1/1     Running   0          46m
kube-controller-manager-k8s-master-01      1/1     Running   0          52m
kube-controller-manager-k8s-master-02      1/1     Running   0          47m
kube-controller-manager-k8s-master-03      1/1     Running   0          46m
kube-proxy-7hqnk                           1/1     Running   0          47m
kube-proxy-9xrpd                           1/1     Running   0          42m
kube-proxy-cdgv7                           1/1     Running   0          52m
kube-proxy-kv9b7                           1/1     Running   0          42m
kube-proxy-r24ww                           1/1     Running   0          46m
kube-scheduler-k8s-master-01               1/1     Running   0          52m
kube-scheduler-k8s-master-02               1/1     Running   0          47m
kube-scheduler-k8s-master-03               1/1     Running   0          46m
[admin@k8s-master-01 ~]$ kubectl get nodes
NAME            STATUS   ROLES           AGE   VERSION
k8s-master-01   Ready    control-plane   53m   v1.29.1
k8s-master-02   Ready    control-plane   48m   v1.29.1
k8s-master-03   Ready    control-plane   47m   v1.29.1
k8s-node-01     Ready    <none>          43m   v1.29.1
k8s-node-02     Ready    <none>          42m   v1.29.1
[admin@k8s-node-01 ~]$ kubectl get pods -n kube-system
E0128 00:53:49.201197   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.201534   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.203053   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.203237   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.205144   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
The connection to the server localhost:8080 was refused - did you specify the right host or port?
kubectl fails on the worker node because no kubeconfig is present there; copy /etc/kubernetes/admin.conf (or ~/.kube/config) from a master node to the worker first. After that the pods can be listed from the node as well:
[root@k8s-node-01 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5fc7d6cf67-fvgjx   1/1     Running   0          12m
calico-node-cfr6w                          1/1     Running   0          12m
calico-node-hnxmf                          1/1     Running   0          12m
calico-node-qvknm                          1/1     Running   0          12m
calico-node-rnnjs                          1/1     Running   0          12m
calico-node-vvdsd                          1/1     Running   0          12m
coredns-76f75df574-q822k                   1/1     Running   0          64m
coredns-76f75df574-sjwps                   1/1     Running   0          64m
kube-apiserver-k8s-master-01               1/1     Running   0          64m
kube-apiserver-k8s-master-02               1/1     Running   0          59m
kube-apiserver-k8s-master-03               1/1     Running   0          58m
kube-controller-manager-k8s-master-01      1/1     Running   0          64m
kube-controller-manager-k8s-master-02      1/1     Running   0          59m
kube-controller-manager-k8s-master-03      1/1     Running   0          58m
kube-proxy-7hqnk                           1/1     Running   0          59m
kube-proxy-9xrpd                           1/1     Running   0          54m
kube-proxy-cdgv7                           1/1     Running   0          64m
kube-proxy-kv9b7                           1/1     Running   0          54m
kube-proxy-r24ww                           1/1     Running   0          58m
kube-scheduler-k8s-master-01               1/1     Running   0          64m
kube-scheduler-k8s-master-02               1/1     Running   0          59m
kube-scheduler-k8s-master-03               1/1     Running   0          58m
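As a final check that scheduling, networking, and the Service layer all work, a throwaway deployment can be created and removed (a minimal sketch using standard kubectl commands; the nginx image and the names are arbitrary):
kubectl create deployment nginx-test --image=nginx --replicas=2
kubectl expose deployment nginx-test --port=80 --type=NodePort
kubectl get pods -o wide
kubectl get svc nginx-test
# clean up after confirming the pods are Running and the NodePort responds
kubectl delete svc,deployment nginx-test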