Kubernetes v1.29.0 Highly Available Cluster Installation (3 Masters, 2 Workers)

Server Preparation

Server deployment overview

Host IP (spec)                        Hostname
192.168.1.110 (4C / 20GB / 200GB)     k8s-master-01
192.168.1.111 (4C / 20GB / 200GB)     k8s-master-02
192.168.1.112 (4C / 20GB / 200GB)     k8s-master-03
192.168.1.113 (4C / 20GB / 200GB)     k8s-node-01
192.168.1.114 (4C / 20GB / 200GB)     k8s-node-02
192.168.1.200                         VIP (virtual IP)

Pre-deployment Preparation

Set hostnames and configure /etc/hosts resolution

# Set the hostname according to the plan (run each command on its own node)
# On 192.168.1.110:
hostnamectl set-hostname k8s-master-01
# On 192.168.1.111:
hostnamectl set-hostname k8s-master-02
# On 192.168.1.112:
hostnamectl set-hostname k8s-master-03
# On 192.168.1.113:
hostnamectl set-hostname k8s-node-01
# On 192.168.1.114:
hostnamectl set-hostname k8s-node-02

# Add host resolution entries to /etc/hosts (on all nodes)
sudo tee -a /etc/hosts << EOF
192.168.1.110 k8s-master-01
192.168.1.111 k8s-master-02
192.168.1.112 k8s-master-03
192.168.1.113 k8s-node-01
192.168.1.114 k8s-node-02
EOF
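
As a quick sanity check (not part of the original steps), the small loop below resolves each hostname through /etc/hosts and pings it once from the current node; a failure usually points to a typo in the hosts entries or to a node that is not up yet.

# Verify that every hostname resolves and is reachable
for h in k8s-master-01 k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02; do
    getent hosts "$h"
    ping -c 1 -W 1 "$h" > /dev/null && echo "$h reachable" || echo "$h NOT reachable"
done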

Configure a static host IP

# Check the current IP
ip a s ens33

# Edit the static IP configuration
sudo vim /etc/sysconfig/network-scripts/ifcfg-ens33

TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static        # use a static IP address (default is dhcp)
IPADDR=192.168.1.110    # static IP address
NETMASK=255.255.255.0   # subnet mask
GATEWAY=192.168.1.1     # gateway address
DNS1=192.168.1.1        # DNS server
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=63acc257-a602-4895-b5de-ef4da082a4de
DEVICE=ens33
ONBOOT=yes
HWADDR=00:0C:29:BB:18:A8

# Restart the network service
sudo systemctl restart network.service

Disable the firewall

sudo systemctl stop firewalld.service
sudo systemctl disable firewalld.service
sudo systemctl status firewalld.service

Disable SELinux

# Disable temporarily
[admin@k8s-master-01 ~]$ sudo setenforce 0
# Disable permanently
[admin@k8s-master-01 ~]$ sudo sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[admin@k8s-master-01 ~]$ sudo sestatus
SELinux status:                 disabled

Time synchronization

# List the scheduled cron jobs (if ntpdate is missing, install it with: yum -y install ntpdate)
crontab -l
# Edit the crontab
crontab -e
# Add the following entry
0 */1 * * *  /usr/sbin/ntpdate time.aliyun.com
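
Before relying on the hourly cron job, it can help to run one synchronization by hand and confirm the clock offset it reports; a minimal check:

# Run one synchronization immediately and print the adjustment that was applied
sudo /usr/sbin/ntpdate time.aliyun.com
# Confirm the current system time
date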

Upgrade the operating system kernel

# Check the current kernel version
[admin@k8s-master-01 ~]$ uname -r
3.10.0-1160.el7.x86_64

# 7. Upgrade the OS kernel (for production, use as recent a kernel as possible; 5.0+ is recommended)
# 7.1 Import the elrepo GPG key
sudo rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# 7.2 Install the elrepo yum repository
sudo yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
# 7.3 Install the kernel-lt package (lt = long-term support branch; kernel-ml is the mainline branch)
sudo yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
# 7.4 Set the default grub2 boot entry to 0
sudo grub2-set-default 0
# 7.5 Regenerate the grub2 boot configuration
sudo grub2-mkconfig -o /boot/grub2/grub.cfg
# 7.6 Reboot for the new kernel to take effect
reboot
# 7.7 Check the kernel version after the reboot
[admin@k8s-master-01 ~]$ uname -r
5.4.267-1.el7.elrepo.x86_64

Configure kernel IP forwarding and bridge filtering

# Add the bridge-filtering and IP-forwarding configuration file
sudo tee /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# Load the br_netfilter module
sudo modprobe br_netfilter
# Verify that the module is loaded
[admin@k8s-master-01 ~]$ lsmod | grep br_netfilter
br_netfilter           28672  0
# Apply the settings
sudo sysctl --system
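
To confirm the new kernel parameters really took effect, the values can be read back one by one; each of the first three should print 1, and swappiness should print 0:

sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
sysctl net.ipv4.ip_forward
sysctl vm.swappiness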

Install ipset and ipvsadm

# 9. Install ipset and ipvsadm
# 9.1 Install the packages
sudo yum -y install ipset ipvsadm
# 9.2 Configure how the ipvs modules are loaded: list the modules to load
sudo mkdir -p /etc/sysconfig/ipvsadm
sudo tee /etc/sysconfig/ipvsadm/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# 9.3 Make the script executable, run it, and check that the modules are loaded
sudo chmod 755 /etc/sysconfig/ipvsadm/ipvs.modules
sudo bash /etc/sysconfig/ipvsadm/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
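
The script above only loads the modules for the running system; after a reboot they would have to be loaded again. One common way to make the loading automatic, assuming a systemd-based host such as this CentOS 7 setup, is a modules-load.d drop-in (a sketch, in addition to the original script):

# Have systemd-modules-load load the ipvs modules at every boot
sudo tee /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF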

Disable the swap partition

# Check the swap partition (if the Swap line shows non-zero values, swap is enabled)
free -m
# Disable swap temporarily
sudo swapoff -a
# Disable swap permanently
sudo sed -ri 's/.*swap.*/#&/' /etc/fstab
# Verify
grep swap /etc/fstab

Configure passwordless SSH login

# On the k8s-master-01 node, generate a key pair and create the authorized_keys file
[admin@k8s-master-01 ~]$ ssh-keygen
Generating public/private rsa key pair.
# File in which to save the key
Enter file in which to save the key (/home/admin/.ssh/id_rsa):
# Passphrase (press Enter for an empty passphrase to get true passwordless login; here "giraffe" was entered, so ssh will still prompt for it later)
Enter passphrase (empty for no passphrase):
# Repeat the same passphrase
Enter same passphrase again:
# The private key is saved to /home/admin/.ssh/id_rsa
Your identification has been saved in /home/admin/.ssh/id_rsa.
# The public key is saved to /home/admin/.ssh/id_rsa.pub
Your public key has been saved in /home/admin/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:1gneqsEe9sALBeKJ/tti6LMjiPc5+rHonYSp//Kx2IM admin@k8s-master-01
The key's randomart image is:
+---[RSA 2048]----+
|                 |
|                 |
|  . .   .        |
| o o . . + .     |
|. o   . S +      |
|.  o + . .       |
|o.+.= B .        |
|+=EO+@ B         |
|+*@X#++ .        |
+----[SHA256]-----+
[admin@k8s-master-01 ~]$ cd /home/admin/.ssh
[admin@k8s-master-01 .ssh]$ ll
total 8
-rw------- 1 admin admin 1766 Jan 21 00:11 id_rsa
-rw-r--r-- 1 admin admin  401 Jan 21 00:11 id_rsa.pub

# Copy the public key into authorized_keys
[admin@k8s-master-01 .ssh]$ cp id_rsa.pub authorized_keys
[admin@k8s-master-01 .ssh]$ ll
total 12
-rw-r--r-- 1 admin admin  401 Jan 21 00:22 authorized_keys
-rw------- 1 admin admin 1766 Jan 21 00:21 id_rsa
-rw-r--r-- 1 admin admin  401 Jan 21 00:21 id_rsa.pub

# Copy the .ssh directory to the other servers (111 through 114); the admin password will be prompted for each host
scp -r /home/admin/.ssh/* 192.168.1.111:/home/admin/.ssh/
scp -r /home/admin/.ssh/* 192.168.1.112:/home/admin/.ssh/
scp -r /home/admin/.ssh/* 192.168.1.113:/home/admin/.ssh/
scp -r /home/admin/.ssh/* 192.168.1.114:/home/admin/.ssh/
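
Copying the whole .ssh directory overwrites any keys that already exist on the target hosts. If that is a concern, an alternative sketch using ssh-copy-id only appends the public key to each remote authorized_keys (same host list as above; you will be prompted for the admin password once per host):

for ip in 192.168.1.111 192.168.1.112 192.168.1.113 192.168.1.114; do
    ssh-copy-id -i /home/admin/.ssh/id_rsa.pub admin@"$ip"
done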

# Verify on every host that the nodes can log in to one another without a password, e.g.: ssh admin@192.168.1.111
ssh username@remote_host
#============================ Example start ==================================
# Example: connecting from k8s-node-01 to 192.168.1.110
[admin@k8s-node-01 ~]$ ssh admin@192.168.1.110
The authenticity of host '192.168.1.110 (192.168.1.110)' can't be established.
ECDSA key fingerprint is SHA256:8KouzgTNv7YwgKZqxi9+pMd2XCHErkIx3COxReKqu8A.
ECDSA key fingerprint is MD5:63:2a:20:10:03:75:fe:24:90:b5:89:55:b5:1d:6c:83.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.1.110' (ECDSA) to the list of known hosts.
Enter passphrase for key '/home/admin/.ssh/id_rsa': 
Last login: Sat Jan 20 23:42:50 2024 from 192.168.1.24
[admin@k8s-master-01 ~]$ exit
logout
Connection to 192.168.1.110 closed.
#============================ Example end ==================================

etcd Cluster Deployment

# 1. Install etcd on the cluster master nodes (Kubernetes 1.29 requires etcd v3.5.0 or newer)
# 1.1 Download the release tarball
wget https://github.com/coreos/etcd/releases/download/v3.5.11/etcd-v3.5.11-linux-amd64.tar.gz
# 1.2 Extract it
tar xzvf etcd-v3.5.11-linux-amd64.tar.gz
# 1.3 Enter the directory
cd etcd-v3.5.11-linux-amd64/
# 1.4 Move the binaries; note: /usr/local/bin must match the path used in etcd.service
sudo mv etcd* /usr/local/bin

# 2. Create the etcd configuration (systemd unit) on each master
# 2.1 etcd unit on k8s-master-01
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' 
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-01 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.1.110:2380 \
--listen-client-urls=http://192.168.1.110:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.1.110:2379 \
--initial-advertise-peer-urls=http://192.168.1.110:2380 \
--initial-cluster=k8s-master-01=http://192.168.1.110:2380,k8s-master-02=http://192.168.1.111:2380,k8s-master-03=http://192.168.1.112:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# 2.2 etcd unit on k8s-master-02
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' 
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-02 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.1.111:2380 \
--listen-client-urls=http://192.168.1.111:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.1.111:2379 \
--initial-advertise-peer-urls=http://192.168.1.111:2380 \
--initial-cluster=k8s-master-01=http://192.168.1.110:2380,k8s-master-02=http://192.168.1.111:2380,k8s-master-03=http://192.168.1.112:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# 2.3 etcd unit on k8s-master-03
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' 
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-03 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.1.112:2380 \
--listen-client-urls=http://192.168.1.112:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.1.112:2379 \
--initial-advertise-peer-urls=http://192.168.1.112:2380 \
--initial-cluster=k8s-master-01=http://192.168.1.110:2380,k8s-master-02=http://192.168.1.111:2380,k8s-master-03=http://192.168.1.112:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# 3. Start etcd on all 3 master nodes and enable it at boot
sudo systemctl enable --now etcd
# Check the version
[admin@k8s-master-01 ~]$ etcd --version
etcd Version: 3.5.11
Git SHA: 3b252db4f
Go Version: go1.20.12
Go OS/Arch: linux/amd64

# Check the etcd service status
[admin@k8s-master-01 etcd-v3.5.11-linux-amd64]$ systemctl status etcd
● etcd.service - Etcd Server
   Loaded: loaded (/usr/lib/systemd/system/etcd.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2024-01-23 23:07:35 CST; 16s ago
 Main PID: 6494 (etcd)
    Tasks: 9
   Memory: 26.1M
   CGroup: /system.slice/etcd.service
           └─6494 /usr/local/bin/etcd --name=k8s-master-01 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=http://192.168. ......
# List the etcd cluster members
[admin@k8s-master-01 ~]$ etcdctl member list
629900a511866cc5, started, k8s-master-02, http://192.168.1.111:2380, http://192.168.1.111:2379, false
f4525f168244cf0e, started, k8s-master-03, http://192.168.1.112:2380, http://192.168.1.112:2379, false
fa5c82427d33e98c, started, k8s-master-01, http://192.168.1.110:2380, http://192.168.1.110:2379, false
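
Besides listing the members, it is worth checking that every endpoint is healthy and seeing which member currently leads; a minimal check with etcdctl, using the plaintext client URLs configured above:

# Health of each endpoint
etcdctl --endpoints=http://192.168.1.110:2379,http://192.168.1.111:2379,http://192.168.1.112:2379 endpoint health
# Status table (DB size, leader flag, raft term)
etcdctl --endpoints=http://192.168.1.110:2379,http://192.168.1.111:2379,http://192.168.1.112:2379 endpoint status --write-out=table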

Load Balancer Deployment: Nginx + Keepalived

192.168.1.110 k8s-master-01, etcd01, keepalived + nginx (VIP: 192.168.1.200)
192.168.1.111 k8s-master-02, etcd02, keepalived + nginx (VIP: 192.168.1.200)
192.168.1.112 k8s-master-03, etcd03, keepalived + nginx (VIP: 192.168.1.200)
192.168.1.113 k8s-node-01
192.168.1.114 k8s-node-02

keepalived + nginx provides high availability plus reverse proxying. To save servers, keepalived and nginx are deployed on the master nodes themselves.
keepalived manages a virtual IP (VIP, 192.168.1.200) that is bound to whichever master currently holds it, and nginx reverse-proxies to the three master nodes.
When the Kubernetes cluster is initialized, the VIP is used as the control-plane endpoint, so after installation kubectl clients access vip:16443.
Port 16443 is the port nginx listens on; nginx forwards the traffic to port 6443 on the three master nodes.
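
Once the cluster has been initialized later on, the whole chain (VIP -> nginx -> apiserver) can be checked by hitting the apiserver version endpoint through the VIP. This is a minimal check to run only after kubeadm init; -k skips certificate verification, and the expected result is a JSON version payload (or a 401/403 if anonymous access is disabled, which still proves the proxy path works):

curl -k https://192.168.1.200:16443/version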

Installation and Deployment

Install nginx

mkdir -p /home/admin/software/docker/nginx/{conf,html,cert,logs}
# Create an html test page on each master:
[admin@k8s-master-01 ~]$ echo '192.168.1.110' > /home/admin/software/docker/nginx/html/index.html
[admin@k8s-master-02 ~]$ echo '192.168.1.111' > /home/admin/software/docker/nginx/html/index.html
[admin@k8s-master-03 ~]$ echo '192.168.1.112' > /home/admin/software/docker/nginx/html/index.html
vim /home/admin/software/docker/nginx/conf/nginx.conf

# =================================== nginx configuration start =========================
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log notice;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}

# The stream block below is the addition; everything else can stay at its defaults
stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;
    upstream k8s-apiserver {
       server 192.168.1.110:6443;               # k8s-master-01 IP and port 6443
       server 192.168.1.111:6443;               # k8s-master-02 IP and port 6443
       server 192.168.1.112:6443;               # k8s-master-03 IP and port 6443
    }
    server {
       listen 16443;                            # listen on 16443; nginx shares the machine with the apiserver, so 6443 cannot be reused
       proxy_pass k8s-apiserver;                # reverse proxy with the stream proxy_pass directive
    }
}
http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile        on;
    #tcp_nopush     on;
    keepalive_timeout  65;
    #gzip  on;
    include /etc/nginx/conf.d/*.conf;
}
# =================================== nginx configuration end =========================
# docker-compose.yml for the nginx container (the main nginx.conf containing the stream block is mounted, and port 16443 is published)
version: '3.6'
services:
  nginx:
    image: nginx
    restart: always
    hostname: nginx
    container_name: nginx
    privileged: true
    ports:
      - 80:80
      - 443:443
      - 16443:16443
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /home/admin/software/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf  # main configuration file with the stream block
      - /home/admin/software/docker/nginx/conf/conf.d:/etc/nginx/conf.d          # extra configs included by the main configuration
      - /home/admin/software/docker/nginx/html/:/usr/share/nginx/html/           # default index page
      #- /home/admin/software/docker/nginx/cert/:/etc/nginx/cert
      - /home/admin/software/docker/nginx/logs/:/var/log/nginx/                  # log files
# Start the container; run this in the directory containing docker-compose.yml
docker-compose up -d
# Restart the container
docker restart nginx
# View the logs
docker logs nginx

[admin@k8s-master-01 conf]$ curl 127.0.0.1
192.168.1.110
[admin@k8s-master-02 nginx]$ curl 127.0.0.1
192.168.1.111
[admin@k8s-master-03 conf]$ curl 127.0.0.1
192.168.1.112

Install keepalived

# Download the source tarball
wget --no-check-certificate https://www.keepalived.org/software/keepalived-2.2.8.tar.gz
# Extract and enter the source directory
tar -zxvf keepalived-2.2.8.tar.gz
cd keepalived-2.2.8
./configure --prefix=/home/admin/software/keepalived --sysconf=/etc
# Build and install
sudo make
sudo make install
# The health-check script below restarts nginx when it is down. If nginx was installed locally via yum you will want it;
# in this guide nginx runs in docker with restart: always, so the script is optional. Set it up as you prefer.
# Configure the same script on k8s-master-01, 02 and 03.
sudo vim /etc/keepalived/check_apiserver.sh
# =============================== Health-check script start =======================================
#!/bin/bash
# Check whether nginx is running
# If nginx is not running, restart it
if [ "$(ps -ef | grep "nginx: master process" | grep -v grep)" == "" ];then
      # Restart the nginx container
      docker restart nginx
      sleep 5
      # If nginx still failed to come back, stop keepalived so the VIP moves to another node
      if [ "$(ps -ef | grep "nginx: master process" | grep -v grep)" == "" ];then
              systemctl stop keepalived
      fi
fi
# =============================== Health-check script end =======================================
# Make the script executable
sudo chmod +x /etc/keepalived/check_apiserver.sh
cd /etc/keepalived
sudo cp keepalived.conf.sample keepalived.conf
# The MASTER node configuration differs slightly from the BACKUP nodes (if nginx runs in docker you can keep this nginx check script or drop it; either works)
sudo vim keepalived.conf
# ============================== keepalived configuration start ================================
! Configuration File for keepalived
global_defs {
   notification_email {
       your_mailbox@163.com
   }
   notification_email_from your_mailbox@163.com
   smtp_server smtp.163.com
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"   # health-check script
  interval 5                    # check interval (seconds)
  weight  -5                    # priority adjustment when the check fails
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state MASTER                  # MASTER on this node; use BACKUP on the other two masters
  interface ens33               # network interface the instance binds to
  mcast_src_ip 192.168.1.110    # source address: 192.168.1.110 on k8s-master-01, .111 on k8s-master-02, .112 on k8s-master-03
  virtual_router_id 51          # must be identical on all nodes of the same instance
  priority 100                  # the node with the highest priority is elected MASTER
  advert_int 2
  authentication {              # authentication settings
    auth_type PASS              # authentication type: PASS or AH
    auth_pass K8SHA_KA_AUTH     # authentication password
  }
  virtual_ipaddress {           # VIP(s); more than one may be listed
    192.168.1.200
  }
  track_script {                # track the health-check script
   chk_apiserver
  }
}
# ============================== keepalived configuration end ================================

# Start the service and verify
sudo systemctl daemon-reload
# Enable at boot and start now
sudo systemctl enable --now keepalived
# Check the IP addresses
ip address show
[admin@k8s-master-01 ~]$ ss -anput | grep ":16443"
tcp    LISTEN     0      2000   127.0.0.1:16443                 *:*                  
tcp    LISTEN     0      2000      *:16443                 *:* 
[admin@k8s-master-02 ~]$ ss -anput | grep ":16443"
tcp    LISTEN     0      2000   127.0.0.1:16443                 *:*                  
tcp    LISTEN     0      2000      *:16443                 *:* 
[admin@k8s-master-03 ~]$ ss -anput | grep ":16443"
tcp    LISTEN     0      2000   127.0.0.1:16443                 *:*                  
tcp    LISTEN     0      2000      *:16443                 *:* 

# Test whether the VIP fails over: stop keepalived on k8s-master-01
[admin@k8s-master-01 ~]$ sudo systemctl stop keepalived.service
# The VIP now moves to k8s-master-02, and neither k8s-master-01 nor k8s-master-03 holds it
# After keepalived is restarted, the VIP returns to k8s-master-01, because the default configuration is preemptive; this matches the intended design
# Start the service again
sudo systemctl start keepalived.service
# Enable at boot
sudo systemctl enable keepalived.service

# Check the current IPs; note that ens33 now carries an extra address: inet 192.168.1.200/32 scope global ens33
[admin@k8s-master-01 keepalived]$ ip address show
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:71:20:2c brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.110/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.200/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::fa46:8575:c903:ffe8/64 scope link tentative noprefixroute dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::99e:8f40:322e:88a7/64 scope link tentative noprefixroute dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::805c:cc25:201d:c671/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
# Verify access through the VIP
[admin@k8s-master-01 ~]$ curl 192.168.1.200
192.168.1.110
[admin@k8s-master-02 ~]$ curl 192.168.1.200
192.168.1.110
[admin@k8s-master-03 ~]$ curl 192.168.1.200
192.168.1.110

# Stop keepalived on k8s-master-01 and test again
sudo systemctl stop keepalived.service
# The VIP fails over to k8s-master-02
[admin@k8s-master-02 ~]$ ip address show
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:cc:94:57 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.111/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.200/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::99e:8f40:322e:88a7/64 scope link tentative noprefixroute dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::fa46:8575:c903:ffe8/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
       
[admin@k8s-master-01 ~]$ curl 192.168.1.200
192.168.1.111
[admin@k8s-master-02 ~]$ curl 192.168.1.200
192.168.1.111
[admin@k8s-master-03 ~]$ curl 192.168.1.200
192.168.1.111

# Restart keepalived on k8s-master-01 and access the VIP again
sudo systemctl restart keepalived.service

Install docker-ce / cri-dockerd

Install docker-ce

# Prepare the yum repository (using the Aliyun mirror)
sudo wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# Install docker-ce
sudo yum -y install docker-ce

# Enable at boot and start docker now
sudo systemctl enable --now docker

# Check the version
[admin@k8s-master-01 ~]$ docker -v
Docker version 25.0.0, build e758fe5

# Switch the cgroup driver to systemd and configure registry mirrors
[admin@k8s-master-01 ~]$ sudo tee /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
        "https://4bsnyw1n.mirror.aliyuncs.com",
        "https://registry.docker-cn.com",
        "https://docker.mirrors.ustc.edu.cn",
        "https://dockerhub.azk8s.cn",
        "http://hub-mirror.c.163.com"
  ]
}
EOF

# Restart docker so the changes take effect
[admin@k8s-master-01 ~]$ sudo systemctl restart docker
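
After the restart it is worth confirming that docker actually picked up the systemd cgroup driver, since a mismatch with the kubelet is a common cause of node start-up failures; a minimal check:

# Should print: systemd
docker info --format '{{.CgroupDriver}}'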

Install cri-dockerd

# Download the rpm package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8-3.el7.x86_64.rpm
# Install it
sudo yum -y install cri-dockerd-0.3.8-3.el7.x86_64.rpm
# 2.2.3 Edit the service unit
sudo vim /usr/lib/systemd/system/cri-docker.service
# Change line 10 to specify the base container image used for Pods (the "pause" image)
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9 --container-runtime-endpoint fd://
# 2.2.4 Reload systemd, enable at boot and start cri-dockerd now
sudo systemctl daemon-reload
sudo systemctl enable --now cri-docker
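
To verify that the CRI socket is answering, crictl can query it directly. This assumes the cri-tools package (which provides crictl) is installed, for example from the Kubernetes repository configured in the next step, so treat it as an optional check:

# Ask the runtime for its version through the cri-dockerd socket
sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version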

Kubernetes Cluster Installation

Prepare the package repository

# Kubernetes yum repository (community-hosted packages; mind the version in the URL)
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
# exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

Install and configure the cluster tools

# ====================== Install kubelet kubeadm kubectl: start ======================
# Option 1:
#   install the latest version
sudo yum install -y kubelet kubeadm kubectl
# Option 2:
#   install a specific version
#   list the available versions
sudo yum list kubeadm.x86_64 --showduplicates |sort -r
sudo yum list kubelet.x86_64 --showduplicates |sort -r
sudo yum list kubectl.x86_64 --showduplicates |sort -r
#   install the pinned version
sudo yum -y install kubeadm-1.29.0-150500.1.1 kubelet-1.29.0-150500.1.1 kubectl-1.29.0-150500.1.1
# ====================== Install kubelet kubeadm kubectl: end ======================

# Make the kubelet cgroup driver match docker's (systemd)
# Back up the original file
sudo cp /etc/sysconfig/kubelet{,.bak}
# Edit the file
sudo vim /etc/sysconfig/kubelet
# Set the following content
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

# Enable kubelet at boot
sudo systemctl enable kubelet

# Install bash tab completion for kubectl (optional)
sudo yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc

Download the cluster images

#===============================================================
# Option 1 (requires network access to registry.k8s.io, e.g. via a proxy/VPN)
# List the images required by the configuration
[admin@k8s-master-01 sysconfig]$ kubeadm config images list
registry.k8s.io/kube-apiserver:v1.29.1
registry.k8s.io/kube-controller-manager:v1.29.1
registry.k8s.io/kube-scheduler:v1.29.1
registry.k8s.io/kube-proxy:v1.29.1
registry.k8s.io/coredns/coredns:v1.11.1
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.10-0
# Pull the images
sudo kubeadm config images pull --cri-socket unix:///var/run/cri-dockerd.sock --kubernetes-version=v1.29.0
# Save the images to a tarball ($images_list stands for the list of images shown above)
docker save -o k8s-1-29-0.tar $images_list
#===============================================================
# Option 2: use a pre-packaged image tarball
# Download the k8s image tarball
wget http://mirrors.oby.ink/docker-images/k8s-1.29.0.tar
# Load the images into docker
docker load -i k8s-1.29.0.tar
# List the images now present in docker:
[admin@k8s-master-01 software]$ docker images
REPOSITORY                                TAG        IMAGE ID       CREATED         SIZE
registry.k8s.io/kube-apiserver            v1.29.0    1443a367b16d   5 weeks ago     127MB
registry.k8s.io/kube-controller-manager   v1.29.0    0824682bcdc8   5 weeks ago     122MB
registry.k8s.io/kube-scheduler            v1.29.0    7ace497ddb8e   5 weeks ago     59.5MB
registry.k8s.io/kube-proxy                v1.29.0    98262743b26f   5 weeks ago     82.2MB
registry.k8s.io/etcd                      3.5.10-0   a0eed15eed44   2 months ago    148MB
registry.k8s.io/coredns/coredns           v1.11.1    cbb01a7bd410   5 months ago    59.8MB
registry.k8s.io/pause                     3.9        e6f181688397   15 months ago   744kB

Modify the initialization file

# Print the default configuration for the different kinds
kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm-config.yaml
kubeadm config print init-defaults --component-configs InitConfiguration
kubeadm config print init-defaults --component-configs ClusterConfiguration
# [root@k8s-master-01 ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.110
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master-01
  taints: null
---
apiServer:
  certSANs:
  - 192.168.1.200
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  external:
    endpoints:
      - http://192.168.1.110:2379
      - http://192.168.1.111:2379
      - http://192.168.1.112:2379
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.29.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
controlPlaneEndpoint: "192.168.1.200:16443"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerRuntimeEndpoint: ""
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMaximumGCAge: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

Cluster Initialization and Installation

Generate the kubeadm-config.yaml configuration file

[root@k8s-master-01 ~]# cat > kubeadm-config.yaml << EOF
---
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.110
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.29.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
apiServer:
  certSANs:
  - 192.168.1.200
controlPlaneEndpoint: "192.168.1.200:16443"
etcd:
  external:
    endpoints:
      - http://192.168.1.110:2379
      - http://192.168.1.111:2379
      - http://192.168.1.112:2379
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
EOF
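
Before running the real initialization it can be useful to exercise the configuration file without changing the host; a dry run surfaces YAML and field errors early (a sketch; the output is verbose and can be discarded):

# Validate kubeadm-config.yaml without making any changes
sudo kubeadm init --config kubeadm-config.yaml --dry-run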

Cluster initialization

# 3.2.4 Initialize the first master node using the configuration file
# Initialize the cluster
[admin@k8s-master-01 ~]$ sudo kubeadm init --config kubeadm-config.yaml --upload-certs --v=9
[init] Using Kubernetes version: v1.29.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.110]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [192.168.1.110 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-01 localhost] and IPs [192.168.1.110 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 4.002323 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 6euw6m.my26a85hjre4vk2t
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
        --control-plane --certificate-key 98e4dd27ef8d524ef156c0af7867a51ddd99cb8f32e4af9cf071dec1b553b044

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85
        
Notes:
--kubernetes-version: the Kubernetes version; must match the packages installed above.
--pod-network-cidr: the Pod network; must match the CIDR used in the CNI manifest deployed below.
--apiserver-advertise-address / controlPlaneEndpoint: the address nodes use to reach the control plane; for a highly available cluster this must be the VIP (192.168.1.200 here).
--cri-socket: the CRI endpoint; for cri-dockerd use unix:///var/run/cri-dockerd.sock, for containerd use unix:///run/containerd/containerd.sock.
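
The bootstrap token shown in the join commands expires after 24 hours. If nodes are added later, a fresh worker join command can be printed on any existing master, and the certificate key can be re-uploaded for joining additional control-plane nodes; a minimal sketch:

# Print a new worker join command (new token plus the current CA cert hash)
sudo kubeadm token create --print-join-command
# Re-upload the control-plane certificates and print a new certificate key (needed for extra masters)
sudo kubeadm init phase upload-certs --upload-certs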

# Follow-up steps
# Join a worker node (run on the worker, against the VIP endpoint)
sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:d7cd6b10f262d654889574b079ea81bcabc926f1b2b45b9facc62918135a10e3 \
    --cri-socket unix:///var/run/cri-dockerd.sock

# Prepare the kubectl configuration for k8s-master-01
[admin@k8s-master-01 kubernetes]$ mkdir -p $HOME/.kube
[admin@k8s-master-01 kubernetes]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[admin@k8s-master-01 kubernetes]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

[admin@k8s-master-01 kubernetes]$ kubectl get nodes
NAME            STATUS     ROLES           AGE   VERSION
k8s-master-01   NotReady   control-plane   95s   v1.29.1

Join the other master nodes

# k8s-master-02 joins the cluster as a control-plane node
[admin@k8s-master-02 ~]sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
--control-plane --certificate-key 98e4dd27ef8d524ef156c0af7867a51ddd99cb8f32e4af9cf071dec1b553b044  \
--cri-socket unix:///var/run/cri-dockerd.sock

# Prepare the kubectl configuration for k8s-master-02
[admin@k8s-master-02 ~]mkdir -p $HOME/.kube
[admin@k8s-master-02 ~]sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[admin@k8s-master-02 ~]sudo chown $(id -u):$(id -g) $HOME/.kube/config

# k8s-master-03 joins the cluster as a control-plane node
[admin@k8s-master-03 ~]sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85 \
--control-plane --certificate-key 98e4dd27ef8d524ef156c0af7867a51ddd99cb8f32e4af9cf071dec1b553b044  \
--cri-socket unix:///var/run/cri-dockerd.sock

# Prepare the kubectl configuration for k8s-master-03
[admin@k8s-master-03 ~]$ mkdir -p $HOME/.kube
[admin@k8s-master-03 ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[admin@k8s-master-03 ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Check the master nodes; STATUS is NotReady for now, which is expected until the network plugin is installed
[admin@k8s-master-01 ~]$ kubectl get nodes
NAME            STATUS     ROLES           AGE     VERSION
k8s-master-01   NotReady   control-plane   8m11s   v1.29.1
k8s-master-02   NotReady   control-plane   2m49s   v1.29.1
k8s-master-03   NotReady   control-plane   114s    v1.29.1

Join the worker nodes

# k8s-node-01 joins the cluster
[admin@k8s-node-01 ~]# sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85  \
--cri-socket unix:///var/run/cri-dockerd.sock

# k8s-node-02 joins the cluster
[admin@k8s-node-02 ~]# sudo kubeadm join 192.168.1.200:16443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:3ff29c0b3e93709d4d2e06b6ddaf14da75760c26d216033ee7642813a3f5dd85  \
--cri-socket unix:///var/run/cri-dockerd.sock

# List all nodes
[admin@k8s-master-01 ~]$ kubectl get nodes
NAME            STATUS     ROLES           AGE     VERSION
k8s-master-01   NotReady   control-plane   11m     v1.29.1
k8s-master-02   NotReady   control-plane   6m36s   v1.29.1
k8s-master-03   NotReady   control-plane   5m41s   v1.29.1
k8s-node-01     NotReady   <none>          97s     v1.29.1
k8s-node-02     NotReady   <none>          75s     v1.29.1

Install the Calico Network Plugin

Option 1: install from the official manifests
# Apply the tigera-operator manifest
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/tigera-operator.yaml
[admin@k8s-master-01 software]$ kubectl get pods -n tigera-operator
NAME                               READY   STATUS    RESTARTS   AGE
tigera-operator-8547bd6cc6-l7lnh   1/1     Running   0          3m58s

# Install Calico through its custom resources
# Download the custom resource manifest
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/custom-resources.yaml
# Edit the file: change the cidr on line 13 to the IP range passed to kubeadm init as podSubnet (--pod-network-cidr)
[root@k8s-master01 ~]# vim custom-resources.yaml
......
11     ipPools:
12     - blockSize: 26
13       cidr: 10.244.0.0/16
14       encapsulation: VXLANCrossSubnet

# Apply the custom resource manifest
[admin@k8s-master-01 software]$ kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
# Watch the pods; without direct access to the upstream registries the images cannot be pulled, in which case use Option 2 instead
[admin@k8s-master-01 software]$ watch kubectl get pods -n calico-system
Every 2.0s: kubectl get pods -n calico-system                                                                                                                                                               Sun Jan 28 00:38:46 2024
NAME                                       READY   STATUS                  RESTARTS   AGE
calico-kube-controllers-685f7c9b88-9kxk4   0/1     Pending                 0          2m19s
calico-node-2c5d8                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-7r6cf                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-bmgn5                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-d74f4                          0/1     Init:ImagePullBackOff   0          2m20s
calico-node-xsxz5                          0/1     Init:ImagePullBackOff   0          2m20s
calico-typha-7dd5648769-6m4w7              0/1     ImagePullBackOff        0          2m13s
calico-typha-7dd5648769-lq966              0/1     ImagePullBackOff        0          2m20s
calico-typha-7dd5648769-wpk7r              0/1     ImagePullBackOff        0          2m13s

# Clean up Option 1 before switching to Option 2
[admin@k8s-master-01 software]$ kubectl delete -f custom-resources.yaml
installation.operator.tigera.io "default" deleted
apiserver.operator.tigera.io "default" deleted
[admin@k8s-master-01 software]$ kubectl delete -f tigera-operator.yaml
namespace "tigera-operator" deleted

#=====================================================================================

# Option 2: offline installation
# Download the Calico image tarball
wget http://mirrors.oby.ink/docker-images/k8s-calico-3.27.0.tar
# Load the images
docker load -i k8s-calico-3.27.0.tar
# List the installed images
[admin@k8s-master-01 software]$ docker images
REPOSITORY                                TAG        IMAGE ID       CREATED         SIZE
calico/kube-controllers                   v3.27.0    4e87edec0297   6 weeks ago     75.5MB
calico/cni                                v3.27.0    8e8d96a874c0   6 weeks ago     211MB
calico/node                               v3.27.0    1843802b91be   6 weeks ago     340MB
registry.k8s.io/kube-apiserver            v1.29.0    1443a367b16d   6 weeks ago     127MB
registry.k8s.io/kube-scheduler            v1.29.0    7ace497ddb8e   6 weeks ago     59.5MB
registry.k8s.io/kube-controller-manager   v1.29.0    0824682bcdc8   6 weeks ago     122MB
registry.k8s.io/kube-proxy                v1.29.0    98262743b26f   6 weeks ago     82.2MB
registry.k8s.io/etcd                      3.5.10-0   a0eed15eed44   2 months ago    148MB
registry.k8s.io/coredns/coredns           v1.11.1    cbb01a7bd410   5 months ago    59.8MB
registry.k8s.io/pause                     3.9        e6f181688397   15 months ago   744kB
nginx                                     latest     605c77e624dd   2 years ago     141MB

# Download the calico manifest
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/calico.yaml
# After downloading, edit the Pod network definition (CALICO_IPV4POOL_CIDR) so that it matches the --pod-network-cidr / podSubnet passed to kubeadm init.
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"

# After editing the file, deploy it:
[admin@k8s-master-01 software]$ kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created

# Check the pods
[admin@k8s-master-01 software]$ kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5fc7d6cf67-fvgjx   1/1     Running   0          18s
calico-node-cfr6w                          0/1     Running   0          18s
calico-node-hnxmf                          0/1     Running   0          18s
calico-node-qvknm                          0/1     Running   0          18s
calico-node-rnnjs                          0/1     Running   0          18s
calico-node-vvdsd                          0/1     Running   0          18s
coredns-76f75df574-q822k                   1/1     Running   0          52m
coredns-76f75df574-sjwps                   1/1     Running   0          52m
kube-apiserver-k8s-master-01               1/1     Running   0          52m
kube-apiserver-k8s-master-02               1/1     Running   0          47m
kube-apiserver-k8s-master-03               1/1     Running   0          46m
kube-controller-manager-k8s-master-01      1/1     Running   0          52m
kube-controller-manager-k8s-master-02      1/1     Running   0          47m
kube-controller-manager-k8s-master-03      1/1     Running   0          46m
kube-proxy-7hqnk                           1/1     Running   0          47m
kube-proxy-9xrpd                           1/1     Running   0          42m
kube-proxy-cdgv7                           1/1     Running   0          52m
kube-proxy-kv9b7                           1/1     Running   0          42m
kube-proxy-r24ww                           1/1     Running   0          46m
kube-scheduler-k8s-master-01               1/1     Running   0          52m
kube-scheduler-k8s-master-02               1/1     Running   0          47m
kube-scheduler-k8s-master-03               1/1     Running   0          46m

# When kubectl get nodes shows every node Ready, the Kubernetes deployment is complete
[admin@k8s-master-01 ~]$ kubectl get nodes
NAME            STATUS   ROLES           AGE   VERSION
k8s-master-01   Ready    control-plane   53m   v1.29.1
k8s-master-02   Ready    control-plane   48m   v1.29.1
k8s-master-03   Ready    control-plane   47m   v1.29.1
k8s-node-01     Ready    <none>          43m   v1.29.1
k8s-node-02     Ready    <none>          42m   v1.29.1
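
As a final smoke test it can be reassuring to schedule a throwaway workload and confirm that pods land on the worker nodes and receive addresses from the 10.244.0.0/16 Pod network; a minimal sketch using a hypothetical deployment name web-test (assumes the nodes can pull, or already have, the nginx image):

# Create a small test deployment
kubectl create deployment web-test --image=nginx --replicas=2
# The pods should be Running, scheduled on the worker nodes, with 10.244.x.x IPs
kubectl get pods -o wide -l app=web-test
# Clean up
kubectl delete deployment web-test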


# If running kubectl on a worker node fails as shown below:
[admin@k8s-node-01 ~]$ kubectl get pods -n kube-system
E0128 00:53:49.201197   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.201534   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.203053   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.203237   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
E0128 00:53:49.205144   24206 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp [::1]:8080: connect: connection refused
The connection to the server localhost:8080 was refused - did you specify the right host or port?
# Configure the KUBECONFIG environment variable so kubectl on the node uses the kubelet's kubeconfig
[root@k8s-node-01 ~]# echo "export KUBECONFIG=/etc/kubernetes/kubelet.conf" >> /etc/profile
[root@k8s-node-01 ~]# source /etc/profile
[root@k8s-node-01 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5fc7d6cf67-fvgjx   1/1     Running   0          12m
calico-node-cfr6w                          1/1     Running   0          12m
calico-node-hnxmf                          1/1     Running   0          12m
calico-node-qvknm                          1/1     Running   0          12m
calico-node-rnnjs                          1/1     Running   0          12m
calico-node-vvdsd                          1/1     Running   0          12m
coredns-76f75df574-q822k                   1/1     Running   0          64m
coredns-76f75df574-sjwps                   1/1     Running   0          64m
kube-apiserver-k8s-master-01               1/1     Running   0          64m
kube-apiserver-k8s-master-02               1/1     Running   0          59m
kube-apiserver-k8s-master-03               1/1     Running   0          58m
kube-controller-manager-k8s-master-01      1/1     Running   0          64m
kube-controller-manager-k8s-master-02      1/1     Running   0          59m
kube-controller-manager-k8s-master-03      1/1     Running   0          58m
kube-proxy-7hqnk                           1/1     Running   0          59m
kube-proxy-9xrpd                           1/1     Running   0          54m
kube-proxy-cdgv7                           1/1     Running   0          64m
kube-proxy-kv9b7                           1/1     Running   0          54m
kube-proxy-r24ww                           1/1     Running   0          58m
kube-scheduler-k8s-master-01               1/1     Running   0          64m
kube-scheduler-k8s-master-02               1/1     Running   0          59m
kube-scheduler-k8s-master-03               1/1     Running   0          58m
