# 安装k8s集群-V2

# 1 虚拟机初始化

# 1.1 安装VMWare-Tools

#Install a C++ compiler and kernel headers matching the running kernel
#(build prerequisites for the VMware Tools kernel modules)
yum -y install gcc-c++
yum -y install kernel-devel-$(uname -r) 

#Mount the VMware Tools virtual CD, unpack it, and run the installer
mkdir /mnt/cdrom
mount /dev/cdrom /mnt/cdrom
cd /tmp
tar zxvf /mnt/cdrom/VMwareTools-10.3.23-16594550.tar.gz 
cd vmware-tools-distrib
./vmware-install.pl



# 1.2 所有节点配置hosts

#Append cluster host entries so nodes resolve each other by name.
#NOTE(review): IPs assume the 192.168.179.0/24 lab network — adjust per environment.
cat >> /etc/hosts << EOF
192.168.179.128 k8s-master
192.168.179.129 k8s-node1
192.168.179.130 k8s-node2
EOF
1
2
3
4
5

# 1.3 所有节点关闭防火墙、selinux、dnsmasq、swap

#Disable the firewall
systemctl disable --now firewalld
#Disable dnsmasq
systemctl disable --now dnsmasq
#Disable postfix
systemctl  disable --now postfix
#Disable NetworkManager
systemctl disable --now NetworkManager
#Disable SELinux permanently (config file), then for the running system
sed -ri 's/(^SELINUX=).*/\1disabled/' /etc/selinux/config
setenforce 0
#Disable swap: comment out the fstab entry, then turn it off immediately
sed -ri 's@(^.*swap *swap.*0 0$)@#\1@' /etc/fstab
swapoff -a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
## 设置主机名:
hostnamectl set-hostname k8s-master

1
2
3
#Kernel parameters for Kubernetes networking:
# - bridge-nf-call-*: make bridged traffic visible to (ip6)tables
# - ip_forward: allow packet forwarding between pods/nodes
# - swappiness=0: strongly discourage swapping (swap is disabled anyway)
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
#br_netfilter must be loaded or the bridge-nf-call-* keys do not exist
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl --system  

1
2
3
4
5
6
7
8
9
10

# 1.4 配置时间同步

方法1:使用ntpdate

#Install ntpdate (requires a configured yum repo)
yum install ntpdate -y
#Sync once now; point this at your own NTP server if you have one
ntpdate time2.aliyun.com
#Install a cron entry for periodic sync.
#BUG FIX: `crontab -e` is interactive and ignores trailing arguments, so the
#original line never created the job. Append the entry non-interactively,
#preserving any existing crontab.
(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -
1
2
3
4
5
6

方法2:使用chrony(推荐使用)

#安装chrony
yum install chrony -y
#在其中一台主机配置为时间服务器
cat /etc/chrony.conf
server time2.aliyun.com iburst   #从哪同步时间
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.0.0/16  #允许的ntp客户端网段
local stratum 10
logdir /var/log/chrony

#重启服务
systemctl restart chronyd
#配置其他节点从服务端获取时间进行同步
cat /etc/chrony.conf
server 192.168.179.128 iburst

#重启验证
systemctl restart chronyd
chronyc sources -v
^* time2.aliyun.com              2   6     7     1    -17us[ +852us] +/-   20ms  #这样表示正常
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22

# 1.5 所有节点修改资源限制

#Back up, then replace, the PAM per-user resource limits for all users ("*"):
#unlimited core dumps, 1M processes/open files, raised memlock and msgqueue.
cp /etc/security/limits.conf /etc/security/limits.conf.bak
cat > /etc/security/limits.conf <<EOF
*       soft        core        unlimited
*       hard        core        unlimited
*       soft        nproc       1000000
*       hard        nproc       1000000
*       soft        nofile      1000000
*       hard        nofile      1000000
*       soft        memlock     32000
*       hard        memlock     32000
*       soft        msgqueue    8192000
EOF
1
2
3
4
5
6
7
8
9
10
11
12

# 1.6 ssh认证

#Generate an SSH keypair and push the public key to every node so the
#scp/ssh loops later in this guide run without password prompts
ssh-keygen -t rsa -b 4096 -C "[email protected]"
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-node1
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-node2
1
2
3
4

# 1.7 开启ipvs

# (opens new window)

  • 在每个节点安装ipset和ipvsadm:
yum -y install ipset ipvsadm
1
  • 在所有节点执行如下脚本:
#Module list for IPVS-mode kube-proxy (rr/wrr/sh schedulers + conntrack).
#NOTE(review): nf_conntrack_ipv4 was merged into nf_conntrack on kernels
#>= 4.19; on newer kernels that modprobe fails — confirm the target kernel.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
1
2
3
4
5
6
7
8
  • 授权、运行、检查是否加载:
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
1
  • 检查是否加载:
#BUG FIX: the kernel modules are named ip_vs*, not "ipvs" — the original
#pattern "ipvs" never matched anything, so the check always looked empty.
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
1

# 1.8 安装containerd

#Unpack containerd + CNI plugins directly onto the filesystem root
tar xf cri-containerd-cni-1.6.6-linux-amd64.tar.gz -C /
#Create the systemd unit (the tarball may already ship one)
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
#Enable and start containerd
systemctl enable --now  containerd.service
#Generate the default configuration
mkdir /etc/containerd
/usr/local/bin/containerd config default > /etc/containerd/config.toml
#Optional tweaks (private registry / sandbox image), kept commented out:
## sed -ri 's@(sandbox_image = ").*(")@\1192.168.10.254:5000/k8s/pause:3.7\2@ ' /etc/containerd/config.toml
## sed -ri '/registry.mirrors/a\\n[plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.10.254:5000"]\nendpoint = ["http://192.168.10.254:5000"]' /etc/containerd/config.toml 
#Restart to pick up the configuration
systemctl restart containerd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36

# 1.9 安装crictl客户端工具

#Unpack the crictl binary into /usr/bin
tar xf crictl-v1.24.2-linux-amd64.tar.gz -C /usr/bin/
#Point crictl at the containerd CRI socket
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
#Smoke test: prints runtime status if the socket is reachable
crictl info
1
2
3
4
5
6
7
8

# 2 部署kubernetes

# 2.1 分发二进制文件

#NOTE(review): the archive name ends in ".tar.tar" — likely a download-rename
#artifact; the official release is kubernetes-server-linux-amd64.tar.gz.
#Confirm the local filename before running.
tar xf kubernetes-server-linux-amd64.tar.tar
master="k8s-master"
#Push the full control-plane binary set to the master(s)
for i in $master;do
  scp kubernetes/server/bin/{kubeadm,kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy,kubelet,kubectl} $i:/usr/bin
done

node="k8s-node1 k8s-node2"
#Workers only need kube-proxy and kubelet
for i in $node;do
  scp kubernetes/server/bin/{kube-proxy,kubelet} $i:/usr/bin
done
1
2
3
4
5
6
7
8
9
10
11
12
# 虚拟机执行
BASE_DIR=/mnt/hgfs/sharedir/soft
master="k8s-master"
soft="kubeadm kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl"
for i in $master;do
  for s in ${soft};do
    ssh root@$i "ln -s ${BASE_DIR}/kubernetes/server/bin/${s} /usr/bin/"
  done
done

BASE_DIR=/mnt/hgfs/sharedir/soft
node="k8s-node1 k8s-node2"
soft="kube-proxy kubelet"
for i in $node;do
  for s in ${soft};do
    ssh root@$i "ln -s ${BASE_DIR}/kubernetes/server/bin/${s} /usr/bin/"
  done
done
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

# 3 安装ETCD组件

# 3.1 生成kubernetes集群所需证书

只需要在其中一个节点操作即可

# 3.2 安装cfssl工具并生成证书目录

#解压
# mv cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
# mv cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
# mv cfssl-certinfo_1.6.1_linux_amd64 /usr/bin/cfssl-certinfo

ln -s /mnt/hgfs/sharedir/soft/cfssl_1.6.1_linux_amd64 /usr/bin/cfssl
ln -s /mnt/hgfs/sharedir/soft/cfssljson_1.6.1_linux_amd64 /usr/bin/cfssljson
ln -s /mnt/hgfs/sharedir/soft/cfssl-certinfo_1.6.1_linux_amd64 /usr/bin/cfssl-certinfo
#生成证书存放目录
## 1
cd /mnt/hgfs/sharedir/filesystem/k8s-master
mkdir ./opt/pki/{etcd,kubernetes} -p
ln -s /mnt/hgfs/sharedir/filesystem/k8s-master/opt/pki /opt/pki
## 2
mkdir /opt/pki/{etcd,kubernetes} -p
#验证
ls /usr/bin/cfssl*
/usr/bin/cfssl  /usr/bin/cfssl-certinfo  /usr/bin/cfssljson
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

# 3.3 生成etcd证书

# 3.3.1 生成etcd证书的ca机构

mkdir -p /mnt/hgfs/sharedir/filesystem/k8s-master/opt/pki
ln -s /mnt/hgfs/sharedir/filesystem/k8s-master/opt/pki /opt/pki 

mkdir /opt/pki/etcd/ -p
cd /opt/pki/etcd/
#Directory for the etcd CA
mkdir ca
#CA config and signing request live under ca/
cd ca/
#Signing policy: one "etcd" profile, 10-year (87600h) certs usable for both
#server and client authentication
cat > ca-config.json <<EOF
{
    "signing": {
         "default": {
             "expiry": "87600h"
        },
         "profiles": {
             "etcd": {
                 "expiry": "87600h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             }
         }
     }
}
EOF
#CA certificate signing request.
#BUG FIX: cfssl's subject key for state/province is "ST"; the original "TS"
#is not a recognized field and was silently ignored, so issued certs lacked ST.
cat > ca-csr.json <<EOF
{
  "CA":{"expiry":"87600h"},
  "CN": "etcd-cluster",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd-cluster",
      "OU": "System"
    }
  ]
}
EOF
#Generate the self-signed CA (ca.pem / ca-key.pem)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52

# 3.3.2 生成etcd服务端证书

#etcd server certificate request — "hosts" (SANs) must list every member IP
#plus loopback; add new member IPs here before scaling the cluster.
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cd /opt/pki/etcd/
cat > etcd-server-csr.json << EOF
{
  "CN": "etcd-server",
  "hosts": [
     "192.168.179.128",
     "192.168.179.129",
     "192.168.179.130",
     "127.0.0.1"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd-server",
      "OU": "System"
    }
  ]
}
EOF
#Sign with the etcd CA using the "etcd" profile
cfssl gencert \
  -ca=ca/ca.pem \
  -ca-key=ca/ca-key.pem \
  -config=ca/ca-config.json \
  -profile=etcd \
  etcd-server-csr.json | cfssljson -bare etcd-server
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33

# 3.3.3 生成etcd客户端证书

#etcd client certificate request (no SANs needed for a client cert).
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cd /opt/pki/etcd/
cat > etcd-client-csr.json << EOF
{
  "CN": "etcd-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd-client",
      "OU": "System"
    }
  ]
}
EOF
#Sign with the etcd CA using the "etcd" profile
cfssl gencert \
  -ca=ca/ca.pem \
  -ca-key=ca/ca-key.pem \
  -config=ca/ca-config.json \
  -profile=etcd \
  etcd-client-csr.json | cfssljson -bare etcd-client
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27

拷贝证书到节点

#Distribute the etcd CA and the server/client cert pairs to every node
master="k8s-master k8s-node1 k8s-node2"
for i in $master;do
  ssh $i "mkdir /etc/etcd/ssl -p"
  scp /opt/pki/etcd/ca/ca.pem /opt/pki/etcd/{etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem} $i:/etc/etcd/ssl/
done
1
2
3
4
5

# 4.3 创建kubernetes各组件证书

# 1.创建kubernetes的ca

#Create the kubernetes CA directory
mkdir /opt/pki/kubernetes/ -p
cd /opt/pki/kubernetes/
mkdir ca
cd ca
#Signing policy: one "kubernetes" profile, 10-year (87600h) certs usable for
#both server and client authentication
cat > ca-config.json <<EOF
{
    "signing": {
         "default": {
             "expiry": "87600h"
        },
         "profiles": {
             "kubernetes": {
                 "expiry": "87600h",
                 "usages": [
                     "signing",
                     "key encipherment",
                     "server auth",
                     "client auth"
                 ]
             }
         }
     }
}
EOF
#CA certificate signing request.
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cat > ca-csr.json <<EOF
{
  "CA":{"expiry":"87600h"},
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubernetes",
      "OU": "System"
    }
  ]
}
EOF
#Generate the self-signed cluster CA (ca.pem / ca-key.pem)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48

# 2.创建kube-apiserver证书

#Create directory
mkdir /opt/pki/kubernetes/kube-apiserver -p
cd /opt/pki/kubernetes/kube-apiserver
#CSR — "hosts" (SANs) must cover every name/IP clients use to reach the
#apiserver: node IPs, loopback, the first service IP (10.200.0.1, from
#--service-cluster-ip-range) and the in-cluster DNS names.
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cat > kube-apiserver-csr.json <<EOF
{
  "CN": "kube-apiserver",
  "hosts": [
    "127.0.0.1",
    "192.168.179.128",
    "192.168.179.129",
    "192.168.179.130",
    "10.200.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
   ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kube-apiserver",
      "OU": "System"
    }
  ]
}
EOF
#Sign with the cluster CA using the "kubernetes" profile
cfssl gencert \
  -ca=../ca/ca.pem \
  -ca-key=../ca/ca-key.pem \
  -config=../ca/ca-config.json \
  -profile=kubernetes \
  kube-apiserver-csr.json | cfssljson -bare  kube-apiserver
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41

拷贝kube-apiserver组件证书到master节点

master="k8s-master"
for i in $master;do
   ssh $i "mkdir /etc/kubernetes/pki -p"
   scp /opt/pki/kubernetes/ca/{ca.pem,ca-key.pem} /opt/pki/kubernetes/kube-apiserver/{kube-apiserver-key.pem,kube-apiserver.pem} $i:/etc/kubernetes/pki
done
1
2
3
4
5

拷贝证书到node

# Workers receive only the cluster CA certificate (no private keys).
node="k8s-node1 k8s-node2"
for host in $node; do
   ssh "$host" "mkdir /etc/kubernetes/pki -p"
   scp /opt/pki/kubernetes/ca/ca.pem "$host":/etc/kubernetes/pki
done
1
2
3
4
5

# 3.创建proxy-client证书以及ca

#Create the front-proxy (API aggregation layer) PKI directory
mkdir /opt/pki/proxy-client
cd /opt/pki/proxy-client
#CA signing request for the front-proxy CA (its own CA, separate from the cluster CA)
cat > front-proxy-ca-csr.json <<EOF
{
  "CA":{"expiry":"87600h"},
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
#Generate the front-proxy CA
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
#Client certificate request used by the apiserver when calling aggregated APIs
cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF
#Sign the client cert with the front-proxy CA (reuses the kubernetes profile config)
cfssl gencert \
-ca=front-proxy-ca.pem \
-ca-key=front-proxy-ca-key.pem  \
-config=../kubernetes/ca/ca-config.json   \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32

拷贝证书到master节点

master="k8s-master"
for i in $master;do
   scp /opt/pki/proxy-client/{front-proxy-ca.pem,front-proxy-client.pem,front-proxy-client-key.pem} $i:/etc/kubernetes/pki
done

node="k8s-node1 k8s-node2"
for i in $node;do
  scp /opt/pki/proxy-client/front-proxy-ca.pem $i:/etc/kubernetes/pki
done
1
2
3
4
5
6
7
8
9

# 4.创建kube-controller-manager证书与认证文件

#Working directory for the controller-manager credentials
mkdir /opt/pki/kubernetes/kube-controller-manager
cd /opt/pki/kubernetes/kube-controller-manager
#CSR — CN "system:kube-controller-manager" maps to the built-in RBAC role.
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cat > kube-controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
EOF
#Issue the client certificate
cfssl gencert \
   -ca=../ca/ca.pem \
   -ca-key=../ca/ca-key.pem \
   -config=../ca/ca-config.json \
   -profile=kubernetes \
   kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#Build the kubeconfig: cluster, credentials, context, then select the context
kubectl config set-cluster kubernetes \
    --certificate-authority=../ca/ca.pem \
    --embed-certs=true \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=kube-controller-manager.pem \
    --client-key=kube-controller-manager-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context default \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48

拷贝证书文件到master节点

master="k8s-master"
for i in $master;do
   scp /opt/pki/kubernetes/kube-controller-manager/kube-controller-manager.kubeconfig $i:/etc/kubernetes
done
1
2
3
4

# 5.生成kube-scheduler证书文件

#Working directory for the scheduler credentials
mkdir /opt/pki/kubernetes/kube-scheduler
cd /opt/pki/kubernetes/kube-scheduler
#CSR — CN "system:kube-scheduler" maps to the built-in RBAC role.
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cat > kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
EOF
#Issue the client certificate
cfssl gencert \
   -ca=../ca/ca.pem \
   -ca-key=../ca/ca-key.pem \
   -config=../ca/ca-config.json \
   -profile=kubernetes \
   kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#Build the kubeconfig: cluster, credentials, context, then select the context
kubectl config set-cluster kubernetes \
    --certificate-authority=../ca/ca.pem \
    --embed-certs=true \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
    --client-certificate=kube-scheduler.pem \
    --client-key=kube-scheduler-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context default \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48

拷贝证书文件到master节点

master="k8s-master"
for i in $master;do
   scp /opt/pki/kubernetes/kube-scheduler/kube-scheduler.kubeconfig $i:/etc/kubernetes
done
1
2
3
4

# 6.生成kubernetes集群管理员证书

#Working directory for the cluster-admin credentials
mkdir /opt/pki/kubernetes/admin
cd /opt/pki/kubernetes/admin
#CSR — O "system:masters" grants cluster-admin via the built-in group binding.
#BUG FIX: "TS" -> "ST" (cfssl's state/province key; unknown keys are dropped).
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
#Issue the client certificate
cfssl gencert \
   -ca=../ca/ca.pem \
   -ca-key=../ca/ca-key.pem \
   -config=../ca/ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare admin
#Build the kubeconfig: cluster, credentials, context, then select the context
kubectl config set-cluster kubernetes \
    --certificate-authority=../ca/ca.pem \
    --embed-certs=true \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=admin.kubeconfig

kubectl config set-credentials admin \
    --client-certificate=admin.pem \
    --client-key=admin-key.pem \
    --embed-certs=true \
    --kubeconfig=admin.kubeconfig

kubectl config set-context default \
    --cluster=kubernetes \
    --user=admin \
    --kubeconfig=admin.kubeconfig

kubectl config use-context default --kubeconfig=admin.kubeconfig
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48

# 5 部署etcd集群

# 5.1 三台master节点安装etcd


# Run from the VM host setup: symlink the shared-folder etcd binaries on all nodes
BASE_DIR=/mnt/hgfs/sharedir/soft
master="k8s-master k8s-node1 k8s-node2"
soft="etcd etcdctl etcdutl"
for i in $master;do
  for s in ${soft};do
    ssh root@$i "ln -s ${BASE_DIR}/etcd-v3.5.4-linux-amd64/${s} /usr/bin/"
  done
done

# NOTE(review): without the ETCDCTL_* env vars (configured in section 5.2) this
# bare health check has no TLS material and will fail against the TLS listeners.
etcdctl endpoint health

sudo ETCDCTL_API=3 etcdctl member list \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd-server.pem \
  --key=/etc/etcd/ssl/etcd-server-key.pem
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#Extract the etcd release
tar xf etcd-v3.5.4-linux-amd64.tar.gz
## cp etcd-v3.5.4-linux-amd64/etcd* /usr/bin/

#NOTE(review): the archive above is v3.5.4 but this removes v3.5.3 files —
#confirm whether "v3.5.3" is a typo for the directory just extracted.
rm -rf etcd-v3.5.3-linux-amd64*
#创建配置文件
#etcd-1
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.179.128:2380'
listen-client-urls: 'https://192.168.179.128:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.179.128:2380'
advertise-client-urls: 'https://192.168.179.128:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://192.168.179.128:2380,etcd-2=https://192.168.179.129:2380,etcd-3=https://192.168.179.130:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

EOF

#etcd-2 configuration (run on 192.168.179.129).
#CONSISTENCY FIX: debug was 'true' only on this node while etcd-1 and etcd-3
#use 'false'; aligned to 'false' so all members log at the same level.
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-2'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.179.129:2380'
listen-client-urls: 'https://192.168.179.129:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.179.129:2380'
advertise-client-urls: 'https://192.168.179.129:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://192.168.179.128:2380,etcd-2=https://192.168.179.129:2380,etcd-3=https://192.168.179.130:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

EOF

#etcd-3
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-3'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.179.130:2380'
listen-client-urls: 'https://192.168.179.130:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.179.130:2380'
advertise-client-urls: 'https://192.168.179.130:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://192.168.179.128:2380,etcd-2=https://192.168.179.129:2380,etcd-3=https://192.168.179.130:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

EOF

#Systemd unit for etcd; Type=notify waits for etcd's readiness notification
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

#Enable and start etcd on each member; the cluster forms once a quorum is up
systemctl enable --now etcd

# 5.2 配置etcdctl客户端工具

#Export etcdctl connection settings system-wide (v3 API + client TLS material)
cat > /etc/profile.d/etcdctl.sh <<EOF
#!/bin/bash
export ETCDCTL_API=3
export ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
export ETCDCTL_CACERT=/etc/etcd/ssl/ca.pem
export ETCDCTL_CERT=/etc/etcd/ssl/etcd-client.pem
export ETCDCTL_KEY=/etc/etcd/ssl/etcd-client-key.pem
EOF
#Load the new variables into the current shell.
#FIX: source the file just written — `source /etc/profile` only picks up
#/etc/profile.d/*.sh if /etc/profile iterates that directory (distro-dependent).
source /etc/profile.d/etcdctl.sh
#验证集群状态
[root@k8s-master soft]# etcdctl member list
18799b31e00a520, started, etcd-1, https://192.168.179.128:2380, https://192.168.179.128:2379, false
11e8ca668b3b0f11, started, etcd-2, https://192.168.179.129:2380, https://192.168.179.129:2379, false
e339bd5cf6844c96, started, etcd-3, https://192.168.179.130:2380, https://192.168.179.130:2379, false
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16

# 6 安装kube-apiserver

#Create the ServiceAccount signing keypair (sa.key private, sa.pub public)
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
master="k8s-master"
#Distribute the SA keypair to the master(s)
for i in $master;do
  scp /etc/kubernetes/pki/{sa.pub,sa.key} $i:/etc/kubernetes/pki/
done
#Create the systemd unit; $a is this node's IP scraped from interface ens33
#NOTE(review): --etcd-servers below lists only one member although the cluster
#has three (192.168.179.128-130) — confirm whether all three should be listed.
a=`ifconfig ens33 | awk -rn 'NR==2{print $2}'`
# a=`ifconfig eth0 | awk -rn 'NR==2{print $2}'`
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-apiserver \\
      --v=2  \\
      --logtostderr=true  \\
      --allow-privileged=true  \\
      --bind-address=$a  \\
      --secure-port=6443  \\
      --advertise-address=$a \\
      --service-cluster-ip-range=10.200.0.0/16  \\
      --service-node-port-range=30000-42767  \\
      --etcd-servers=https://192.168.179.128:2379 \\
      --etcd-cafile=/etc/etcd/ssl/ca.pem  \\
      --etcd-certfile=/etc/etcd/ssl/etcd-client.pem  \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-client-key.pem  \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
      --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem  \\
      --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem  \\
      --kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem  \\
      --kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem  \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
      --authorization-mode=Node,RBAC  \\
      --enable-bootstrap-token-auth=true  \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
      --requestheader-allowed-names=aggregator  \\
      --requestheader-group-headers=X-Remote-Group  \\
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
      --requestheader-username-headers=X-Remote-User  

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF
#Enable and start the apiserver
systemctl enable --now kube-apiserver.service
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60

# 7 安装kube-controller-manager

#Generate the kube-controller-manager systemd unit.
#FIX: inside an unquoted heredoc a single "\" at end-of-line is consumed by
#the shell as a line continuation, so the written unit had the entire
#ExecStart collapsed onto one line. "\\" writes a literal "\" (same style as
#the kube-apiserver unit); systemd behavior is unchanged.
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-controller-manager \\
      --v=2 \\
      --logtostderr=true \\
      --root-ca-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
      --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
      --leader-elect=true \\
      --use-service-account-credentials=true \\
      --node-monitor-grace-period=40s \\
      --node-monitor-period=5s \\
      --pod-eviction-timeout=2m0s \\
      --controllers=*,bootstrapsigner,tokencleaner \\
      --allocate-node-cidrs=true \\
      --cluster-cidr=10.200.0.0/16 \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
      --node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
#Enable and start the controller manager
systemctl enable --now kube-controller-manager.service
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34

# 8 安装kube-scheduler

#Generate the kube-scheduler systemd unit.
#FIX: "\" -> "\\" so the heredoc writes literal line continuations instead of
#the shell collapsing ExecStart onto one line (matches the apiserver unit).
cat > /etc/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-scheduler \\
      --v=2 \\
      --logtostderr=true \\
      --leader-elect=true \\
      --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
#Enable and start the scheduler
systemctl enable --now kube-scheduler.service
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22

# 9 在master节点配置kubectl工具

#Install the admin kubeconfig as kubectl's default config
mkdir /root/.kube/ -p
cp /opt/pki/kubernetes/admin/admin.kubeconfig  /root/.kube/config
#Sanity check: all control-plane components should report Healthy
kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}
1
2
3
4
5
6
7
8
9
10
11
curl http://10.10.0.14:2379/v2/members
1

# 10 部署kubelet

# 10.1 使用TLS Bootstrapping自动认证kubelet

创建TLS Bootstrapping认证文件

在其中一个master节点执行

#Working directory for the kubelet TLS-bootstrap credentials
mkdir /opt/pki/kubernetes/kubelet -p
cd /opt/pki/kubernetes/kubelet
#Random bootstrap token: 6-hex-char id ($a) and 16-hex-char secret ($b),
#later combined as "<id>.<secret>" in the bootstrap kubeconfig
a=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c6`
b=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c16`
#生成权限绑定文件
cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-$a
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: $a
  token-secret: $b
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF
#Generate the bootstrap kubeconfig used by kubelet for TLS bootstrapping.
#NOTE(review): $a and $b (bootstrap token id/secret) are defined earlier in
#this document — confirm they are set before running; token is quoted so an
#unset value fails visibly instead of word-splitting.
kubectl config set-cluster kubernetes \
  --certificate-authority=../ca/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=bootstrap-kubelet.kubeconfig

kubectl config set-credentials tls-bootstrap-token-user \
  --token="$a.$b" \
  --kubeconfig=bootstrap-kubelet.kubeconfig

kubectl config set-context tls-bootstrap-token-user@kubernetes \
  --cluster=kubernetes \
  --user=tls-bootstrap-token-user \
  --kubeconfig=bootstrap-kubelet.kubeconfig

kubectl config use-context tls-bootstrap-token-user@kubernetes \
  --kubeconfig=bootstrap-kubelet.kubeconfig
#Create the bootstrap token secret and RBAC objects
kubectl apply -f bootstrap.secret.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115

分发认证文件

#Distribute the bootstrap kubeconfig to every node listed in /etc/hosts.
#$node is intentionally unquoted in the for-list so it word-splits into names.
node="k8s-master k8s-node1 k8s-node2"
for i in $node; do
  ssh "$i" "mkdir -p /etc/kubernetes"
  scp /opt/pki/kubernetes/kubelet/bootstrap-kubelet.kubeconfig "$i":/etc/kubernetes
done
1
2
3
4
5

# 10.2 部署kubelet组件

# 10.2.1 使用docker容器运行时部署方式

#Node IP: second line of ifconfig output for this host's interface
#(adjust eth0 to match the actual interface name, e.g. ens33)
a=$(ifconfig eth0 | awk 'NR==2{print $2}')
mkdir -p /etc/systemd/system/kubelet.service.d/
mkdir -p /etc/kubernetes/manifests/
#Generate the kubelet systemd unit (docker runtime via cri-dockerd)
cat > /etc/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
#Generate the drop-in that supplies kubelet arguments.
#The heredoc is unquoted so $a expands here; \$VAR stays literal for systemd.
#(fixed: the runtime env var was misspelled KUBELET_RINTIME)
cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=$a"
Environment="KUBELET_RUNTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS \$KUBELET_RUNTIME
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32

# 10.2.2 使用containerd部署方式

#Node IP: second line of ifconfig output (use whichever interface exists)
# a=$(ifconfig eth0 | awk 'NR==2{print $2}')
a=$(ifconfig ens33 | awk 'NR==2{print $2}')
mkdir -p /etc/systemd/system/kubelet.service.d/
mkdir -p /etc/kubernetes/manifests/
#Generate the kubelet systemd unit (containerd runtime)
cat > /etc/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/bin/kubelet

Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
#Generate the drop-in that supplies kubelet arguments.
#The heredoc is unquoted so $a expands here; \$VAR stays literal for systemd.
#(fixed: the runtime env var was misspelled KUBELET_RINTIME)
cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=$a"
Environment="KUBELET_RUNTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_SYSTEM_ARGS \$KUBELET_EXTRA_ARGS \$KUBELET_RUNTIME
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33

# 10.2.3 kubelet配置文件生成

#Node IP: second line of ifconfig output for this host's interface
#NOTE(review): interface name differs per host (eth0 vs ens33) — use the one that exists
# a=`ifconfig eth0 | awk -rn 'NR==2{print $2}'`
a=`ifconfig ens33 | awk -rn 'NR==2{print $2}'`
#Generate the KubeletConfiguration file; the heredoc is unquoted so $a
#below expands to this node's IP. Content is written verbatim otherwise.
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: $a
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.200.0.2
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75

启动服务

#Start kubelet now and enable it at boot
systemctl enable --now kubelet.service
1

# 11 部署kube-proxy

# 11.1 生成kube-proxy配置文件

#Run on the master node
#Create the working directory (abort if cd fails so files land in the right place)
mkdir -p /opt/pki/kubernetes/kube-proxy/
cd /opt/pki/kubernetes/kube-proxy/ || exit 1
#Create the kube-proxy service account and bind it to system:node-proxier
kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
#Service-account token secret (fixed: filename was misspelled kube-proxy-scret.yml)
cat > kube-proxy-secret.yml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF

kubectl apply -f kube-proxy-secret.yml

#Extract the service-account token and build kube-proxy.kubeconfig with it
JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
  --output=jsonpath='{.data.token}' | base64 -d)
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kubernetes \
  --token="${JWT_TOKEN}" \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=kubernetes \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context kubernetes \
  --kubeconfig=kube-proxy.kubeconfig
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39

# 11.2 拷贝配置文件到node节点

#Distribute kube-proxy.kubeconfig to every node.
#(fixed: node list referenced master01/node01 hosts from a different topology;
#this document's hosts are k8s-master/k8s-node1/k8s-node2 — see /etc/hosts setup)
node="k8s-master k8s-node1 k8s-node2"
for i in $node; do
  scp /opt/pki/kubernetes/kube-proxy/kube-proxy.kubeconfig "$i":/etc/kubernetes
done
1
2
3
4

# 11.3 生成service文件

#Generate the kube-proxy systemd unit.
#The heredoc delimiter is quoted so the backslash line-continuations are
#written literally into the unit file; with an unquoted delimiter bash would
#consume the \<newline> pairs and join the ExecStart lines.
cat > /etc/systemd/system/kube-proxy.service <<'EOF'
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17

# 11.4 生成配置文件

#Node IP: second line of ifconfig output. ens33 matches the interface used by
#the corrected sections elsewhere in this document (eth0 kept commented for
#hosts that use it).
# a=$(ifconfig eth0 | awk 'NR==2{print $2}')
a=$(ifconfig ens33 | awk 'NR==2{print $2}')
#Generate the KubeProxyConfiguration; unquoted heredoc so $a expands
cat > /etc/kubernetes/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: $a
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.100.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "$a"
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39

# 11.5启动服务

#Start kube-proxy now and enable it at boot
systemctl enable --now kube-proxy.service
1
上次更新: 2 months ago