# 安装k8s集群

# 1. 配置服务器

# 1.1 初始化

[root@k8s-node2 ~]# cat >> /etc/hosts << EOF
192.168.179.128 k8s-master
192.168.179.129 k8s-node1
192.168.179.130 k8s-node2
EOF
[root@k8s-node2 ~]# vim /etc/hosts
[root@k8s-node2 ~]# clear
[root@k8s-node2 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> vm.swappiness = 0
> EOF
[root@k8s-node2 ~]# 
[root@k8s-node2 ~]# modprobe br_netfilter
[root@k8s-node2 ~]# ^C
[root@k8s-node2 ~]# lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter
[root@k8s-node2 ~]# sysctl --system  
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
* Applying /etc/sysctl.conf ...

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46

# 1.2 时间同步


[root@k8s-node2 ~]# yum install ntpdate -y
已加载插件:fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.bupt.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.tuna.tsinghua.edu.cn
正在解决依赖关系
--> 正在检查事务
---> 软件包 ntpdate.x86_64.0.4.2.6p5-29.el7.centos.2 将被 安装
--> 解决依赖关系完成

依赖关系解决

===================================================================================
 Package         架构           版本                            源            大小
===================================================================================
正在安装:
 ntpdate         x86_64         4.2.6p5-29.el7.centos.2         base          87 k

事务概要
===================================================================================
安装  1 软件包

总下载量:87 k
安装大小:121 k
Downloading packages:
ntpdate-4.2.6p5-29.el7.centos.2.x86_64.rpm                  |  87 kB  00:00:00     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  正在安装    : ntpdate-4.2.6p5-29.el7.centos.2.x86_64                         1/1 
  验证中      : ntpdate-4.2.6p5-29.el7.centos.2.x86_64                         1/1 

已安装:
  ntpdate.x86_64 0:4.2.6p5-29.el7.centos.2                                         

完毕!
[root@k8s-node2 ~]# ntpdate time.windows.com
23 Jul 21:54:55 ntpdate[8162]: adjust time server 52.231.114.183 offset 0.000238 sec
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41

# 1.3 安装ipvsadm

[root@k8s-node2 ~]# yum -y install ipset ipvsadm
已加载插件:fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.bupt.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.tuna.tsinghua.edu.cn
软件包 ipset-7.1-1.el7.x86_64 已安装并且是最新版本
正在解决依赖关系
--> 正在检查事务
---> 软件包 ipvsadm.x86_64.0.1.27-8.el7 将被 安装
--> 解决依赖关系完成

依赖关系解决

===================================================================================
 Package            架构              版本                   源               大小
===================================================================================
正在安装:
 ipvsadm            x86_64            1.27-8.el7             base             45 k

事务概要
===================================================================================
安装  1 软件包

总下载量:45 k
安装大小:75 k
Downloading packages:
ipvsadm-1.27-8.el7.x86_64.rpm                               |  45 kB  00:00:00     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  正在安装    : ipvsadm-1.27-8.el7.x86_64                                      1/1 
  验证中      : ipvsadm-1.27-8.el7.x86_64                                      1/1 

已安装:
  ipvsadm.x86_64 0:1.27-8.el7                                                      

完毕!
[root@k8s-node2 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF
[root@k8s-node2 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4      15053  0 
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          139264  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack
[root@k8s-node2 ~]# lsmod | grep -e ipvs -e nf_conntrack_ipv4
nf_conntrack_ipv4      15053  0 
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
nf_conntrack          139264  2 ip_vs,nf_conntrack_ipv4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60

# 1.4 安装虚拟机工具

安装最新内核后不能够把主机的目录共享给虚拟机

# 安装依赖
yum install gcc-c++
yum install kernel-devel-$(uname -r) 

# 安装vmtools
mkdir /mnt/cdrom
mount /dev/cdrom /mnt/cdrom
cd /tmp
tar zxvf /mnt/cdrom/VMwareTools-10.3.23-16594550.tar.gz 
cd vmware-tools-distrib
./vmware-install.pl

# 安装gcc
yum install gcc-c++
## 找不到kernel
## Enter the path to the kernel header files for the 3.10.0-1160.el7.x86_64 kernel?
yum -y install kernel-devel # 这个是有问题的,最终和系统的不太一致 3.10.0-1160.71.1.el7
yum install kernel-devel-$(uname -r) 
# /usr/src/kernels/3.10.0-1160.71.1.el7.x86_64/include/
# 此时如果你的vmtools版本比较旧的话,还是会提示not a valid path。这是因为3.x版本的内核version.h的路径变了,导致vmtools无法找到。
ln -s /usr/src/kernels/3.10.0-1160.71.1.el7.x86_64/include/generated/uapi/linux/version.h /usr/src/kernels/3.10.0-1160.71.1.el7.x86_64/include/linux/version.h

3.10.0-1160.el7.x86_64
3.10.0-1160.71.1.el7.x86_64
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24

# 1.5 下载k8s二进制包


1.下载kubernetes1.24.+的二进制包
github二进制包下载地址:
#  https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#downloads-for-v1242
wget https://dl.k8s.io/v1.24.3/kubernetes.tar.gz

2.下载etcdctl二进制包
# github二进制包下载地址:https://github.com/etcd-io/etcd/releases

3.docker-ce二进制包下载地址
二进制包下载地址:https://download.docker.com/linux/static/stable/x86_64/

这里需要下载20.10.+版本
4.cri-docker安装包下载
下载地址:https://github.com/Mirantis/cri-dockerd/releases
5.containerd二进制包下载
github下载地址:https://github.com/containerd/containerd/releases

containerd下载时下载带cni插件的二进制包。
6.下载cfssl二进制包
github二进制包下载地址:https://github.com/cloudflare/cfssl/releases
7.cni插件下载
github下载地址:https://github.com/containernetworking/plugins/releases
8.crictl客户端二进制下载
github下载:https://github.com/kubernetes-sigs/cri-tools/releases

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26

# 1.6 安装containerd

# 解压
tar xf cri-containerd-cni-1.6.6-linux-amd64.tar.gz -C  /
# 自启动文件已存在
cat /etc/systemd/system/containerd.service
#启动
systemctl enable --now  containerd.service
#创建配置文件
mkdir /etc/containerd
/usr/local/bin/containerd config default > /etc/containerd/config.toml
#重启
systemctl restart containerd

1
2
3
4
5
6
7
8
9
10
11
12

# 1.7 安装crictl客户端工具

tar xf crictl-v1.24.2-linux-amd64.tar.gz  -C /usr/bin/
#生成配置文件
cat /etc/crictl.yaml
#测试
crictl info
1
2
3
4
5

# 2 生成kubernetes集群所需证书

只需要在其中一个节点操作即可

安装cfssl工具并生成证书目录

[root@k8s-master sharedir]# history |grep cp
  113  cp cfssl_1.6.1_linux_amd64 /usr/bin/cfssl
  114  cp cfssl-certinfo_1.6.1_linux_amd64 /usr/bin/cfssl-certinfo
  115  cp cfssljson_1.6.1_linux_amd64 /usr/bin/cfssljson
1
2
3
4

# 2.1 初始化配置

创建工作目录:
mkdir -pv ~/TLS/{etcd,k8s}
cd TLS/etcd
自签CA:
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42

# 2.2 生成证书

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
# 查看生成证书
ls -l *pem
# 查看生成的证书

# 2️⃣使用自签CA签发etcd的https证书:

# 创建证书申请文件:
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.179.128",
    "192.168.179.129",
    "192.168.179.130"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# 生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
# 查看生成证书:
ls -l server*pem
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35

# 2.3 生成etcd证书申请文件

cd /opt/pki/etcd/
cat > etcd-client-csr.json << EOF
{
  "CN": "etcd-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "TS": "Beijing",
      "L": "Beijing",
      "O": "etcd-client",
      "OU": "System"
    }
  ]
}
EOF
#生成证书
 cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-client-csr.json | cfssljson -bare client
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21

# 2.4 拷贝证书到节点

master="k8s-master k8s-node1 k8s-node2"
node="node01,node02"
for i in $master;do
  ssh $i "mkdir /etc/etcd/ssl -p"
  scp /opt/pki/etcd/ca/ca.pem /opt/pki/etcd/{etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem} $i:/etc/etcd/ssl/
done
1
2
3
4
5
6

# 2.5 安装etcd

mkdir /opt/etcd

mkdir /mnt/hgfs/sharedir/etcd/bin
ln -s /mnt/hgfs/sharedir/etcd/bin /opt/etcd/bin
mv /mnt/hgfs/sharedir/etcd-v3.4.19-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/

mkdir /mnt/hgfs/sharedir/etcd/ssl
ln -s /mnt/hgfs/sharedir/etcd/ssl /opt/etcd/ssl

cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*.pem /opt/etcd/ssl/
cd /opt/etcd/ssl
ls
ca-key.pem  ca.pem  server-key.pem  server.pem

mkdir /opt/etcd/cfg/

# 配置
ETCD_NAME:节点名称。
ETCD_DATA_DIR:数据目录。
ETCD_LISTEN_PEER_URLS:集群通信监听地址。
ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址 。
ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址。
ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址。
ETCD_INITIAL_CLUSTER:集群节点地址。
ETCD_INITIAL_CLUSTER_TOKEN:集群 Token。
ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态, new 是新集群, existing 表示加入已有集群。
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member] 
ETCD_NAME="etcd-1"  
ETCD_DATA_DIR="/var/lib/etcd/default.etcd" 
ETCD_LISTEN_PEER_URLS="https://192.168.179.128:2380" 
ETCD_LISTEN_CLIENT_URLS="https://192.168.179.128:2379" 

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.179.128:2380" 
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.179.128:2379" 
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.179.128:2380,etcd-2=https://192.168.179.129:2380,etcd-3=https://192.168.179.130:2380" 
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" 
ETCD_INITIAL_CLUSTER_STATE="new" 
EOF

cp /mnt/hgfs/sharedir/etcd/etcd.conf.template /opt/etcd/cfg/etcd.conf

cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start etcd
systemctl enable etcd

# etcd v3.4 已移除 v2 的 --ca-file/cluster-health 用法,需使用 v3 API:
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.179.128:2379,https://192.168.179.129:2379,https://192.168.179.130:2379" endpoint health
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71

# 2.6 拷贝证书到节点(未使用这种方式)

master="master01 master02 master03"
node="node01,node02"
for i in $master;do
  ssh $i "mkdir /etc/etcd/ssl -p"
  scp /opt/pki/etcd/ca/ca.pem /opt/pki/etcd/{etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem} $i:/etc/etcd/ssl/
done
1
2
3
4
5
6

# 2.7 为APIServer签证书

自签证书颁发机构(CA):

进入工作目录:
cd ~/TLS/k8s/
自签CA:
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#使用自签CA签发etcd的https证书:

# 创建证书申请文件:
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cat > server-csr.json<< EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local",
      "192.168.179.128",
      "192.168.179.129",
      "192.168.217.130",
      "192.168.179.1",
      "192.168.179.2",
      "192.168.31.198"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

# 生成证书:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107

# 2.8 复制证书到其它机器

master="k8s-master k8s-node1 k8s-node2"
for i in $master;do
   ssh $i "mkdir /opt/kubernetes//pki -p"
   scp /opt/pki/kubernetes/ca/{ca.pem,ca-key.pem} /opt/pki/kubernetes/kube-apiserver/{kube-apiserver-key.pem,kube-apiserver.pem} $i:/etc/kubernetes/pki
done

部署kube-apiserver
mkdir -p /opt/kubernetes/bin

ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubeadm /usr/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-apiserver /usr/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-controller-manager /usr/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-scheduler /usr/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-proxy /usr/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubelet /usr/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubectl /usr/bin/

ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubeadm /opt/kubernetes/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-apiserver /opt/kubernetes/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-controller-manager /opt/kubernetes/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-scheduler /opt/kubernetes/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-proxy /opt/kubernetes/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubelet /opt/kubernetes/bin/
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubectl /opt/kubernetes/bin/


ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kube-proxy /usr/bin/kube-proxy
ln -s /mnt/hgfs/sharedir/kubernetes/server/bin/kubelet /usr/bin/kubelet




mkdir /opt/kubernetes/cfg
mkdir /opt/kubernetes/ssl
mkdir /opt/kubernetes/logs/

cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--allow-privileged=true \
--bind-address=192.168.179.128 \
--secure-port=6443 \
--advertise-address=192.168.179.128 \
--service-cluster-ip-range=10.0.0.0/24 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.179.128:2379,https://192.168.179.129:2379,https://192.168.179.130:2379 \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem  \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"

## 参数错误的信息
Flag --logtostderr has been deprecated, will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components
Flag --log-dir has been deprecated, will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components
E0724 02:49:09.688834   33432 run.go:74] "command failed" err="[service-account-issuer is a required flag, --service-account-signing-key-file and --service-account-issuer are required flags
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
  • --logtostderr:启用日志。
  • --v:日志等级。
  • --log-dir:日志目录。
  • --etcd-servers:etcd集群地址。
  • --bind-address:监听地址。
  • --secure-port:https安全端口。
  • --advertise-address:集群通告地址。
  • --allow-privileged:启用授权。
  • --service-cluster-ip-range:Service虚拟IP地址段。
  • --enable-admission-plugins:准入控制模块。
  • --authorization-mode:认证授权,启用RBAC授权和节点自管理。
  • --enable-bootstrap-token-auth:启用TLS bootstrap机制。
  • --token-auth-file:bootstrap token文件。
  • --service-node-port-range:Sevice nodeport类型默认分配端口范围。
  • --kubelet-client-xxx:apiserver访问kubelet客户端证书。
  • --tls-xxx-file:apiserver https证书。
  • --etcd-xxxfile:连接etcd集群证书。
  • --audit-log-xxx:审计日志。

# 3 启用TLS Bootstrapping机制

# (opens new window)

  • Master上的apiserver启用TLS认证后,Node节点kubelet和kube-proxy要和kube-apiserver进行通信,必须使用CA签发的有效证书才可以,当Node节点很多的时候,这种客户端证书颁发需要大量工作,同样也会增加集群扩展复杂度。为了简化操作流程,k8s引入了TLS Bootstrapping机制来自动颁发客户端证书,kubelet会以一个低权限用户向apiserver申请证书,kubelet的证书由apiserver动态签署。
  • 制作token令牌:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
1

# 3.1 创建token文件:

cat > /opt/kubernetes/cfg/token.csv << EOF
7fdc6c01166bbda6cdafda973b638db5,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

# 4 安装kube-controller-manager

cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--cluster-signing-duration=87600h0m0s"
EOF

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28

# 5 部署kube-scheduler

# (opens new window)

  • 创建配置文件:
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true"
EOF

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure 
[Install]
WantedBy=multi-user.target
EOF
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
上次更新:2 months ago