01. 初始化系统和全局变量
设置主机名
hostnamectl set-hostname node2 # 将 node2 替换为当前主机名
如果 DNS 不支持主机名称解析,还需要在每台机器的 /etc/hosts 文件中添加主机名和 IP 的对应关系:
cat >> /etc/hosts <<EOF
10.20.250.23 node2
EOF
退出,重新登录 root 账号,可以看到主机名生效。
添加节点信任关系
本操作只需要在 node2 节点上进行,设置 root 账户可以无密码登录所有节点:
ssh-keygen -t rsa
ssh-copy-id root@<节点名或IP> # 对每个节点执行一次,将公钥分发过去
配置环境变量
由于计划将本次部署需要的所有执行文件放置在 /opt/k8s/bin 目录下,所以需要在 /etc/profile 中进行配置:
export PATH=$PATH:/opt/k8s/bin
安装依赖包
yum install -y epel-release
yum install -y chrony conntrack ipvsadm ipset jq iptables curl sysstat libseccomp wget socat git
关闭防火墙
关闭防火墙,清理防火墙规则,设置默认转发策略:
systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT
关闭 swap 分区
关闭 swap 分区,否则 kubelet 会启动失败(可以设置 kubelet 启动参数 --fail-swap-on 为 false 关闭 swap 检查):
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
关闭 SELinux
关闭 SELinux,否则 kubelet 挂载目录时可能报错 Permission denied:
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
优化内核参数
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
net.ipv4.neigh.default.gc_thresh1=1024
net.ipv4.neigh.default.gc_thresh2=2048
net.ipv4.neigh.default.gc_thresh3=4096
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
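如果执行 sysctl 时提示 net.bridge.bridge-nf-call-iptables 等键不存在,通常是 br_netfilter 内核模块尚未加载,可以先手动加载并设置开机自动加载(示意命令):
modprobe br_netfilter
cat > /etc/modules-load.d/br_netfilter.conf <<EOF
br_netfilter
EOF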
设置系统时区
timedatectl set-timezone Asia/Shanghai
设置系统时钟同步
systemctl enable chronyd
systemctl start chronyd
# 将当前的 UTC 时间写入硬件时钟
timedatectl set-local-rtc 0
# 重启依赖于系统时间的服务
systemctl restart rsyslog
systemctl restart crond
关闭无关的服务
systemctl stop postfix && systemctl disable postfix
创建相关目录
创建目录:
mkdir -p /opt/k8s/{bin,work} /etc/{kubernetes,etcd}/cert
分发集群配置参数脚本
后续使用的环境变量都定义在文件 environment.sh 中,请根据自己的机器、网络情况修改,然后拷贝到所有节点:
source environment.sh # 先修改,重要配置项都在这里
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp environment.sh root@${node_ip}:/opt/k8s/bin/
ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
done
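environment.sh 的完整内容请以实际使用的文件为准,下面给出一个与本文示例环境(单节点 10.20.250.23 / node2)对应的最小示意,变量名均为后续脚本实际引用的名称,网段、目录等取值只是假设,请按实际环境修改:
#!/usr/bin/env bash
# 集群各机器 IP 与对应主机名(bash 数组,顺序一一对应)
NODE_IPS=(10.20.250.23)
NODE_NAMES=(node2)
# etcd 集群相关:客户端地址、集群成员列表、数据与 wal 目录
export ETCD_ENDPOINTS="https://10.20.250.23:2379"
export ETCD_NODES="node2=https://10.20.250.23:2380"
export ETCD_DATA_DIR="/data/k8s/etcd/data"
export ETCD_WAL_DIR="/data/k8s/etcd/wal"
# kube-apiserver 访问地址(kubelet、kube-proxy 通过本机 kube-nginx 代理访问)
export KUBE_APISERVER="https://127.0.0.1:8443"
# 服务网段、Pod 网段、NodePort 范围、集群 DNS 相关(各网段不能重叠)
export SERVICE_CIDR="10.254.0.0/16"
export CLUSTER_CIDR="172.30.0.0/16"
export NODE_PORT_RANGE="30000-32767"
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
export CLUSTER_DNS_SVC_IP="10.254.0.2"
export CLUSTER_DNS_DOMAIN="cluster.local"
# 各组件数据目录
export K8S_DIR="/data/k8s/k8s"
export CONTAINERD_DIR="/data/k8s/containerd"
export DOCKER_DIR="/data/k8s/docker"
# secret 加密密钥,可用 head -c 32 /dev/urandom | base64 生成后固定写入,避免每次 source 变化
export ENCRYPTION_KEY="<生成后填入>"
export PATH=/opt/k8s/bin:$PATH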
02. 创建 CA 根证书和秘钥
为确保安全,kubernetes 系统各组件需要使用 x509 证书对通信进行加密和认证。
CA (Certificate Authority) 是自签名的根证书,用来签名后续创建的其它证书。
CA 证书是集群所有节点共享的,只需要创建一次,后续用它签名其它所有证书。
本文档使用 CloudFlare 的 PKI 工具集 cfssl 创建所有证书。
创建配置文件
CA 配置文件用于配置根证书的使用场景 (profile) 和具体参数 (usage,过期时间、服务端认证、客户端认证、加密等):
cd /opt/k8s/work
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
- signing:表示该证书可用于签名其它证书(生成的 ca.pem 证书中 CA=TRUE);
- server auth:表示 client 可以用该证书对 server 提供的证书进行验证;
- client auth:表示 server 可以用该证书对 client 提供的证书进行验证;
- "expiry": "876000h":证书有效期设置为 100 年;
创建证书签名请求文件
cd /opt/k8s/work
cat > ca-csr.json <<EOF
{
"CN": "kubernetes-ca",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "opsnull"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
- CN(Common Name):kube-apiserver 从证书中提取该字段作为请求的用户名 (User Name),浏览器使用该字段验证网站是否合法;
- O(Organization):kube-apiserver 从证书中提取该字段作为请求用户所属的组 (Group);
- kube-apiserver 将提取的 User、Group 作为 RBAC 授权的用户标识;
注意:
- 不同证书 csr 文件的 CN、C、ST、L、O、OU 组合必须不同,否则可能出现 PEER'S CERTIFICATE HAS AN INVALID SIGNATURE 错误;
- 后续创建证书的 csr 文件时,CN 都不相同(C、ST、L、O、OU 相同),以达到区分的目的;
生成 CA 证书和私钥
cd /opt/k8s/work
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*
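可以用 cfssl 或 openssl 查看生成的根证书,确认 CA=TRUE、有效期等字段符合预期(示意命令):
cfssl certinfo -cert ca.pem
openssl x509 -noout -text -in ca.pem | grep -E 'CA:|Not After'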
分发证书文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /etc/kubernetes/cert"
scp ca*.pem ca-config.json root@${node_ip}:/etc/kubernetes/cert
done
03. 安装和配置 kubectl
创建 admin 证书和私钥
kubectl 使用 https 协议与 kube-apiserver 进行安全通信,kube-apiserver 对 kubectl 请求包含的证书进行认证和授权。
kubectl 后续用于集群管理,所以这里创建具有最高权限的 admin 证书。
创建证书签名请求:
cd /opt/k8s/work
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:masters",
"OU": "opsnull"
}
]
}
EOF
- O 为 system:masters:kube-apiserver 收到使用该证书的客户端请求后,为请求添加组(Group)认证标识 system:masters;
- 预定义的 ClusterRoleBinding cluster-admin 将 Group system:masters 与 Role cluster-admin 绑定,该 Role 授予操作集群所需的最高权限;
- 该证书只会被 kubectl 当做 client 证书使用,所以 hosts 字段为空;
生成证书和私钥:
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*
- 警告消息 [WARNING] This certificate lacks a "hosts" field. 可以忽略;
创建 kubeconfig 文件
kubectl 使用 kubeconfig 文件访问 apiserver,该文件包含 kube-apiserver 的地址和认证信息(CA 证书和客户端证书):
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
# 设置集群参数
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/work/ca.pem \
--embed-certs=true \
--server=https://${NODE_IPS[0]}:6443 \
--kubeconfig=kubectl.kubeconfig
# 设置客户端认证参数
kubectl config set-credentials admin \
--client-certificate=/opt/k8s/work/admin.pem \
--client-key=/opt/k8s/work/admin-key.pem \
--embed-certs=true \
--kubeconfig=kubectl.kubeconfig
# 设置上下文参数
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=kubectl.kubeconfig
# 设置默认上下文
kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
- --certificate-authority:验证 kube-apiserver 证书的根证书;
- --client-certificate、--client-key:刚生成的 admin 证书和私钥,与 kube-apiserver https 通信时使用;
- --embed-certs=true:将 ca.pem 和 admin.pem 证书内容嵌入到生成的 kubectl.kubeconfig 文件中(否则写入的是证书文件路径,后续拷贝 kubeconfig 到其它机器时还需要单独拷贝证书文件,不方便);
- --server:指定 kube-apiserver 的地址,这里指向第一个节点上的服务;
分发 kubeconfig 文件
分发到所有使用 kubectl 命令的节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ~/.kube"
scp kubectl.kubeconfig root@${node_ip}:~/.kube/config
done
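分发完成后,可以在任一节点检查 kubeconfig 内容是否完整(证书已嵌入,不依赖本地证书文件);此时 kube-apiserver 尚未部署,kubectl 访问集群会失败,属正常现象:
kubectl config view
kubectl config current-context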
04. 部署 etcd 集群
etcd 是基于 Raft 的分布式 KV 存储系统,由 CoreOS 开发,常用于服务发现、共享配置以及并发控制(如 leader 选举、分布式锁等)。
kubernetes 使用 etcd 集群持久化存储所有 API 对象、运行数据。
本文档介绍部署一个高可用 etcd 集群的步骤:
- 下载和分发 etcd 二进制文件;
- 创建 etcd 集群各节点的 x509 证书,用于加密客户端(如 etcdctl) 与 etcd 集群、etcd 集群之间的通信;
- 创建 etcd 的 systemd unit 文件,配置服务参数;
- 检查集群工作状态;
注意:
- flanneld 与本文档安装的 etcd v3.4.x 不兼容,如果要安装 flanneld(本文档使用 calico),则需要将 etcd 降级到 v3.3.x 版本;
创建 etcd 证书和私钥
创建证书签名请求:
cd /opt/k8s/work
cat > etcd-csr.json <<EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"10.20.250.23"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "opsnull"
}
]
}
EOF
- hosts:指定授权使用该证书的 etcd 节点 IP 列表,需要将 etcd 集群所有节点 IP 都列在其中;
生成证书和私钥:
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls etcd*pem
分发生成的证书和私钥到各 etcd 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /etc/etcd/cert"
scp etcd*.pem root@${node_ip}:/etc/etcd/cert/
done
创建 etcd 的 systemd unit 模板文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > etcd.service.template <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=${ETCD_DATA_DIR}
ExecStart=/opt/k8s/bin/etcd \\
--data-dir=${ETCD_DATA_DIR} \\
--wal-dir=${ETCD_WAL_DIR} \\
--name=##NODE_NAME## \\
--cert-file=/etc/etcd/cert/etcd.pem \\
--key-file=/etc/etcd/cert/etcd-key.pem \\
--trusted-ca-file=/etc/kubernetes/cert/ca.pem \\
--peer-cert-file=/etc/etcd/cert/etcd.pem \\
--peer-key-file=/etc/etcd/cert/etcd-key.pem \\
--peer-trusted-ca-file=/etc/kubernetes/cert/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth \\
--listen-peer-urls=https://##NODE_IP##:2380 \\
--initial-advertise-peer-urls=https://##NODE_IP##:2380 \\
--listen-client-urls=https://##NODE_IP##:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://##NODE_IP##:2379 \\
--initial-cluster-token=etcd-cluster-0 \\
--initial-cluster=${ETCD_NODES} \\
--initial-cluster-state=new \\
--auto-compaction-mode=periodic \\
--auto-compaction-retention=1 \\
--max-request-bytes=33554432 \\
--quota-backend-bytes=6442450944 \\
--heartbeat-interval=250 \\
--election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
- WorkingDirectory、--data-dir:指定工作目录和数据目录为 ${ETCD_DATA_DIR},需在启动服务前创建这个目录;
- --wal-dir:指定 wal 目录,为了提高性能,一般使用 SSD 或者和 --data-dir 不同的磁盘;
- --name:指定节点名称,当 --initial-cluster-state 值为 new 时,--name 的参数值必须位于 --initial-cluster 列表中;
- --cert-file、--key-file:etcd server 与 client 通信时使用的证书和私钥;
- --trusted-ca-file:签名 client 证书的 CA 证书,用于验证 client 证书;
- --peer-cert-file、--peer-key-file:etcd 与 peer 通信使用的证书和私钥;
- --peer-trusted-ca-file:签名 peer 证书的 CA 证书,用于验证 peer 证书;
为各节点创建和分发 etcd systemd unit 文件
替换模板文件中的变量,为各节点创建 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" etcd.service.template > etcd-${NODE_IPS[i]}.service
done
ls *.service
- NODE_NAMES 和 NODE_IPS 为相同长度的 bash 数组,分别为节点名称和对应的 IP;
分发生成的 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp etcd-${node_ip}.service root@${node_ip}:/etc/systemd/system/etcd.service
done
启动 etcd 服务
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${ETCD_DATA_DIR} ${ETCD_WAL_DIR}"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd " &
done
- 必须先创建 etcd 数据目录和工作目录;
- etcd 进程首次启动时会等待其它节点的 etcd 加入集群,命令 systemctl start etcd 会卡住一段时间,为正常现象;
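所有节点启动完成后,可以验证 etcd 集群健康状态(示意命令,假设 etcdctl 已随 etcd 二进制一起分发到 /opt/k8s/bin):
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
--endpoints=https://${node_ip}:2379 \
--cacert=/etc/kubernetes/cert/ca.pem \
--cert=/etc/etcd/cert/etcd.pem \
--key=/etc/etcd/cert/etcd-key.pem endpoint health
done
预期每个节点输出 is healthy 字样。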
05 部署 kube-apiserver
创建 kubernetes-master 证书和私钥
创建证书签名请求:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kubernetes-csr.json <<EOF
{
"CN": "kubernetes-master",
"hosts": [
"127.0.0.1",
"10.20.250.23",
"${CLUSTER_KUBERNETES_SVC_IP}",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local.",
"kubernetes.default.svc.${CLUSTER_DNS_DOMAIN}."
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "opsnull"
}
]
}
EOF
- hosts 字段指定授权使用该证书的 IP 和域名列表,这里列出了 master 节点 IP、kubernetes 服务的 IP 和域名;
生成证书和私钥:
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*pem
将生成的证书和私钥文件拷贝到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /etc/kubernetes/cert"
scp kubernetes*.pem root@${node_ip}:/etc/kubernetes/cert/
done
创建加密配置文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
- resources:
- secrets
providers:
- aescbc:
keys:
- name: key1
secret: ${ENCRYPTION_KEY}
- identity: {}
EOF
将加密配置文件拷贝到 master 节点的 /etc/kubernetes 目录下:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp encryption-config.yaml root@${node_ip}:/etc/kubernetes/
done
创建审计策略文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk, so drop them.
- level: None
resources:
- group: ""
resources:
- endpoints
- services
- services/status
users:
- 'system:kube-proxy'
verbs:
- watch
- level: None
resources:
- group: ""
resources:
- nodes
- nodes/status
userGroups:
- 'system:nodes'
verbs:
- get
- level: None
namespaces:
- kube-system
resources:
- group: ""
resources:
- endpoints
users:
- 'system:kube-controller-manager'
- 'system:kube-scheduler'
- 'system:serviceaccount:kube-system:endpoint-controller'
verbs:
- get
- update
- level: None
resources:
- group: ""
resources:
- namespaces
- namespaces/status
- namespaces/finalize
users:
- 'system:apiserver'
verbs:
- get
# Don't log HPA fetching metrics.
- level: None
resources:
- group: metrics.k8s.io
users:
- 'system:kube-controller-manager'
verbs:
- get
- list
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- '/healthz*'
- /version
- '/swagger*'
# Don't log events requests.
- level: None
resources:
- group: ""
resources:
- events
# node and pod status calls from nodes are high-volume and can be large, don't log responses
# for expected updates from nodes
- level: Request
omitStages:
- RequestReceived
resources:
- group: ""
resources:
- nodes/status
- pods/status
users:
- kubelet
- 'system:node-problem-detector'
- 'system:serviceaccount:kube-system:node-problem-detector'
verbs:
- update
- patch
- level: Request
omitStages:
- RequestReceived
resources:
- group: ""
resources:
- nodes/status
- pods/status
userGroups:
- 'system:nodes'
verbs:
- update
- patch
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
omitStages:
- RequestReceived
users:
- 'system:serviceaccount:kube-system:namespace-controller'
verbs:
- deletecollection
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
omitStages:
- RequestReceived
resources:
- group: ""
resources:
- secrets
- configmaps
- group: authentication.k8s.io
resources:
- tokenreviews
# Get responses can be large; skip them.
- level: Request
omitStages:
- RequestReceived
resources:
- group: ""
- group: admissionregistration.k8s.io
- group: apiextensions.k8s.io
- group: apiregistration.k8s.io
- group: apps
- group: authentication.k8s.io
- group: authorization.k8s.io
- group: autoscaling
- group: batch
- group: certificates.k8s.io
- group: extensions
- group: metrics.k8s.io
- group: networking.k8s.io
- group: policy
- group: rbac.authorization.k8s.io
- group: scheduling.k8s.io
- group: settings.k8s.io
- group: storage.k8s.io
verbs:
- get
- list
- watch
# Default level for known APIs
- level: RequestResponse
omitStages:
- RequestReceived
resources:
- group: ""
- group: admissionregistration.k8s.io
- group: apiextensions.k8s.io
- group: apiregistration.k8s.io
- group: apps
- group: authentication.k8s.io
- group: authorization.k8s.io
- group: autoscaling
- group: batch
- group: certificates.k8s.io
- group: extensions
- group: metrics.k8s.io
- group: networking.k8s.io
- group: policy
- group: rbac.authorization.k8s.io
- group: scheduling.k8s.io
- group: settings.k8s.io
- group: storage.k8s.io
# Default level for all other requests.
- level: Metadata
omitStages:
- RequestReceived
EOF
分发审计策略文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp audit-policy.yaml root@${node_ip}:/etc/kubernetes/audit-policy.yaml
done
创建后续访问 metrics-server 或 kube-prometheus 使用的证书
创建证书签名请求:
cd /opt/k8s/work
cat > proxy-client-csr.json <<EOF
{
"CN": "aggregator",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "opsnull"
}
]
}
EOF
- CN 名称需要位于 kube-apiserver 的 --requestheader-allowed-names 参数中,否则后续访问 metrics 时会提示权限不足。
生成证书和私钥:
cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \
-ca-key=/etc/kubernetes/cert/ca-key.pem \
-config=/etc/kubernetes/cert/ca-config.json \
-profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client
ls proxy-client*.pem
将生成的证书和私钥文件拷贝到所有 master 节点:
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp proxy-client*.pem root@${node_ip}:/etc/kubernetes/cert/
done
创建 kube-apiserver systemd unit 模板文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-apiserver.service.template <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=${K8S_DIR}/kube-apiserver
ExecStart=/opt/k8s/bin/kube-apiserver \\
--advertise-address=##NODE_IP## \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--feature-gates=DynamicAuditing=true \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--encryption-provider-config=/etc/kubernetes/encryption-config.yaml \\
--etcd-cafile=/etc/kubernetes/cert/ca.pem \\
--etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \\
--etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \\
--etcd-servers=${ETCD_ENDPOINTS} \\
--bind-address=##NODE_IP## \\
--secure-port=6443 \\
--tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \\
--insecure-port=0 \\
--audit-dynamic-configuration \\
--audit-log-maxage=15 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-truncate-enabled \\
--audit-log-path=${K8S_DIR}/kube-apiserver/audit.log \\
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
--profiling \\
--anonymous-auth=false \\
--client-ca-file=/etc/kubernetes/cert/ca.pem \\
--enable-bootstrap-token-auth \\
--requestheader-allowed-names="aggregator" \\
--requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
--requestheader-extra-headers-prefix="X-Remote-Extra-" \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--service-account-key-file=/etc/kubernetes/cert/ca.pem \\
--authorization-mode=Node,RBAC \\
--runtime-config=api/all=true \\
--enable-admission-plugins=NodeRestriction \\
--allow-privileged=true \\
--apiserver-count=3 \\
--event-ttl=168h \\
--kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \\
--kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \\
--kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \\
--kubelet-https=true \\
--kubelet-timeout=10s \\
--proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \\
--service-cluster-ip-range=${SERVICE_CIDR} \\
--service-node-port-range=${NODE_PORT_RANGE} \\
--logtostderr=true \\
--v=2
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
- --advertise-address:apiserver 对外通告的 IP(kubernetes 服务后端节点 IP);
- --default-*-toleration-seconds:设置节点异常相关的阈值;
- --max-*-requests-inflight:请求相关的最大阈值;
- --etcd-*:访问 etcd 的证书和 etcd 服务器地址;
- --bind-address:https 监听的 IP,不能为 127.0.0.1,否则外界不能访问它的安全端口 6443;
- --secure-port:https 监听端口;
- --insecure-port=0:关闭监听 http 非安全端口(8080);
- --tls-*-file:指定 apiserver 使用的证书、私钥和 CA 文件;
- --audit-*:配置审计策略和审计日志文件相关的参数;
- --client-ca-file:验证 client(kube-controller-manager、kube-scheduler、kubelet、kube-proxy 等)请求所带的证书;
- --enable-bootstrap-token-auth:启用 kubelet bootstrap 的 token 认证;
- --requestheader-*:kube-apiserver 的 aggregator layer 相关的配置参数,proxy-client & HPA 需要使用;
- --requestheader-client-ca-file:用于签名 --proxy-client-cert-file 和 --proxy-client-key-file 指定证书的 CA 证书,在启用了 metric aggregator 时使用;
- --requestheader-allowed-names:不能为空,值为逗号分割的 --proxy-client-cert-file 证书的 CN 名称,这里设置为 "aggregator";
- --service-account-key-file:签名 ServiceAccount Token 的公钥文件,kube-controller-manager 的 --service-account-private-key-file 指定私钥文件,两者配对使用;
- --runtime-config=api/all=true:启用所有版本的 APIs,如 autoscaling/v2alpha1;
- --authorization-mode=Node,RBAC、--anonymous-auth=false:开启 Node 和 RBAC 授权模式,拒绝未授权的请求;
- --enable-admission-plugins:启用一些默认关闭的 plugins;
- --allow-privileged:允许运行 privileged 权限的容器;
- --apiserver-count=3:指定 apiserver 实例的数量;
- --event-ttl:指定 events 的保存时间;
- --kubelet-*:如果指定,则使用 https 访问 kubelet APIs;需要为证书对应的用户(上面 kubernetes*.pem 证书的用户为 kubernetes-master)定义 RBAC 规则,否则访问 kubelet API 时提示未授权;
- --proxy-client-*:apiserver 访问 metrics-server 使用的证书;
- --service-cluster-ip-range:指定 Service Cluster IP 地址段;
- --service-node-port-range:指定 NodePort 的端口范围;
如果 kube-apiserver 机器没有运行 kube-proxy,则还需要添加 --enable-aggregator-routing=true 参数;
关于 --requestheader-XXX 相关参数,参考:
- https://github.com/kubernetes-incubator/apiserver-builder/blob/master/docs/concepts/auth.md
- https://docs.bitnami.com/kubernetes/how-to/configure-autoscaling-custom-metrics/
注意:
- --requestheader-client-ca-file 指定的 CA 证书,必须具有 client auth 和 server auth 用途;
- 如果 --requestheader-allowed-names 不为空,且 --proxy-client-cert-file 证书的 CN 名称不在 allowed-names 中,则后续查看 node 或 pods 的 metrics 失败,提示:
$ kubectl top nodes
Error from server (Forbidden): nodes.metrics.k8s.io is forbidden: User "aggregator" cannot list resource "nodes" in API group "metrics.k8s.io" at the cluster scope
为各节点创建和分发 kube-apiserver systemd unit 文件
替换模板文件中的变量,为各节点生成 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-apiserver.service.template > kube-apiserver-${NODE_IPS[i]}.service
done
ls kube-apiserver*.service
- NODE_NAMES 和 NODE_IPS 为相同长度的 bash 数组,分别为节点名称和对应的 IP;
分发生成的 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-apiserver-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-apiserver.service
done
启动 kube-apiserver 服务
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-apiserver"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver"
done
检查 kube-apiserver 运行状态
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status kube-apiserver |grep 'Active:'"
done
确保状态为 active (running),否则查看日志,确认原因:
journalctl -u kube-apiserver
检查集群状态
$ kubectl cluster-info
Kubernetes master is running at https://172.27.138.251:6443
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
$ kubectl get all --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 3m53s
$ kubectl get componentstatuses
NAME AGE
controller-manager <unknown>
scheduler <unknown>
etcd-0 <unknown>
etcd-2 <unknown>
etcd-1 <unknown>
- Kubernetes 1.16.6 存在 Bug,导致返回结果一直为 <unknown>,但 kubectl get cs -o yaml 可以返回正确结果;
检查 kube-apiserver 监听的端口
$ sudo netstat -lnpt|grep kube
tcp 0 0 172.27.138.251:6443 0.0.0.0:* LISTEN 101442/kube-apiserv
- 6443: 接收 https 请求的安全端口,对所有请求做认证和授权;
- 由于关闭了非安全端口,故没有监听 8080;
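也可以用 admin 证书直接请求 apiserver 的 healthz 接口,进一步确认服务可用(示意命令,IP 请替换为实际 master 节点地址):
curl -s --cacert /opt/k8s/work/ca.pem \
--cert /opt/k8s/work/admin.pem \
--key /opt/k8s/work/admin-key.pem \
https://10.20.250.23:6443/healthz
预期输出 ok。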
06 部署 kube-controller-manager
创建 kube-controller-manager 证书和私钥
创建证书签名请求:
cd /opt/k8s/work
cat > kube-controller-manager-csr.json <<EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"10.20.250.23"
],
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:kube-controller-manager",
"OU": "opsnull"
}
]
}
EOF
- hosts 列表包含所有 kube-controller-manager 节点 IP;
- CN 和 O 均为 system:kube-controller-manager,kubernetes 内置的 ClusterRoleBinding system:kube-controller-manager 赋予 kube-controller-manager 工作所需的权限。
生成证书和私钥:
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*pem
将生成的证书和私钥分发到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-controller-manager*.pem root@${node_ip}:/etc/kubernetes/cert/
done
创建和分发 kubeconfig 文件
kube-controller-manager 使用 kubeconfig 文件访问 apiserver,该文件提供了 apiserver 地址、嵌入的 CA 证书和 kube-controller-manager 证书等信息:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/work/ca.pem \
--embed-certs=true \
--server="https://##NODE_IP##:6443" \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
- kube-controller-manager 与 kube-apiserver 混布,故直接通过节点 IP 访问 kube-apiserver;
分发 kubeconfig 到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
sed -e "s/##NODE_IP##/${node_ip}/" kube-controller-manager.kubeconfig > kube-controller-manager-${node_ip}.kubeconfig
scp kube-controller-manager-${node_ip}.kubeconfig root@${node_ip}:/etc/kubernetes/kube-controller-manager.kubeconfig
done
创建 kube-controller-manager systemd unit 模板文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-controller-manager.service.template <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
WorkingDirectory=${K8S_DIR}/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \\
--profiling \\
--cluster-name=kubernetes \\
--controllers=*,bootstrapsigner,tokencleaner \\
--kube-api-qps=1000 \\
--kube-api-burst=2000 \\
--leader-elect \\
--use-service-account-credentials\\
--concurrent-service-syncs=2 \\
--bind-address=##NODE_IP## \\
--secure-port=10252 \\
--tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \\
--port=0 \\
--authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
--client-ca-file=/etc/kubernetes/cert/ca.pem \\
--requestheader-allowed-names="aggregator" \\
--requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
--requestheader-extra-headers-prefix="X-Remote-Extra-" \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
--cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\
--experimental-cluster-signing-duration=876000h \\
--horizontal-pod-autoscaler-sync-period=10s \\
--concurrent-deployment-syncs=10 \\
--concurrent-gc-syncs=30 \\
--node-cidr-mask-size=24 \\
--service-cluster-ip-range=${SERVICE_CIDR} \\
--pod-eviction-timeout=6m \\
--terminated-pod-gc-threshold=10000 \\
--root-ca-file=/etc/kubernetes/cert/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
--logtostderr=true \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
- --port=0:关闭监听非安全端口(http),同时 --address 参数无效,--bind-address 参数有效;
- --secure-port=10252、--bind-address=##NODE_IP##:在节点 IP 上监听 10252 端口的 https /metrics 请求;
- --kubeconfig:指定 kubeconfig 文件路径,kube-controller-manager 使用它连接和验证 kube-apiserver;
- --authentication-kubeconfig 和 --authorization-kubeconfig:kube-controller-manager 使用它连接 apiserver,对 client 的请求进行认证和授权。kube-controller-manager 不再使用 --tls-ca-file 对请求 https metrics 的 Client 证书进行校验。如果没有配置这两个 kubeconfig 参数,则 client 连接 kube-controller-manager https 端口的请求会被拒绝(提示权限不足);
- --cluster-signing-*-file:签名 TLS Bootstrap 创建的证书;
- --experimental-cluster-signing-duration:指定 TLS Bootstrap 证书的有效期;
- --root-ca-file:放置到容器 ServiceAccount 中的 CA 证书,用来对 kube-apiserver 的证书进行校验;
- --service-account-private-key-file:签名 ServiceAccount 中 Token 的私钥文件,必须和 kube-apiserver 的 --service-account-key-file 指定的公钥文件配对使用;
- --service-cluster-ip-range:指定 Service Cluster IP 网段,必须和 kube-apiserver 中的同名参数一致;
- --leader-elect=true:集群运行模式,启用选举功能;被选为 leader 的节点负责处理工作,其它节点为阻塞状态;
- --controllers=*,bootstrapsigner,tokencleaner:启用的控制器列表,tokencleaner 用于自动清理过期的 Bootstrap token;
- --horizontal-pod-autoscaler-*:custom metrics 相关参数,支持 autoscaling/v2alpha1;
- --tls-cert-file、--tls-private-key-file:使用 https 输出 metrics 时使用的 Server 证书和私钥;
- --use-service-account-credentials=true:kube-controller-manager 中各 controller 使用 serviceaccount 访问 kube-apiserver;
为各节点创建和分发 kube-controller-mananger systemd unit 文件
替换模板文件中的变量,为各节点创建 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-controller-manager.service.template > kube-controller-manager-${NODE_IPS[i]}.service
done
ls kube-controller-manager*.service
分发到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-controller-manager-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-controller-manager.service
done
启动 kube-controller-manager 服务
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-controller-manager"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager"
done
检查服务运行状态
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status kube-controller-manager|grep Active"
done
确保状态为 active (running),否则查看日志,确认原因:
journalctl -u kube-controller-manager
kube-controller-manager 监听 10252 端口,接收 https 请求:
$ sudo netstat -lnpt | grep kube-cont
tcp 0 0 172.27.138.251:10252 0.0.0.0:* LISTEN 108977/kube-control
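可以用 admin 证书请求该端口的 /metrics 接口,确认认证、授权配置生效(示意命令,IP 请替换为实际节点地址):
curl -s --cacert /opt/k8s/work/ca.pem \
--cert /opt/k8s/work/admin.pem \
--key /opt/k8s/work/admin-key.pem \
https://10.20.250.23:10252/metrics | head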
07 部署 kube-scheduler
创建 kube-scheduler 证书和私钥
创建证书签名请求:
cd /opt/k8s/work
cat > kube-scheduler-csr.json <<EOF
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"10.20.250.23"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:kube-scheduler",
"OU": "opsnull"
}
]
}
EOF
- hosts 列表包含所有 kube-scheduler 节点 IP;
- CN 和 O 均为 system:kube-scheduler,kubernetes 内置的 ClusterRoleBinding system:kube-scheduler 将赋予 kube-scheduler 工作所需的权限;
生成证书和私钥:
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*pem
将生成的证书和私钥分发到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-scheduler*.pem root@${node_ip}:/etc/kubernetes/cert/
done
创建和分发 kubeconfig 文件
kube-scheduler 使用 kubeconfig 文件访问 apiserver,该文件提供了 apiserver 地址、嵌入的 CA 证书和 kube-scheduler 证书:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/work/ca.pem \
--embed-certs=true \
--server="https://##NODE_IP##:6443" \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
分发 kubeconfig 到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
sed -e "s/##NODE_IP##/${node_ip}/" kube-scheduler.kubeconfig > kube-scheduler-${node_ip}.kubeconfig
scp kube-scheduler-${node_ip}.kubeconfig root@${node_ip}:/etc/kubernetes/kube-scheduler.kubeconfig
done
创建 kube-scheduler 配置文件
cd /opt/k8s/work
cat >kube-scheduler.yaml.template <<EOF
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
bindTimeoutSeconds: 600
clientConnection:
burst: 200
kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
qps: 100
enableContentionProfiling: false
enableProfiling: true
hardPodAffinitySymmetricWeight: 1
healthzBindAddress: ##NODE_IP##:10251
leaderElection:
leaderElect: true
metricsBindAddress: ##NODE_IP##:10251
EOF
- clientConnection.kubeconfig:指定 kubeconfig 文件路径,kube-scheduler 使用它连接和验证 kube-apiserver;
- leaderElection.leaderElect=true:集群运行模式,启用选举功能;被选为 leader 的节点负责处理工作,其它节点为阻塞状态;
替换模板文件中的变量:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-scheduler.yaml.template > kube-scheduler-${NODE_IPS[i]}.yaml
done
ls kube-scheduler*.yaml
- NODE_NAMES 和 NODE_IPS 为相同长度的 bash 数组,分别为节点名称和对应的 IP;
分发 kube-scheduler 配置文件到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-scheduler-${node_ip}.yaml root@${node_ip}:/etc/kubernetes/kube-scheduler.yaml
done
- 重命名为 kube-scheduler.yaml;
创建 kube-scheduler systemd unit 模板文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-scheduler.service.template <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
WorkingDirectory=${K8S_DIR}/kube-scheduler
ExecStart=/opt/k8s/bin/kube-scheduler \\
--config=/etc/kubernetes/kube-scheduler.yaml \\
--bind-address=##NODE_IP## \\
--secure-port=10259 \\
--port=0 \\
--tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem \\
--tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem \\
--authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
--client-ca-file=/etc/kubernetes/cert/ca.pem \\
--requestheader-allowed-names="" \\
--requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
--requestheader-extra-headers-prefix="X-Remote-Extra-" \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
--logtostderr=true \\
--v=2
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
EOF
为各节点创建和分发 kube-scheduler systemd unit 文件
替换模板文件中的变量,为各节点创建 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-scheduler.service.template > kube-scheduler-${NODE_IPS[i]}.service
done
ls kube-scheduler*.service
分发 systemd unit 文件到所有 master 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-scheduler-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-scheduler.service
done
启动 kube-scheduler 服务
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-scheduler"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler"
done
检查服务运行状态
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status kube-scheduler|grep Active"
done
确保状态为 active (running),否则查看日志,确认原因:
journalctl -u kube-scheduler
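kube-scheduler 在安全端口 10259 输出 metrics,同样可以用 admin 证书验证(示意命令,IP 请替换为实际节点地址):
curl -s --cacert /opt/k8s/work/ca.pem \
--cert /opt/k8s/work/admin.pem \
--key /opt/k8s/work/admin-key.pem \
https://10.20.250.23:10259/metrics | head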
08 部署高可用 nginx 代理
基于 nginx 代理的 kube-apiserver 高可用方案
- 控制节点的 kube-controller-manager、kube-scheduler 是多实例部署且连接本机的 kube-apiserver,所以只要有一个实例正常,就可以保证高可用;
- 集群内的 Pod 使用 K8S 服务域名 kubernetes 访问 kube-apiserver, kube-dns 会自动解析出多个 kube-apiserver 节点的 IP,所以也是高可用的;
- 在每个节点起一个 nginx 进程,后端对接多个 apiserver 实例,nginx 对它们做健康检查和负载均衡;
- kubelet、kube-proxy 通过本地的 nginx(监听 127.0.0.1)访问 kube-apiserver,从而实现 kube-apiserver 的高可用;
下载和编译 nginx
下载源码:
cd /opt/k8s/work
wget http://nginx.org/download/nginx-1.15.3.tar.gz
tar -xzvf nginx-1.15.3.tar.gz
配置编译参数:
cd /opt/k8s/work/nginx-1.15.3
mkdir nginx-prefix
yum install -y gcc make
./configure --with-stream --without-http --prefix=$(pwd)/nginx-prefix --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
- --with-stream:开启 4 层透明转发(TCP Proxy)功能;
- --without-xxx:关闭所有其他功能,这样生成的动态链接二进制程序依赖最小;
输出:
Configuration summary
+ PCRE library is not used
+ OpenSSL library is not used
+ zlib library is not used
nginx path prefix: "/root/tmp/nginx-1.15.3/nginx-prefix"
nginx binary file: "/root/tmp/nginx-1.15.3/nginx-prefix/sbin/nginx"
nginx modules path: "/root/tmp/nginx-1.15.3/nginx-prefix/modules"
nginx configuration prefix: "/root/tmp/nginx-1.15.3/nginx-prefix/conf"
nginx configuration file: "/root/tmp/nginx-1.15.3/nginx-prefix/conf/nginx.conf"
nginx pid file: "/root/tmp/nginx-1.15.3/nginx-prefix/logs/nginx.pid"
nginx error log file: "/root/tmp/nginx-1.15.3/nginx-prefix/logs/error.log"
nginx http access log file: "/root/tmp/nginx-1.15.3/nginx-prefix/logs/access.log"
nginx http client request body temporary files: "client_body_temp"
nginx http proxy temporary files: "proxy_temp"
编译和安装:
cd /opt/k8s/work/nginx-1.15.3
make && make install
验证编译的 nginx
cd /opt/k8s/work/nginx-1.15.3
./nginx-prefix/sbin/nginx -v
输出:
nginx version: nginx/1.15.3
安装和部署 nginx
创建目录结构:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /opt/k8s/kube-nginx/{conf,logs,sbin}"
done
拷贝二进制程序:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /opt/k8s/kube-nginx/{conf,logs,sbin}"
scp /opt/k8s/work/nginx-1.15.3/nginx-prefix/sbin/nginx root@${node_ip}:/opt/k8s/kube-nginx/sbin/kube-nginx
ssh root@${node_ip} "chmod a+x /opt/k8s/kube-nginx/sbin/*"
done
- 重命名二进制文件为 kube-nginx;
配置 nginx,开启 4 层透明转发功能:
cd /opt/k8s/work
cat > kube-nginx.conf << \EOF
worker_processes 1;
events {
worker_connections 1024;
}
stream {
upstream backend {
hash $remote_addr consistent;
server 10.20.250.23:6443 max_fails=3 fail_timeout=30s;
}
server {
listen *:8443;
proxy_connect_timeout 1s;
proxy_pass backend;
}
}
EOF
- upstream backend 中的 server 列表为集群中各 kube-apiserver 的节点 IP,需要根据实际情况修改;
分发配置文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-nginx.conf root@${node_ip}:/opt/k8s/kube-nginx/conf/kube-nginx.conf
done
配置 systemd unit 文件,启动服务
配置 kube-nginx systemd unit 文件:
cd /opt/k8s/work
cat > kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
ExecStartPre=/opt/k8s/kube-nginx/sbin/kube-nginx -c /opt/k8s/kube-nginx/conf/kube-nginx.conf -p /opt/k8s/kube-nginx -t
ExecStart=/opt/k8s/kube-nginx/sbin/kube-nginx -c /opt/k8s/kube-nginx/conf/kube-nginx.conf -p /opt/k8s/kube-nginx
ExecReload=/opt/k8s/kube-nginx/sbin/kube-nginx -c /opt/k8s/kube-nginx/conf/kube-nginx.conf -p /opt/k8s/kube-nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
分发 systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp kube-nginx.service root@${node_ip}:/etc/systemd/system/
done
启动 kube-nginx 服务:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-nginx && systemctl restart kube-nginx"
done
检查 kube-nginx 服务运行状态
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status kube-nginx |grep 'Active:'"
done
确保状态为 active (running),否则查看日志,确认原因:
journalctl -u kube-nginx
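kube-nginx 启动后,可以在节点本地请求 8443 端口,确认请求确实被转发到了 kube-apiserver(示意命令;由于 apiserver 关闭了匿名访问,未携带证书的请求返回 401 Unauthorized 属于预期结果):
curl -k https://127.0.0.1:8443/healthz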
09 部署 containerd 组件或者部署 docker
containerd 实现了 kubernetes 的 Container Runtime Interface (CRI) 接口,提供容器运行时核心功能,如镜像管理、容器管理等,相比 dockerd 更加简单、健壮和可移植。
创建和分发 containerd 配置文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat << EOF | sudo tee containerd-config.toml
version = 2
root = "${CONTAINERD_DIR}/root"
state = "${CONTAINERD_DIR}/state"
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.cn-beijing.aliyuncs.com/images_k8s/pause-amd64:3.1"
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/k8s/bin"
conf_dir = "/etc/cni/net.d"
[plugins."io.containerd.runtime.v1.linux"]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
EOF
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /etc/containerd/ ${CONTAINERD_DIR}/{root,state}"
scp containerd-config.toml root@${node_ip}:/etc/containerd/config.toml
done
创建 containerd systemd unit 文件
cd /opt/k8s/work
cat <<EOF | sudo tee containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
Environment="PATH=/opt/k8s/bin:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStartPre=/sbin/modprobe overlay
ExecStart=/opt/k8s/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
分发 systemd unit 文件,启动 containerd 服务
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp containerd.service root@${node_ip}:/etc/systemd/system
ssh root@${node_ip} "systemctl enable containerd && systemctl restart containerd"
done
创建和分发 crictl 配置文件
crictl 是兼容 CRI 容器运行时的命令行工具,提供类似于 docker 命令的功能。具体参考官方文档。
cd /opt/k8s/work
cat << EOF | sudo tee crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
分发到所有 worker 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp crictl.yaml root@${node_ip}:/etc/crictl.yaml
done
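分发完成后,可以在任一节点用 crictl 验证 containerd 是否工作正常(示意命令,假设 crictl 二进制已放置在 /opt/k8s/bin 下):
crictl info # 查看运行时状态和配置
crictl images # 列出已拉取的镜像
crictl ps -a # 列出容器,首次部署通常为空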
部署docker(可选)
安装完 docker 后修改一下参数(需要先 source environment.sh,否则 ${DOCKER_DIR} 为空):
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > docker-daemon.json <<EOF
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com"],
"insecure-registries": ["docker02:35000"],
"max-concurrent-downloads": 20,
"live-restore": true,
"max-concurrent-uploads": 10,
"debug": true,
"data-root": "${DOCKER_DIR}/data",
"exec-root": "${DOCKER_DIR}/exec",
"log-opts": {
"max-size": "100m",
"max-file": "5"
}
}
EOF
分发 docker 配置文件到所有 worker 节点:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /etc/docker/ ${DOCKER_DIR}/{data,exec}"
scp docker-daemon.json root@${node_ip}:/etc/docker/daemon.json
done
更新 kubelet 配置并重启服务(每个节点上都操作)
需要修改 kubelet 的 systemd unit 文件(/etc/systemd/system/kubelet.service),删除其中下面 4 行:
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
然后重启 kubelet 服务:
systemctl restart kubelet
10 部署 kubelet 组件
kubelet 运行在每个 worker 节点上,接收 kube-apiserver 发送的请求,管理 Pod 容器,执行交互式命令,如 exec、run、logs 等。kubelet 启动时自动向 kube-apiserver 注册节点信息,内置的 cadvisor 统计和监控节点的资源使用情况。为确保安全,部署时关闭了 kubelet 的非安全 http 端口,对请求进行认证和授权,拒绝未授权的访问(如 apiserver、heapster 的请求)。
创建 kubelet bootstrap kubeconfig 文件
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
do
echo ">>> ${node_name}"
# 创建 token
export BOOTSTRAP_TOKEN=$(kubeadm token create \
--description kubelet-bootstrap-token \
--groups system:bootstrappers:${node_name} \
--kubeconfig ~/.kube/config)
# 设置集群参数
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/cert/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
# 设置客户端认证参数
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
# 设置上下文参数
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
# 设置默认上下文
kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
done
- 向 kubeconfig 写入的是 token,bootstrap 结束后 kube-controller-manager 为 kubelet 创建 client 和 server 证书;
查看 kubeadm 为各节点创建的 token:
$ kubeadm token list --kubeconfig ~/.kube/config
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
2sb8wy.euialqfpxfbcljby 23h 2020-02-08T15:36:30+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:zhangjun-k8s-02
ta7onm.fcen74h0mczyfbz2 23h 2020-02-08T15:36:30+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:zhangjun-k8s-01
xk27zp.tylnvywx9kc8sq87 23h 2020-02-08T15:36:30+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:zhangjun-k8s-03
- token 有效期为 1 天,超期后将不能再被用来 bootstrap kubelet,且会被 kube-controller-manager 的 tokencleaner 清理;
- kube-apiserver 接收 kubelet 的 bootstrap token 后,将请求的 user 设置为 system:bootstrap:<Token ID>,group 设置为 system:bootstrappers,后续将为这个 group 设置 ClusterRoleBinding;
分发 bootstrap kubeconfig 文件到所有 worker 节点
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
do
echo ">>> ${node_name}"
scp kubelet-bootstrap-${node_name}.kubeconfig root@${node_name}:/etc/kubernetes/kubelet-bootstrap.kubeconfig
done
创建和分发 kubelet 参数配置文件
从 v1.10 开始,部分 kubelet 参数需在配置文件中配置,kubelet --help 会提示:
DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag
创建 kubelet 参数配置文件模板(可配置项参考代码中注释):
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kubelet-config.yaml.template <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: "##NODE_IP##"
staticPodPath: ""
syncFrequency: 1m
fileCheckFrequency: 20s
httpCheckFrequency: 20s
staticPodURL: ""
port: 10250
readOnlyPort: 0
rotateCertificates: true
serverTLSBootstrap: true
authentication:
anonymous:
enabled: false
webhook:
enabled: true
x509:
clientCAFile: "/etc/kubernetes/cert/ca.pem"
authorization:
mode: Webhook
registryPullQPS: 0
registryBurst: 20
eventRecordQPS: 0
eventBurst: 20
enableDebuggingHandlers: true
enableContentionProfiling: true
healthzPort: 10248
healthzBindAddress: "##NODE_IP##"
clusterDomain: "${CLUSTER_DNS_DOMAIN}"
clusterDNS:
- "${CLUSTER_DNS_SVC_IP}"
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 1m
imageMinimumGCAge: 2m
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
volumeStatsAggPeriod: 1m
kubeletCgroups: ""
systemCgroups: ""
cgroupRoot: ""
cgroupsPerQOS: true
cgroupDriver: cgroupfs
runtimeRequestTimeout: 10m
hairpinMode: promiscuous-bridge
maxPods: 220
podCIDR: "${CLUSTER_CIDR}"
podPidsLimit: -1
resolvConf: /etc/resolv.conf
maxOpenFiles: 1000000
kubeAPIQPS: 1000
kubeAPIBurst: 2000
serializeImagePulls: false
evictionHard:
memory.available: "100Mi"
nodefs.available: "10%"
nodefs.inodesFree: "5%"
imagefs.available: "15%"
evictionSoft: {}
enableControllerAttachDetach: true
failSwapOn: true
containerLogMaxSize: 20Mi
containerLogMaxFiles: 10
systemReserved: {}
kubeReserved: {}
systemReservedCgroup: ""
kubeReservedCgroup: ""
enforceNodeAllocatable: ["pods"]
EOF
- address:kubelet 安全端口(https,10250)监听的地址,不能为 127.0.0.1,否则 kube-apiserver、heapster 等不能调用 kubelet 的 API;
- readOnlyPort=0:关闭只读端口(默认 10255),等效为未指定;
- authentication.anonymous.enabled:设置为 false,不允许匿名访问 10250 端口;
- authentication.x509.clientCAFile:指定签名客户端证书的 CA 证书,开启 HTTP 证书认证;
- authentication.webhook.enabled=true:开启 HTTPs bearer token 认证;
- 对于未通过 x509 证书和 webhook 认证的请求(kube-apiserver 或其他客户端),将被拒绝,提示 Unauthorized;
- authorization.mode=Webhook:kubelet 使用 SubjectAccessReview API 查询 kube-apiserver 某 user、group 是否具有操作资源的权限(RBAC);
- featureGates.RotateKubeletClientCertificate、featureGates.RotateKubeletServerCertificate:自动 rotate 证书,证书的有效期取决于 kube-controller-manager 的 --experimental-cluster-signing-duration 参数;
- 需要 root 账户运行;
为各节点创建和分发 kubelet 配置文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
sed -e "s/##NODE_IP##/${node_ip}/" kubelet-config.yaml.template > kubelet-config-${node_ip}.yaml.template
scp kubelet-config-${node_ip}.yaml.template root@${node_ip}:/etc/kubernetes/kubelet-config.yaml
done
创建和分发 kubelet systemd unit 文件
创建 kubelet systemd unit 文件模板:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kubelet.service.template <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
WorkingDirectory=${K8S_DIR}/kubelet
ExecStart=/opt/k8s/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \\
--cert-dir=/etc/kubernetes/cert \\
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
--root-dir=${K8S_DIR}/kubelet \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-config.yaml \\
--hostname-override=##NODE_NAME## \\
--image-pull-progress-deadline=15m \\
--volume-plugin-dir=${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/ \\
--logtostderr=true \\
--v=2
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
EOF
- 如果设置了 --hostname-override 选项,则 kube-proxy 也需要设置该选项,否则会出现找不到 Node 的情况;
- --bootstrap-kubeconfig:指向 bootstrap kubeconfig 文件,kubelet 使用该文件中的用户名和 token 向 kube-apiserver 发送 TLS Bootstrapping 请求;
- K8S approve kubelet 的 csr 请求后,在 --cert-dir 目录创建证书和私钥文件,然后写入 --kubeconfig 文件;
- --pod-infra-container-image 不使用 redhat 的 pod-infrastructure:latest 镜像,它不能回收容器的僵尸进程;
为各节点创建和分发 kubelet systemd unit 文件:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
do
echo ">>> ${node_name}"
sed -e "s/##NODE_NAME##/${node_name}/" kubelet.service.template > kubelet-${node_name}.service
scp kubelet-${node_name}.service root@${node_name}:/etc/systemd/system/kubelet.service
done
授予 kube-apiserver 访问 kubelet API 的权限
在执行 kubectl exec、run、logs 等命令时,apiserver 会将请求转发到 kubelet 的 https 端口。这里定义 RBAC 规则,授权 apiserver 所使用证书(kubernetes.pem)对应的用户名(CN:kubernetes-master)访问 kubelet API 的权限:
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes-master
Bootstrap Token Auth 和授予权限
kubelet 启动时查找 --kubeconfig 参数对应的文件是否存在,如果不存在则使用 --bootstrap-kubeconfig 指定的 kubeconfig 文件向 kube-apiserver 发送证书签名请求 (CSR)。
kube-apiserver 收到 CSR 请求后,对其中的 Token 进行认证,认证通过后将请求的 user 设置为 system:bootstrap:<Token ID>,group 设置为 system:bootstrappers,这一过程称为 Bootstrap Token Auth。
默认情况下,这个 user 和 group 没有创建 CSR 的权限,kubelet 启动失败,错误日志如下:
$ sudo journalctl -u kubelet -a |grep -A 2 'certificatesigningrequests'
May 26 12:13:41 zhangjun-k8s-01 kubelet[128468]: I0526 12:13:41.798230 128468 certificate_manager.go:366] Rotating certificates
May 26 12:13:41 zhangjun-k8s-01 kubelet[128468]: E0526 12:13:41.801997 128468 certificate_manager.go:385] Failed while requesting a signed certificate from the master: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "system:bootstrap:82jfrm" cannot create resource "certificatesigningrequests" in API group "certificates.k8s.io" at the cluster scope
解决办法是:创建一个 clusterrolebinding,将 group system:bootstrappers 和 clusterrole system:node-bootstrapper 绑定:
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
自动 approve CSR 请求,生成 kubelet client 证书
kubelet 创建 CSR 请求后,下一步需要该 CSR 被 approve,有两种方式:
- kube-controller-manager 自动 approve;
- 手动使用命令 kubectl certificate approve;
CSR 被 approve 后,kubelet 向 kube-controller-manager 请求创建 client 证书,kube-controller-manager 中的 csrapproving controller 使用 SubjectAccessReview API 来检查 kubelet 请求(对应的 group 是 system:bootstrappers)是否具有相应的权限。
创建三个 ClusterRoleBinding,分别授予 group system:bootstrappers 和 group system:nodes 进行 approve client、renew client、renew server 证书的权限(server csr 是手动 approve 的,见后文):
cd /opt/k8s/work
cat > csr-crb.yaml <<EOF
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: auto-approve-csrs-for-group
subjects:
- kind: Group
name: system:bootstrappers
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
apiGroup: rbac.authorization.k8s.io
---
# To let a node of the group "system:nodes" renew its own credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-client-cert-renewal
subjects:
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests/selfnodeserver"]
verbs: ["create"]
---
# To let a node of the group "system:nodes" renew its own server credentials
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-server-cert-renewal
subjects:
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: approve-node-server-renewal-csr
apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f csr-crb.yaml
- auto-approve-csrs-for-group:自动 approve node 的第一次 CSR; 注意第一次 CSR 时,请求的 Group 为 system:bootstrappers;
- node-client-cert-renewal:自动 approve node 后续过期的 client 证书,自动生成的证书 Group 为 system:nodes;
- node-server-cert-renewal:自动 approve node 后续过期的 server 证书,自动生成的证书 Group 为 system:nodes;
启动 kubelet 服务
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/"
ssh root@${node_ip} "/usr/sbin/swapoff -a"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"
done
- 启动服务前必须先创建工作目录;
- 关闭 swap 分区,否则 kubelet 会启动失败;
kubelet 启动后使用 --bootstrap-kubeconfig 向 kube-apiserver 发送 CSR 请求,当这个 CSR 被 approve 后,kube-controller-manager 为 kubelet 创建 TLS 客户端证书、私钥和 --kubeconfig 文件。
注意:kube-controller-manager 需要配置 --cluster-signing-cert-file
和 --cluster-signing-key-file
参数,才会为 TLS Bootstrap 创建证书和私钥。
手动 approve server cert csr
基于安全性考虑,CSR approving controllers 不会自动 approve kubelet server 证书签名请求,需要手动 approve:
$ kubectl get csr
NAME AGE REQUESTOR CONDITION
csr-5rwzm 3m22s system:node:zhangjun-k8s-01 Pending
csr-65nms 3m34s system:bootstrap:2sb8wy Approved,Issued
csr-8t5hj 3m21s system:node:zhangjun-k8s-02 Pending
csr-jkhhs 3m20s system:node:zhangjun-k8s-03 Pending
csr-jv7dn 3m35s system:bootstrap:ta7onm Approved,Issued
csr-vb6p5 3m33s system:bootstrap:xk27zp Approved,Issued
$ # 手动 approve
$ kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
$ # 自动生成了 server 证书
$ ls -l /etc/kubernetes/cert/kubelet-*
-rw------- 1 root root 1281 Feb 7 15:38 /etc/kubernetes/cert/kubelet-client-2020-02-07-15-38-21.pem
lrwxrwxrwx 1 root root 59 Feb 7 15:38 /etc/kubernetes/cert/kubelet-client-current.pem -> /etc/kubernetes/cert/kubelet-client-2020-02-07-15-38-21.pem
-rw------- 1 root root 1330 Feb 7 15:42 /etc/kubernetes/cert/kubelet-server-2020-02-07-15-42-12.pem
lrwxrwxrwx 1 root root 59 Feb 7 15:42 /etc/kubernetes/cert/kubelet-server-current.pem -> /etc/kubernetes/cert/kubelet-server-2020-02
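server 证书签发完成后,各节点应已注册到集群,可以确认节点状态(示意命令;在网络插件(如 calico)部署完成前,STATUS 显示 NotReady 属正常现象):
kubectl get nodes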
bearer token 认证和授权
创建一个 ServiceAccount,将它和 ClusterRole system:kubelet-api-admin 绑定,从而具有调用 kubelet API 的权限:
kubectl create sa kubelet-api-test
kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test
SECRET=$(kubectl get secrets | grep kubelet-api-test | awk '{print $1}')
TOKEN=$(kubectl describe secret ${SECRET} | grep -E '^token' | awk '{print $2}')
echo ${TOKEN}
$ curl -s --cacert /etc/kubernetes/cert/ca.pem -H "Authorization: Bearer ${TOKEN}" https://172.27.138.251:10250/metrics | head
# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0
cadvisor 和 metrics
cadvisor 是内嵌在 kubelet 二进制中的,统计所在节点各容器的资源(CPU、内存、磁盘、网卡)使用情况的服务。
浏览器访问 https://ip:10250/metrics 和 https://ip:10250/metrics/cadvisor 分别返回 kubelet 和 cadvisor 的 metrics。
Note:
- kubelet.config.json sets authentication.anonymous.enabled to false, so anonymous (certificate-less) access to the https service on port 10250 is not allowed;
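Because anonymous access is disabled, querying the cadvisor endpoint from the command line needs the bearer token created above; a sketch reusing the ${TOKEN} variable and the node IP from the earlier example:
curl -s --cacert /etc/kubernetes/cert/ca.pem -H "Authorization: Bearer ${TOKEN}" https://172.27.138.251:10250/metrics/cadvisor | head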
11. Deploy the kube-proxy component
kube-proxy runs on all worker nodes; it watches the apiserver for changes to services and endpoints, and creates routing rules to provide service IPs and load balancing.
Create the kube-proxy certificate
Create the certificate signing request:
cd /opt/k8s/work
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "opsnull"
}
]
}
EOF
- CN: specifies the certificate's User as `system:kube-proxy`;
- The predefined RoleBinding `system:node-proxier` binds User `system:kube-proxy` to Role `system:node-proxier`, which grants permission to call kube-apiserver's Proxy-related APIs (a quick way to inspect this binding is shown below);
- This certificate is only used by kube-proxy as a client certificate, so the hosts field is empty;
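If needed, the predefined binding can be inspected directly to confirm it grants the expected permissions (a quick check, not required for the deployment):
kubectl describe clusterrolebinding system:node-proxier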
Generate the certificate and private key:
cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*
Create and distribute the kubeconfig file
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/work/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Distribute the kubeconfig file:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
do
echo ">>> ${node_name}"
scp kube-proxy.kubeconfig root@${node_name}:/etc/kubernetes/
done
Create the kube-proxy configuration file
Starting with v1.10, some kube-proxy parameters can be set in a configuration file. You can generate this file with the `--write-config-to` option, or refer to the comments in the source code.
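For example, a sketch of dumping the default values (assuming the kube-proxy binary is already installed in /opt/k8s/bin; the output path is arbitrary):
/opt/k8s/bin/kube-proxy --write-config-to=/tmp/kube-proxy-defaults.yaml
cat /tmp/kube-proxy-defaults.yaml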
Create the kube-proxy config file template:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-proxy-config.yaml.template <<EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
burst: 200
kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
qps: 100
bindAddress: ##NODE_IP##
healthzBindAddress: ##NODE_IP##:10256
metricsBindAddress: ##NODE_IP##:10249
enableProfiling: true
clusterCIDR: ${CLUSTER_CIDR}
hostnameOverride: ##NODE_NAME##
mode: "ipvs"
portRange: ""
iptables:
masqueradeAll: false
ipvs:
scheduler: rr
excludeCIDRs: []
EOF
- `bindAddress`: listen address;
- `clientConnection.kubeconfig`: kubeconfig file used to connect to the apiserver;
- `clusterCIDR`: kube-proxy uses `--cluster-cidr` to distinguish traffic inside and outside the cluster; kube-proxy only SNATs requests to Service IPs when `--cluster-cidr` or `--masquerade-all` is specified;
- `hostnameOverride`: must match the value used by kubelet, otherwise kube-proxy will not find this Node after it starts and will not create any ipvs rules;
- `mode`: use ipvs mode;
Create and distribute a kube-proxy configuration file for each node:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
echo ">>> ${NODE_NAMES[i]}"
sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-proxy-config.yaml.template > kube-proxy-config-${NODE_NAMES[i]}.yaml.template
scp kube-proxy-config-${NODE_NAMES[i]}.yaml.template root@${NODE_NAMES[i]}:/etc/kubernetes/kube-proxy-config.yaml
done
Create and distribute the kube-proxy systemd unit file
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=${K8S_DIR}/kube-proxy
ExecStart=/opt/k8s/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy-config.yaml \\
--logtostderr=true \\
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Distribute the kube-proxy systemd unit file:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_name in ${NODE_NAMES[@]}
do
echo ">>> ${node_name}"
scp kube-proxy.service root@${node_name}:/etc/systemd/system/
done
Start the kube-proxy service
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-proxy"
ssh root@${node_ip} "modprobe ip_vs_rr"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"
done
Check the startup result
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status kube-proxy|grep Active"
done
Make sure the status is `active (running)`; otherwise check the logs to find the cause:
journalctl -u kube-proxy
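Beyond the unit status, two further checks can help (a sketch, assuming the ports configured above, 10249 for metrics and 10256 for healthz, and ipvs mode):
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
# kube-proxy should be listening on the metrics and healthz ports
ssh root@${node_ip} "ss -lnpt | grep kube-prox"
# in ipvs mode, virtual servers should exist for the service IPs
ssh root@${node_ip} "/usr/sbin/ipvsadm -ln"
done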
12. Deploy the calico network
kubernetes requires that all nodes in the cluster (including master nodes) can reach each other over the Pod network. calico uses IPIP or BGP (IPIP by default) to create an interconnected Pod network across nodes. To use flannel instead, see the appendix E.部署flannel网络.md (flannel is used together with docker).
Install the calico network plugin
cd /opt/k8s/work
curl https://docs.projectcalico.org/manifests/calico.yaml -O
Modify the configuration:
$ cp calico.yaml calico.yaml.orig
$ diff calico.yaml.orig calico.yaml
630c630,632
< value: "192.168.0.0/16"
---
> value: "172.30.0.0/16"
> - name: IP_AUTODETECTION_METHOD
> value: "interface=eth.*"
699c701
< path: /opt/cni/bin
---
> path: /opt/k8s/bin
- Change the Pod network CIDR to 172.30.0.0/16;
- calico auto-detects the interface used for inter-node traffic; if a node has multiple NICs, you can configure a regular expression matching the interconnect interface name, such as `eth.*` above (adjust it to your servers' interface names); a scripted sketch of these edits follows below;
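The two simple substitutions can be scripted; a minimal sketch assuming the default values shown in the diff above (the IP_AUTODETECTION_METHOD environment variable still has to be added to the calico-node container by hand):
cd /opt/k8s/work
# change the Pod CIDR and the CNI plugin directory in place
sed -i -e 's#192.168.0.0/16#172.30.0.0/16#' \
  -e 's#/opt/cni/bin#/opt/k8s/bin#' calico.yaml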
Run the calico plugin:
$ kubectl apply -f calico.yaml
- The calico plugin runs as a daemonset on all K8S nodes.
Check calico's running status
$ kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-77c4b7448-99lfq 1/1 Running 0 2m11s 172.30.184.128 zhangjun-k8s-03 <none> <none>
calico-node-dxnjs 1/1 Running 0 2m11s 172.27.137.229 zhangjun-k8s-02 <none> <none>
calico-node-rknzz 1/1 Running 0 2m11s 172.27.138.239 zhangjun-k8s-03 <none> <none>
calico-node-rw84c 1/1 Running 0 2m11s 172.27.138.251 zhangjun-k8s-01 <none> <none>
Use the crictl command to view the images used by calico:
$ crictl images
IMAGE TAG IMAGE ID SIZE
docker.io/calico/cni v3.12.0 cb6799752c46c 66.5MB
docker.io/calico/node v3.12.0 fc05bc4225f39 89.7MB
docker.io/calico/pod2daemon-flexvol v3.12.0 98793d0a88c82 37.5MB
registry.cn-beijing.aliyuncs.com/images_k8s/pause-amd64 3.1 21a595adc69ca 326kB
- If the crictl output is empty or the command fails, it may be because the configuration file /etc/crictl.yaml is missing; its contents should be as follows:
$ cat /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
13. Deploy the flannel network (optional)
kubernetes requires that all nodes in the cluster (including master nodes) can reach each other over the Pod network. flannel uses vxlan to create an interconnected Pod network across nodes, using UDP port 8472 (this port must be open, e.g. in security groups on public clouds such as AWS).
When flanneld starts for the first time, it reads the configured Pod network from etcd, allocates an unused subnet to the local node, and then creates the flannel.1 network interface (the name may differ, e.g. flannel1).
flannel writes the Pod subnet allocated to this node into the /run/flannel/docker file; docker later uses the environment variables in this file to configure the docker0 bridge, so that all Pod containers on this node are assigned IPs from this subnet.
Note:
- Unless otherwise specified, all operations in this document are performed on the zhangjun-k8s01 node and then files are distributed and commands executed remotely;
- flanneld is not compatible with the etcd v3.4.x deployed in this document: either downgrade etcd to v3.3.x, or enable the etcd v2 API as described below;
- flanneld is used together with docker;
Adjust the etcd systemd unit file
etcd 3.4.2 disables the v2 API by default, so it must be enabled in the unit file:
- Modify the etcd.service systemd unit file:
....
--heartbeat-interval=250 \
--election-timeout=2000 \
--enable-v2=true
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Add `--enable-v2=true`, and note the backslash at the end of the preceding line.
- Adjust the write command:
ETCDCTL_API=2 etcdctl --endpoints=${ETCD_ENDPOINTS} --ca-file=/opt/k8s/work/ca.pem --cert-file=/opt/k8s/work/flanneld.pem --key-file=/opt/k8s/work/flanneld-key.pem mk ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 21, "Backend": {"Type": "vxlan"}}'
The FLANNEL_ETCD_PREFIX environment variable has to be added by hand; I simply picked a value for it.
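For example, the value below matches the etcd keys shown later in this chapter; append it to environment.sh (and redistribute the file) before running the write command:
echo 'export FLANNEL_ETCD_PREFIX="/kubernetes/network"' >> /opt/k8s/bin/environment.sh
source /opt/k8s/bin/environment.sh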
Download and distribute the flanneld binaries
Download the latest release from the flannel release page:
cd /opt/k8s/work
mkdir flannel
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
tar -xzvf flannel-v0.11.0-linux-amd64.tar.gz -C flannel
Distribute the binaries to all cluster nodes:
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp flannel/{flanneld,mk-docker-opts.sh} root@${node_ip}:/opt/k8s/bin/
ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
done
Create the flannel certificate and private key
flanneld reads and writes subnet allocation information in the etcd cluster, and the etcd cluster has mutual x509 certificate authentication enabled, so a certificate and private key must be generated for flanneld.
Create the certificate signing request:
cd /opt/k8s/work
cat > flanneld-csr.json <<EOF
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "4Paradigm"
}
]
}
EOF
- This certificate is only used by flanneld as a client certificate, so the hosts field is empty;
Generate the certificate and private key:
cfssl gencert -ca=/opt/k8s/work/ca.pem \
-ca-key=/opt/k8s/work/ca-key.pem \
-config=/opt/k8s/work/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
ls flanneld*pem
Distribute the generated certificate and private key to all nodes (master and worker):
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "mkdir -p /etc/flanneld/cert"
scp flanneld*.pem root@${node_ip}:/etc/flanneld/cert
done
Write the cluster Pod network information to etcd
Note: this step only needs to be performed once.
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
ETCDCTL_API=2 etcdctl \
--endpoints=${ETCD_ENDPOINTS} \
--ca-file=/opt/k8s/work/ca.pem \
--cert-file=/opt/k8s/work/flanneld.pem \
--key-file=/opt/k8s/work/flanneld-key.pem \
mk ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 21, "Backend": {"Type": "vxlan"}}'
- The current flanneld release (v0.11.0) does not support etcd v3, so the etcd v2 API is used to write the config key and the subnet data;
- The Pod network ${CLUSTER_CIDR} written here must have a prefix length (e.g. /16) smaller than SubnetLen, and must match the --cluster-cidr parameter of kube-controller-manager;
Create the flanneld systemd unit file
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/opt/k8s/bin/flanneld \\
-etcd-cafile=/etc/kubernetes/cert/ca.pem \\
-etcd-certfile=/etc/flanneld/cert/flanneld.pem \\
-etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \\
-etcd-endpoints=${ETCD_ENDPOINTS} \\
-etcd-prefix=${FLANNEL_ETCD_PREFIX} \\
-iface=${IFACE} \\
-ip-masq
ExecStartPost=/opt/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
- The mk-docker-opts.sh script writes the Pod subnet allocated to flanneld into the /run/flannel/docker file; docker later uses the environment variables in this file to configure the docker0 bridge;
- flanneld uses the interface of the system's default route to communicate with other nodes; for nodes with multiple interfaces (e.g. internal and public networks), the `-iface` parameter can be used to specify the communication interface;
- flanneld must run with root privileges;
- `-ip-masq`: flanneld sets up SNAT rules for traffic leaving the Pod network, and sets the `--ip-masq` variable passed to Docker (in the /run/flannel/docker file) to false, so Docker no longer creates SNAT rules itself. When Docker's `--ip-masq` is true, the SNAT rules it creates are rather blunt: every request from a Pod on this node to a non-docker0 interface gets SNATed, so requests to Pods on other nodes have their source IP rewritten to the flannel.1 interface IP and the destination Pod cannot see the real source Pod IP. The SNAT rules created by flanneld are gentler and only SNAT requests to destinations outside the Pod network.
Distribute the flanneld systemd unit file to all nodes
cd /opt/k8s/work
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
scp flanneld.service root@${node_ip}:/etc/systemd/system/
done
Start the flanneld service
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
done
Check the startup result
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "systemctl status flanneld|grep Active"
done
Make sure the status is `active (running)`; otherwise check the logs to find the cause:
journalctl -u flanneld
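As an additional sanity check, confirm on each node that flanneld wrote the docker options file mentioned above (a sketch; the exact values depend on the subnet allocated to the node):
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh root@${node_ip} "cat /run/flannel/docker"
done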
Check the Pod subnets allocated to each flanneld
View the cluster Pod network (/16):
source /opt/k8s/bin/environment.sh
ETCDCTL_API=2 etcdctl \
--endpoints=${ETCD_ENDPOINTS} \
--ca-file=/etc/kubernetes/cert/ca.pem \
--cert-file=/etc/flanneld/cert/flanneld.pem \
--key-file=/etc/flanneld/cert/flanneld-key.pem \
get ${FLANNEL_ETCD_PREFIX}/config
Output:
{"Network":"172.30.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}
View the list of allocated Pod subnets (/24):
source /opt/k8s/bin/environment.sh
ETCDCTL_API=2 etcdctl \
--endpoints=${ETCD_ENDPOINTS} \
--ca-file=/etc/kubernetes/cert/ca.pem \
--cert-file=/etc/flanneld/cert/flanneld.pem \
--key-file=/etc/flanneld/cert/flanneld-key.pem \
ls ${FLANNEL_ETCD_PREFIX}/subnets
Output (results depend on your deployment):
/kubernetes/network/subnets/172.30.80.0-24
/kubernetes/network/subnets/172.30.32.0-24
/kubernetes/network/subnets/172.30.184.0-24
View the node IP and flannel interface address corresponding to a given Pod subnet:
source /opt/k8s/bin/environment.sh
ETCDCTL_API=2 etcdctl \
--endpoints=${ETCD_ENDPOINTS} \
--ca-file=/etc/kubernetes/cert/ca.pem \
--cert-file=/etc/flanneld/cert/flanneld.pem \
--key-file=/etc/flanneld/cert/flanneld-key.pem \
get ${FLANNEL_ETCD_PREFIX}/subnets/172.30.80.0-24
Output (results depend on your deployment):
{"PublicIP":"172.27.137.240","BackendType":"vxlan","BackendData":{"VtepMAC":"ce:9c:a9:08:50:03"}}
- The subnet 172.30.80.0/24 is allocated to node zhangjun-k8s01 (172.27.137.240);
- VtepMAC is the MAC address of the flannel.1 interface on zhangjun-k8s01;
Check the node's flannel network information
[root@zhangjun-k8s01 work]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:50:56:b1:9e:42 brd ff:ff:ff:ff:ff:ff
inet 10.20.250.23/24 brd 10.20.250.255 scope global noprefixroute ens192
valid_lft forever preferred_lft forever
inet6 fe80::3bb2:f3d:e0c7:b4c6/64 scope link noprefixroute
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:76:e9:a9 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:76:e9:a9 brd ff:ff:ff:ff:ff:ff
6: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 6e:35:ce:47:f1:bf brd ff:ff:ff:ff:ff:ff
7: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether ae:21:d2:74:e2:1b brd ff:ff:ff:ff:ff:ff
inet 10.254.0.1/32 brd 10.254.0.1 scope global kube-ipvs0
valid_lft forever preferred_lft forever
8: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether 36:8d:2b:f5:04:c9 brd ff:ff:ff:ff:ff:ff
inet 172.30.232.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::348d:2bff:fef5:4c9/64 scope link
valid_lft forever preferred_lft forever
- The flannel.1 interface's address is the first IP (.0) of the allocated Pod subnet, and it is a /32 address;
[root@zhangjun-k8s01 work]# ip route show |grep flannel.1
172.30.32.0/24 via 172.30.32.0 dev flannel.1 onlink
172.30.184.0/24 via 172.30.184.0 dev flannel.1 onlink
- Requests to Pod subnets on other nodes are all forwarded through the flannel.1 interface;
- flanneld uses the subnet entries in etcd, such as ${FLANNEL_ETCD_PREFIX}/subnets/172.30.80.0-24, to decide which node's interconnect IP a request should be sent to;
Verify that nodes can reach each other over the Pod network
After deploying flannel on all nodes, check that a flannel interface was created (its name may be flannel0, flannel.0, flannel.1, etc.):
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh ${node_ip} "/usr/sbin/ip addr show flannel.1|grep -w inet"
done
Output:
>>> 172.27.137.240
inet 172.30.80.0/32 scope global flannel.1
>>> 172.27.137.239
inet 172.30.32.0/32 scope global flannel.1
>>> 172.27.137.238
inet 172.30.184.0/32 scope global flannel.1
On each node, ping all flannel interface IPs and make sure they respond:
source /opt/k8s/bin/environment.sh
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
ssh ${node_ip} "ping -c 1 172.30.80.0"
ssh ${node_ip} "ping -c 1 172.30.32.0"
ssh ${node_ip} "ping -c 1 172.30.184.0"
done
Verify cluster functionality
Create a test file
cd /opt/k8s/work
cat > nginx-ds.yml <<EOF
apiVersion: v1
kind: Service
metadata:
name: nginx-ds
labels:
app: nginx-ds
spec:
type: NodePort
selector:
app: nginx-ds
ports:
- name: http
port: 80
targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx-ds
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
app: nginx-ds
template:
metadata:
labels:
app: nginx-ds
spec:
containers:
- name: my-nginx
image: nginx:1.7.9
ports:
- containerPort: 80
EOF
Run the test
kubectl create -f nginx-ds.yml
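A sketch of checking the result, assuming the Service and DaemonSet above were created successfully (the NodePort is read from the Service rather than hard-coded):
source /opt/k8s/bin/environment.sh
# one nginx-ds Pod should be scheduled on every node, each with a Pod-network IP
kubectl get pods -o wide -l app=nginx-ds
# the Service gets a cluster IP and a NodePort
kubectl get svc nginx-ds
NODE_PORT=$(kubectl get svc nginx-ds -o jsonpath='{.spec.ports[0].nodePort}')
for node_ip in ${NODE_IPS[@]}
do
echo ">>> ${node_ip}"
# each node should answer with HTTP 200 on the NodePort
curl -s -o /dev/null -w "%{http_code}\n" http://${node_ip}:${NODE_PORT}
done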