Cloud Ops: Compiling and Installing Kubernetes Step by Step — Compute Node Deployment

Deploying kubelet

Cluster plan

Hostname                Role      IP
k8s-node01.boysec.cn    kubelet   10.1.1.100
k8s-node02.boysec.cn    kubelet   10.1.1.110

Note: this guide uses the host k8s-node01.boysec.cn as the example; the other compute node is installed and deployed in the same way.

Issue the kubelet certificate

On k8s-dns.boysec.cn:

vim /opt/certs/kubelet-csr.json

{
  "CN": "kubelet-node",
  "hosts": [
    "127.0.0.1",
    "10.1.1.50",
    "10.1.1.60",
    "10.1.1.100",
    "10.1.1.110",
    "10.1.1.120",
    "10.1.1.130",
    "10.1.1.150"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "od",
      "OU": "ops"
    }
  ]
}

Generate the kubelet certificate and private key

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json |cfssljson -bare kubelet
ll kubelet*
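
Optionally (a quick sanity check, not part of the original steps), confirm that the generated certificate carries the expected node IPs in its SANs before copying it out:

openssl x509 -in kubelet.pem -noout -text | grep -A1 "Subject Alternative Name"
## or, if cfssl-certinfo is installed:
cfssl-certinfo -cert kubelet.pem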

Copy the certificates to each compute node and create the configuration

scp kubelet.pem kubelet-key.pem k8s-node01:/opt/kubernetes/server/bin/cert/

## k8s-node01
ls -l /opt/kubernetes/server/bin/cert |grep kubelet

Create the kubeconfig

Note: all of the following commands are run in the conf directory.

cd /opt/kubernetes/server/bin/conf
kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.1.1.50:7443 \
--kubeconfig=kubelet.kubeconfig

Cluster "myk8s" set.   (created successfully)
cd /opt/kubernetes/server/bin/conf
kubectl config set-credentials k8s-node \
--client-certificate=/opt/kubernetes/server/bin/cert/client.pem \
--client-key=/opt/kubernetes/server/bin/cert/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig

User "k8s-node" set.   (created successfully)
cd /opt/kubernetes/server/bin/conf

kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig

Context "myk8s-context" created.   (created successfully)
cd /opt/kubernetes/server/bin/conf

kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig

Switched to context "myk8s-context".   (switched successfully)
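
Optionally (a sketch, not part of the original steps), inspect the finished kubeconfig to confirm the cluster, user and context were written as expected:

kubectl config view --kubeconfig=kubelet.kubeconfig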

RBAC resource manifest k8s-node.yaml

vim /opt/kubernetes/server/bin/conf/k8s-node.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node

Apply the resource manifest

conf]# kubectl create -f k8s-node.yaml

clusterrolebinding.rbac.authorization.k8s.io/k8s-node created

Verify

cd /opt/kubernetes/server/bin/conf
kubectl get clusterrolebinding k8s-node -o yaml
kubectl get clusterrolebinding k8s-node
NAME       AGE
k8s-node   3m
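
A further optional check (hedged: it assumes the context you are using may impersonate users, and the exact verbs granted depend on what the system:node ClusterRole contains in your Kubernetes version) is to ask the API server what the bound user is allowed to do:

kubectl auth can-i get pods --as k8s-node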

Prepare the infra-pod (pause) base image

On the ops host k8s-dns.boysec.cn:

docker image pull kubernetes/pause
docker tag kubernetes/pause:latest harbor.od.com/public/pause:latest
docker push harbor.od.com/public/pause:latest
docker pull xplenty/rhel7-pod-infrastructure:v3.4

Push to the private registry (Harbor)

docker login harbor.od.com


vim /root/.docker/config.json

{
  "auths": {
    "harbor.od.com": {
      "auth": "YWRtaW46SGFyYm9yMTIzNDU="
    }
  }
}
## the auth value is base64("username:password"); decode it with: echo YWRtaW46SGFyYm9yMTIzNDU= | base64 -d
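
For reference, the string above decodes to admin:Harbor12345, so the auth value can be regenerated (or rebuilt for your own Harbor credentials) like this:

echo -n 'admin:Harbor12345' | base64
## -> YWRtaW46SGFyYm9yMTIzNDU=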

Tag and push

## 34d3450d733b is the image ID of the xplenty/rhel7-pod-infrastructure:v3.4 image pulled above
docker tag 34d3450d733b harbor.od.com/public/pod:v3.4
docker push harbor.od.com/public/pod:v3.4

Create the kubelet startup script

vim /opt/kubernetes/server/bin/kubelet-node.sh

#!/bin/sh
./kubelet \
--anonymous-auth=false \
--cgroup-driver systemd \
--cluster-dns 192.168.0.2 \
--cluster-domain cluster.local \
--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice \
--fail-swap-on="false" \
--client-ca-file ./cert/ca.pem \
--tls-cert-file ./cert/kubelet.pem \
--tls-private-key-file ./cert/kubelet-key.pem \
--hostname-override k8s-node01.boysec.cn \
--image-gc-high-threshold 20 \
--image-gc-low-threshold 10 \
--kubeconfig ./conf/kubelet.kubeconfig \
--log-dir /data/logs/kubernetes/kube-kubelet \
--pod-infra-container-image harbor.od.com/public/pod:v3.4 \
--root-dir /data/kubelet

Note: the kubelet startup script differs slightly from host to host; remember to adjust it when deploying the other nodes, as shown below.
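
For example (a sketch based on the cluster plan above), on k8s-node02.boysec.cn only the hostname override changes; the rest of the script stays the same:

## in /opt/kubernetes/server/bin/kubelet-node.sh on k8s-node02:
--hostname-override k8s-node02.boysec.cn \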

Check the configuration and permissions, and create the log directories

k8s-node01

chmod +x /opt/kubernetes/server/bin/kubelet-node.sh
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
conf]# ls -l|grep kubelet.kubeconfig
-rw------- 1 root root 6151 Aug 13 09:27 kubelet.kubeconfig

Create the supervisor configuration

vim /etc/supervisord.d/kube-kubelet.ini

[program:kube-kubelet-100]
command=/opt/kubernetes/server/bin/kubelet-node.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)

Start the service and verify

supervisorctl update
supervisorctl status

kubectl label node k8s-node01.boysec.cn node-role.kubernetes.io/master=
kubectl label node k8s-node01.boysec.cn node-role.kubernetes.io/node=
[root@k8s-node01 conf]# kubectl get node
NAME                   STATUS   ROLES         AGE     VERSION
k8s-node01.boysec.cn   Ready    master,node   3m26s   v1.17.4
k8s-node02.boysec.cn   Ready    master,node   3m37s   v1.17.4
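
If a node fails to show up or stays NotReady, a first place to look (a sketch, using the program name and log paths defined above; kubelet writes its logs to stderr by default) is:

supervisorctl status kube-kubelet-100
tail -n 50 /data/logs/kubernetes/kube-kubelet/kubelet.stderr.log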

Deploying kube-proxy

Cluster plan

Hostname                Role         IP
k8s-node01.boysec.cn    kube-proxy   10.1.1.100
k8s-node02.boysec.cn    kube-proxy   10.1.1.110

Note: this guide uses the host k8s-node01.boysec.cn as the example; the other compute node is installed and deployed in the same way.

Issue the kube-proxy certificate

On k8s-dns.boysec.cn:

vim /opt/certs/kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "od",
      "OU": "ops"
    }
  ]
}

Generate the kube-proxy certificate and private key

certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssljson -bare kube-proxy-client

Copy the certificates to each compute node and create the configuration

scp kube-proxy-client-key.pem kube-proxy-client.pem k8s-node01:/opt/kubernetes/server/bin/cert
## k8s-node01
ls -l /opt/kubernetes/server/bin/cert |grep kube-proxy-client

Create the kubeconfig

Note: all of the following commands are run in the conf directory.

cd /opt/kubernetes/server/bin/conf

kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.1.1.50:7443 \
--kubeconfig=kube-proxy.kubeconfig


Cluster "myk8s" set.   (created successfully)
cd /opt/kubernetes/server/bin/conf
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
--client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

User "kube-proxy" set.   (created successfully)
cd /opt/kubernetes/server/bin/conf

kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

Context "myk8s-context" created.   (created successfully)
cd /opt/kubernetes/server/bin/conf
kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig

Switched to context "myk8s-context".   (switched successfully)

Load the IPVS kernel modules

vim /root/ipvs.sh
#!/bin/sh
# load LVS IPVS modules
# /usr/lib/modules/3.10.0-957.el7.x86_64/kernel/net/netfilter/ipvs/
if [ -d /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs/ ]; then
    for module in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs/)
    do
        module=${module%%.*}
        modprobe $module >/dev/null 2>&1
    done
fi
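
Run the script once and confirm the modules are loaded (ip_vs and its schedulers should appear in lsmod):

sh /root/ipvs.sh
lsmod | grep ip_vs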

Create the kube-proxy startup script

On k8s-node01.boysec.cn:

vim /opt/kubernetes/server/bin/kube-proxy.sh

#!/bin/sh
./kube-proxy \
--cluster-cidr 172.7.0.0/16 \
--hostname-override 10.1.1.100 \
--kubeconfig ./conf/kube-proxy.kubeconfig \
--proxy-mode=ipvs \
--ipvs-scheduler=nq

Note: the kube-proxy startup script differs slightly from host to host; remember to adjust it when deploying the other nodes, as shown below.
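
For example (a sketch based on the cluster plan above), on k8s-node02.boysec.cn only the hostname override changes:

## in /opt/kubernetes/server/bin/kube-proxy.sh on k8s-node02:
--hostname-override 10.1.1.110 \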

Check the configuration and permissions, and create the log directory

conf]# ls -l|grep kube-proxy.kubeconfig 
-rw------- 1 root root 6171 Aug 13 10:32 kube-proxy.kubeconfig
chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
mkdir -p /data/logs/kubernetes/kube-proxy

Create the supervisor configuration

vim /etc/supervisord.d/kube-proxy.ini

[program:kube-proxy-100]
command=/opt/kubernetes/server/bin/kube-proxy.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)

Start the service and verify

supervisorctl update
supervisorctl status
yum -y install ipvsadm
ipvsadm -Ln
kubectl get svc
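
What to expect (a rough check, not part of the original text): every ClusterIP reported by kubectl get svc should show up as an IPVS virtual server, with the backing endpoints as real servers. For the default kubernetes service, for example:

ipvsadm -Ln | grep -A 2 "$(kubectl get svc kubernetes -o jsonpath='{.spec.clusterIP}')"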

End-to-end cluster check

[root@k8s-node01 ~]# cat nginx.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: harbor.od.com/public/nginx:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80

Create the Pod

[root@k8s-node01 ~]# kubectl create -f nginx.yaml
[root@k8s-node01 ~]# kubectl get pods -o wide
NAME    READY   STATUS    RESTARTS   AGE   IP           NODE                   NOMINATED NODE   READINESS GATES
nginx   1/1     Running   0          65m   172.7.21.2   k8s-node01.boysec.cn   <none>           <none>
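
As a final functional check (a sketch, reusing the pod IP from the output above), the nginx pod should answer HTTP requests from the node:

curl -I 172.7.21.2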

Possible problems

Problem: Kubernetes fails to create the Pod because it cannot pull the image

Failed create pod sandbox: rpc error: code = Unknown desc = failed pulling image "harbor.od.com/public/pause:latest": Error response from daemon: Get http://harbor.od.com/v2/public/pause/manifests/latest: Get http://harbor.od.com:180/service/token?scope=repository%3Apublic%2Fpause%3Apull&service=harbor-registry: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers

Solution:

Edit the harbor.yml configuration file, uncomment external_url, and set it to:

external_url: http://harbor.od.com:80

Then stop all services with docker-compose down, delete the generated configuration under ./common/config (rm -rf ./common/config), and re-run install.sh to regenerate the configuration. That resolves the issue.

What the setting means: enable it when Harbor sits behind an external proxy. The comment in harbor.yml reads:
# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
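
Put together, the recovery steps look roughly like this (a sketch; the Harbor installation directory /opt/harbor is an assumption, adjust it to wherever Harbor was unpacked):

cd /opt/harbor                # assumed Harbor install directory
vim harbor.yml                # uncomment and set: external_url: http://harbor.od.com:80
docker-compose down           # stop all Harbor services
rm -rf ./common/config        # remove the previously generated configuration
./install.sh                  # regenerate the configuration and start Harbor again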

If you run into other problems, feel free to leave a comment!