Host layout

Hostname    IP            Role
k8s-node1   172.16.1.14   Master
k8s-node2   172.16.1.15   Node
k8s-node3   172.16.1.16   Node

Set the hostnames

node1

[root@k8s-node1 ~]# echo "k8s-node1" > /etc/hostname
[root@k8s-node1 ~]# hostname k8s-node1
[root@k8s-node1 ~]# bash

node2

[root@k8s-node2 ~]# echo "k8s-node2" > /etc/hostname
[root@k8s-node2 ~]# hostname k8s-node2
[root@k8s-node2 ~]# bash

node3

[root@k8s-node3 ~]# echo "k8s-node3" > /etc/hostname
[root@k8s-node3 ~]# hostname k8s-node3
[root@k8s-node3 ~]# bash

Configure passwordless SSH

[root@k8s-node1 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
02:e1:14:6e:33:57:41:9a:96:6b:ac:19:5f:f8:38:b8 root@k8s-node1
The key's randomart image is:
+--[ RSA 2048]----+
| +. .+. |
| + . = |
| B * |
| . B o |
| . * S |
| B = |
| + + . |
| . . |
| E |
+-----------------+

[root@k8s-node1 ~]# ssh-copy-id root@172.16.1.14
[root@k8s-node1 ~]# ssh-copy-id root@172.16.1.15
[root@k8s-node1 ~]# ssh-copy-id root@172.16.1.16
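
A quick sanity check (an optional addition, not part of the original transcript): if the keys were copied correctly, each host should echo its hostname without prompting for a password.

[root@k8s-node1 ~]# for ip in 172.16.1.14 172.16.1.15 172.16.1.16; do ssh root@$ip hostname; done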

hosts file

[root@k8s-node1 ~]# cat >> /etc/hosts << EOF
172.16.1.14 k8s-node1
172.16.1.15 k8s-node2
172.16.1.16 k8s-node3
EOF
[root@k8s-node1 ~]# scp /etc/hosts k8s-node2:/etc/hosts
[root@k8s-node1 ~]# scp /etc/hosts k8s-node3:/etc/hosts
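
To confirm the new entries resolve (optional check), getent queries the same resolver path the system itself uses:

[root@k8s-node1 ~]# getent hosts k8s-node2 k8s-node3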

Disable the swap partition

Note: run on all nodes

[root@k8s-node1 ~]# swapoff -a && sed -i 's/.*swap.*/#&/' /etc/fstab
...
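
free should now report zero swap; this verification step is an addition, not part of the original output:

[root@k8s-node1 ~]# free -h | grep -i swap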

Disable SELinux

Note: run on all nodes

[root@k8s-node1 ~]# setenforce 0
[root@k8s-node1 ~]# sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
[root@k8s-node1 ~]# sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
...
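
getenforce should report Permissive right after setenforce 0, and Disabled after the next reboot (optional check):

[root@k8s-node1 ~]# getenforce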

Tune kernel parameters

Note: run on all nodes

[root@k8s-node1 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.swappiness = 0
EOF
[root@k8s-node1 ~]# sysctl --system
[root@k8s-node1 ~]# modprobe ip_vs_rr
[root@k8s-node1 ~]# modprobe br_netfilter
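
modprobe only loads the modules for the current boot. To have them reloaded after a reboot, one option (an addition to the original steps) is a systemd modules-load drop-in:

[root@k8s-node1 ~]# cat > /etc/modules-load.d/k8s.conf << EOF
ip_vs_rr
br_netfilter
EOF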

Set up time synchronization

[root@k8s-node1 ~]# yum -y install ntpdate
[root@k8s-node1 ~]# ntpdate ntp.aliyun.com
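
ntpdate is a one-shot sync. To keep the clocks aligned, you could add an hourly cron entry (a suggested addition; /usr/sbin/ntpdate is where the CentOS 7 package installs the binary):

[root@k8s-node1 ~]# echo "0 * * * * /usr/sbin/ntpdate ntp.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root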

Install Docker

Download the Aliyun Docker repo

# Run on all nodes
# The Docker release pinned below is the stable version used for this setup
[root@k8s-node1 ~]# curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-node1 ~]# yum -y install docker-ce-18.06.1.ce-3.el7
[root@k8s-node1 ~]# systemctl enable docker && systemctl start docker
[root@k8s-node1 ~]# docker --version
Docker version 18.06.1-ce, build e68fc7a

Configure a registry mirror

By default Docker pulls images from registries hosted overseas, so point it at a domestic mirror here (DaoCloud in this case).

[root@k8s-node1 ~]# curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
docker version >= 1.12
{"registry-mirrors": ["http://f1361db2.m.daocloud.io"]}
Success.
You need to restart docker to take effect: sudo systemctl restart docker
[root@k8s-node1 ~]# systemctl restart docker
[root@k8s-node1 ~]# systemctl enable docker

Docker command completion

[root@k8s-node1 ~]# yum install -y epel-release bash-completion && cp /usr/share/bash-completion/completions/docker /etc/bash_completion.d/

Install Kubernetes

Configure the Kubernetes repo

# Run on all nodes
[root@k8s-node1 ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install the Kubernetes components

# Run on all nodes
[root@k8s-node1 ~]# yum -y install kubelet-1.17.0 kubeadm-1.17.0 kubectl-1.17.0
[root@k8s-node1 ~]# systemctl enable kubelet
[root@k8s-node1 ~]# systemctl start kubelet
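
kubectl ships its own completion generator, so the same trick used for Docker above also works here (optional):

[root@k8s-node1 ~]# kubectl completion bash > /etc/bash_completion.d/kubectl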

Deploy the master node

# Generate the default init configuration file
[root@k8s-node1 ~]# kubeadm config print init-defaults > kubeadm-init.yaml
[root@k8s-node1 ~]# vi kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.14       # change to this host's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-node1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:                            # added
  certSANs:
  - "k8s-node1"
  - "k8s-node2"
  - "k8s-node3"
  - "172.16.1.14"
  - "172.16.1.15"
  - "172.16.1.16"
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers   # image registry
controlPlaneEndpoint: "172.16.1.14:6443"                   # changed: API server endpoint
kind: ClusterConfiguration
kubernetesVersion: v1.17.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12         # default
  podSubnet: 10.244.0.0/16            # added: pod network CIDR
scheduler: {}

# List the required images
[root@k8s-node1 ~]# kubeadm config images list --config kubeadm-init.yaml
# Pull the images
[root@k8s-node1 ~]# kubeadm config images pull --config kubeadm-init.yaml
# Initialize the cluster
[root@k8s-node1 ~]# kubeadm init --config=kubeadm-init.yaml
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.16.1.14:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:bf445cd4ea6d12a47532cd0293f205752466dd1c9a3885f66c8b2f4b5cc7d889

# After initialization, run the following
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
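
At this point kubectl can reach the API server; a quick health check (optional) is to list the core components, all of which should report Healthy:

[root@k8s-node1 ~]# kubectl get cs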

Deploy the flannel network

[root@k8s-node1 ~]# vi kube-flannel.yml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
[root@k8s-node1 ~]# kubectl apply -f kube-flannel.yml
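
Rolling out flannel takes a moment; you can watch the DaemonSet pods (selected by the app=flannel label from the manifest above) until they are all Running:

[root@k8s-node1 ~]# kubectl -n kube-system get pods -l app=flannel -w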

Join the nodes to the cluster

# Run on the worker nodes
[root@k8s-node2 ~]# kubeadm join 172.16.1.14:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:bf445cd4ea6d12a47532cd0293f205752466dd1c9a3885f66c8b2f4b5cc7d889

Check cluster status on the master

[root@k8s-node1 ~]# kubectl get node
NAME        STATUS   ROLES    AGE    VERSION
k8s-node1   Ready    master   3h     v1.17.0
k8s-node2   Ready    <none>   179m   v1.17.0
k8s-node3   Ready    <none>   179m   v1.17.0

Check component status

[root@k8s-node1 ~]# kubectl get pods -n kube-system
NAME                                READY   STATUS    RESTARTS   AGE
coredns-9d85f5447-d9s76             1/1     Running   0          65m
coredns-9d85f5447-xq59t             1/1     Running   0          65m
etcd-k8s-node1                      1/1     Running   0          65m
kube-apiserver-k8s-node1            1/1     Running   0          65m
kube-controller-manager-k8s-node1   1/1     Running   0          65m
kube-flannel-ds-58hdr               1/1     Running   0          7m12s
kube-flannel-ds-qnf59               1/1     Running   0          2m50s
kube-flannel-ds-rdstl               1/1     Running   0          2m48s
kube-proxy-4pfbj                    1/1     Running   0          65m
kube-proxy-r5w9q                    1/1     Running   0          2m48s
kube-proxy-rhqcl                    1/1     Running   0          2m50s
kube-scheduler-k8s-node1            1/1     Running   0          65m

Deploy the Dashboard UI

[root@k8s-node1 ~]# vi kubernetes-dashboard.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta8
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

[root@k8s-node1 ~]# kubectl apply -f kubernetes-dashboard.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

[root@k8s-node1 kubernetes]# kubectl get pods --namespace kubernetes-dashboard -o wide
NAME                                         READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-76585494d8-mzpv6   1/1     Running   0          24m   10.244.1.2   k8s-node2   <none>           <none>
kubernetes-dashboard-5996555fd8-4rx2z        1/1     Running   0          24m   10.244.2.2   k8s-node3   <none>           <none>

# Map the container's port 443 to a port on the host (NodePort 30001)
[root@k8s-node1 kubernetes]# vi dashboard-svc.yml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
  ports:
  - protocol: TCP
    port: 443
    targetPort: 8443
    nodePort: 30001
[root@k8s-node1 kubernetes]# kubectl apply -f dashboard-svc.yml
service/kubernetes-dashboard created
[root@k8s-node1 kubernetes]# kubectl get service --namespace kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.96.63.3      <none>        8000/TCP        40m
kubernetes-dashboard        NodePort    10.96.125.188   <none>        443:30001/TCP   13m

Now open the k8s-node3 node IP in a browser (https://172.16.1.16:30001). If the browser warns that the connection is not private, just type "thisisunsafe" on the warning page to proceed.
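
To verify the NodePort from the command line first (an optional check; -k skips verification of the Dashboard's self-signed certificate):

[root@k8s-node1 kubernetes]# curl -k -I https://172.16.1.16:30001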

Create a service account and bind it to the default cluster-admin role

[root@k8s-node1 kubernetes]# kubectl create serviceaccount dashboard-admin -n kube-system
serviceaccount/dashboard-admin created
[root@k8s-node1 kubernetes]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@k8s-node1 kubernetes]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name:         dashboard-admin-token-7vt4f
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: be26fb74-7360-4c95-88d1-f58f2734e145

Type:  kubernetes.io/service-account-token

Data
====
token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImZEOEZBTnVvUDAzUVpwcHpIYmU0cF9KcDRMRGd0blZDdFRZRjhBUzF5a0kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tN3Z0NGYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiYmUyNmZiNzQtNzM2MC00Yzk1LTg4ZDEtZjU4ZjI3MzRlMTQ1Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.DhREJM87iHUt_5GIXZ0V37tZIi-FYlGB3IDQ-xO2Q8AQxw-1XK03pUOTlZFCuB_uIdPUNYimA5WdN69RmeHV2GZDQdGnkAuoohjR0WOtODdpflgBsXFSwKjJToCRoWcXTgoJKKQzPw-PZ5qlzWCUf6lQKce2cUiOhgqOsqvJsRyBHqfufROVtTY0JbK6ohP7ggoWdNhVkalIzWnJLeAZGCk8DKOg4RxJ3fot7pQjluo3yFKwQPsACPBdafQG-XfIVfGXfLshN3OLsuI1maqvk1mdb_RojuEXI9Us5v3T4MNMA9ZwWguHCyi4b07G6j-wRNMjD_arYLdEowxvdrBkWw
ca.crt:     1025 bytes
namespace:  11 bytes

Log in to the Dashboard using the token printed above.
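
If you only want the raw token for pasting into the login form, a one-liner built from the same commands is:

[root@k8s-node1 kubernetes]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') | awk '/^token:/{print $2}'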

If a node runs into problems, it needs to be deleted and re-joined to the cluster.

Delete the node

[root@master ~]# kubectl delete node node1
node "node1" deleted

Generate the join command

[root@master ~]# kubeadm token create --print-join-command

W0425 01:02:19.391867 62603 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]

kubeadm join 10.1.1.11:6443 --token 757a06.wnp34zge3cdcqag6 --discovery-token-ca-cert-hash sha256:b1ab3a019f671de99e3af0d9fd023078ad64941a3b8cd56c2a65624f0a218642

Re-join the node

root@node1:~# kubeadm join 10.1.1.11:6443 --token 757a06.wnp34zge3cdcqag6     --discovery-token-ca-cert-hash sha256:b1ab3a019f671de99e3af0d9fd023078ad64941a3b8cd56c2a65624f0a218642

Fixing failures when re-registering a node

Delete the node's old kubelet config file

root@node1:~# rm -f /etc/kubernetes/kubelet.conf

Restart the kubelet and Docker services

root@node1:~# systemctl restart docker kubelet

Delete the old CA file

root@node1:~# rm -f /etc/kubernetes/pki/ca.crt

Remove any containers left on the node

root@node1:~# docker rm -f $(docker ps -aq)
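
The manual cleanup above mirrors what kubeadm's own reset does; as an alternative (a suggestion, not part of the original steps), running it on the failed node clears the kubelet and cluster state in one shot before re-joining:

root@node1:~# kubeadm reset -f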