Statically Mounting Ceph RBD

There are two ways to mount Ceph RBD in Kubernetes. The first is the traditional PV & PVC approach: an administrator pre-creates the PV and PVC, and the corresponding Deployment or ReplicationController then mounts the PVC. Since Kubernetes 1.4, there is a more convenient way to create PVs dynamically: the StorageClass. With a StorageClass there is no need to pre-create fixed-size PVs and wait for someone to claim them; users simply create a PVC and a matching PV is provisioned on demand.

Create the Ceph storage pool and RBD image

[root@v86a5soqgn7i23h ceph]# ceph osd pool create data 64 64
pool 'data' created
[root@v86a5soqgn7i23h ceph]# ceph osd lspools
1 data,
[root@v86a5soqgn7i23h ceph]# rbd create data/data --size 4096 --image-feature layering
[root@v86a5soqgn7i23h ceph]# rbd -p data ls
data
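
Optionally, the new image's size and features can be double-checked before wiring it into Kubernetes (verification step assumed, not shown in the original transcript):

rbd info data/data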

Create the Ceph secret

[root@v86a5soqgn7i23h ceph]# ceph auth get-key client.admin | base64 > ceph.key
[root@v86a5soqgn7i23h ceph]# ls
ceph.key
[root@v86a5soqgn7i23h ceph]# cat ceph.key
QVFBVFVRdGlSclF6Q1JBQTlpTlFmQ3RvN1dubm5IQ1RmSlMyRHc9PQ==

[root@v86a5soqgn7i23h ceph]# vi ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  # Note: this value is base64 encoded.
  # echo -n "keystring" | base64
  key: QVFBVFVRdGlSclF6Q1JBQTlpTlFmQ3RvN1dubm5IQ1RmSlMyRHc9PQ== # key generated by Ceph
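
The Secret has to exist in the cluster before the PV can reference it. The apply step is not shown in the transcript above; the assumed commands are:

kubectl apply -f ceph-secret.yaml
kubectl get secret ceph-secret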

Deploy the PV

[root@v86a5soqgn7i23h ceph]# cat ceph-pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-ceph-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    # Ceph monitor nodes
    monitors:
      - 172.16.1.11:6789
      - 172.16.1.12:6789
      - 172.16.1.13:6789
    # name of the Ceph storage pool
    pool: data
    # name of the image created in that pool
    image: data
    user: admin
    secretRef:
      name: ceph-secret
    fsType: xfs
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
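
The PV manifest is then applied before checking its status (step assumed, not shown in the original transcript):

kubectl apply -f ceph-pv.yaml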

[root@v86a5soqgn7i23h ceph]# kubectl get pv local-ceph-pv
NAME            CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS   REASON   AGE
local-ceph-pv   2Gi        RWO            Retain           Bound    default/local-ceph-pvc                           14m

Deploy the PVC

[root@v86a5soqgn7i23h ceph]# cat ceph-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: local-ceph-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
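
As with the PV, the claim is applied first (assumed step, not shown in the transcript):

kubectl apply -f ceph-pvc.yaml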

[root@v86a5soqgn7i23h ceph]# kubectl get pvc local-ceph-pvc
NAME             STATUS   VOLUME          CAPACITY   ACCESS MODES   STORAGECLASS   AGE
local-ceph-pvc   Bound    local-ceph-pv   2Gi        RWO                           13m

Deploy the Pod

[root@v86a5soqgn7i23h ceph]# cat cephdemo-deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cephdemo
  name: cephdemo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cephdemo
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: cephdemo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - mountPath: "/data"
          name: data
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: local-ceph-pvc
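
The Deployment is applied the same way before listing the Pods (assumed step, not shown in the transcript):

kubectl apply -f cephdemo-deploy.yaml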

[root@v86a5soqgn7i23h ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
cephdemo-5855f5df97-kkshv 1/1 Running 0 78s
---
[root@v86a5soqgn7i23h ceph]# kubectl exec -it cephdemo-5855f5df97-kkshv -- sh

# Create a file to test whether the data is lost when the Pod is deleted
# cd /data
# pwd
/data
# mkdir zux
# echo "zux demo" > zux/demo.td
# ls
zux
# exit
---
[root@v86a5soqgn7i23h ceph]# kubectl delete -f cephdemo-deploy.yaml
deployment.apps "cephdemo" deleted
[root@v86a5soqgn7i23h ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
cephdemo-5855f5df97-kkshv 0/1 Terminating 0 7m51s
---
[root@v86a5soqgn7i23h ceph]# kubectl apply -f cephdemo-deploy.yaml
deployment.apps/cephdemo created
[root@v86a5soqgn7i23h ceph]# kubectl get pod
NAME READY STATUS RESTARTS AGE
cephdemo-5855f5df97-8msg6 1/1 Running 0 15s
[root@v86a5soqgn7i23h ceph]# kubectl exec -it cephdemo-5855f5df97-8msg6 -- cat /data/zux/demo.td
zux demo
# The data written earlier is still visible, which confirms that Ceph persistent storage is working

Dynamically Mounting Ceph RBD

Create the Ceph key

[root@v86a5soqgn7i23h ceph]# vi ceph-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
data:
  key: QVFBVFVRdGlSclF6Q1JBQTlpTlFmQ3RvN1dubm5IQ1RmSlMyRHc9PQ==

[root@v86a5soqgn7i23h ceph]# kubectl apply -f ceph-secret.yaml
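
If preferred, the same Secret can be created in one step, letting kubectl handle the base64 encoding instead of pasting a pre-encoded key (an alternative sketch, not part of the original steps):

kubectl create secret generic ceph-admin-secret \
  --from-literal=key="$(ceph auth get-key client.admin)"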

Create the StorageClass

[root@v86a5soqgn7i23h newstorage]# vi ceph-storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph
provisioner: ceph.com/rbd
parameters:
  monitors: 172.16.1.11:6789            # Ceph mon node
  adminId: admin
  adminSecretName: ceph-admin-secret    # name of the secret created above
  adminSecretNamespace: default
  pool: data                            # RBD storage pool
  userId: admin
  userSecretName: ceph-admin-secret
  imageFeatures: layering
  imageFormat: "2"
[root@v86a5soqgn7i23h newstorage]# kubectl apply -f ceph-storageclass.yaml
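
A quick check that the StorageClass was registered (assumed verification step, not in the original transcript):

kubectl get storageclass ceph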

Deploy the third-party Ceph RBD provisioner Pod

[root@v86a5soqgn7i23h data1]# kubectl apply -f ceph-pvc-master/
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.extensions/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
serviceaccount/rbd-provisioner created
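
The ceph.com/rbd provisioner named in the StorageClass is served by this external rbd-provisioner Deployment, so it should be up before any PVC is created against the ceph class; a simple check (assumed step) is:

kubectl get deployment rbd-provisioner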

Create the PVC

[root@v86a5soqgn7i23h newstorage]# vi ceph-storage-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph
  resources:
    requests:
      storage: 1Gi
[root@v86a5soqgn7i23h newstorage]# kubectl apply -f ceph-storage-pvc.yaml

Verify

[root@v86a5soqgn7i23h newstorage]# kubectl get pvc
NAME    STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
claim   Bound    pvc-6b450a20-6d95-4d73-b70e-7f902795b489   1Gi        RWO            ceph           10m
[root@v86a5soqgn7i23h newstorage]# kubectl get pv | grep claim
pvc-6b450a20-6d95-4d73-b70e-7f902795b489   1Gi   RWO   Delete   Bound   default/claim   ceph   11m

List the images in the Ceph data pool

[root@v86a5soqgn7i23h newstorage]# ceph osd lspools
1 data,
[root@v86a5soqgn7i23h newstorage]# rbd ls -p data
data
kubernetes-dynamic-pvc-34daf5fd-8ed0-11ec-bbb2-32d244ea0bdb

The PV name does not directly match any Ceph image name.

To map a PV to its Ceph image, inspect the PV details and read its image field, which corresponds to the Ceph image name.

[root@v86a5soqgn7i23h newstorage]# kubectl get pv pvc-ebbcb043-0f1c-45fa-b97c-9fe449ccc06f -o yaml | tail -n 15
    uid: ebbcb043-0f1c-45fa-b97c-9fe449ccc06f
  persistentVolumeReclaimPolicy: Delete
  rbd:
    image: kubernetes-dynamic-pvc-219719e0-8ed7-11ec-bbb2-32d244ea0bdb # this image name is the Ceph image ID
    keyring: /etc/ceph/keyring
    monitors:
    - 172.16.1.11:6789
    pool: data
    secretRef:
      name: ceph-admin-secret
    user: admin
  storageClassName: ceph
  volumeMode: Filesystem
status:
  phase: Bound

[root@v86a5soqgn7i23h newstorage]# rbd ls -p data | grep kubernetes-dynamic-pvc-219719e0-8ed7-11ec-bbb2-32d244ea0bdb
kubernetes-dynamic-pvc-219719e0-8ed7-11ec-bbb2-32d244ea0bdb

# A perfect match!
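
A shortcut for the lookup above (not part of the original transcript) is to print only the image field with jsonpath:

kubectl get pv pvc-ebbcb043-0f1c-45fa-b97c-9fe449ccc06f -o jsonpath='{.spec.rbd.image}'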