[root@v86a5soqgn7i23h ceph]# ceph osd pool create data 64 64
pool 'data' created
[root@v86a5soqgn7i23h ceph]# ceph osd lspools
1 data,
[root@v86a5soqgn7i23h ceph]# rbd create data/data --size 4096 --image-feature layering
[root@v86a5soqgn7i23h ceph]# rbd -p data ls
data
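With the data/data image in place, a static PV can point at it through the in-tree RBD volume plugin. Below is a minimal sketch of such a manifest for the local-ceph-pv that gets bound in the next step; the monitor address and secret name are placeholders, not values taken from this cluster.

# Sketch of a static PV backed by the data/data RBD image.
# The monitor address and secret name are hypothetical placeholders.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-ceph-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  rbd:
    monitors:
      - 192.168.0.10:6789    # placeholder monitor address
    pool: data
    image: data
    user: admin
    secretRef:
      name: ceph-secret      # placeholder secret holding the Ceph admin key
    fsType: ext4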
[root@v86a5soqgn7i23h ceph]# kubectl get pvc local-ceph-pvc
NAME             STATUS   VOLUME          CAPACITY   ACCESS MODES   STORAGECLASS   AGE
local-ceph-pvc   Bound    local-ceph-pv   2Gi        RWO                           13m
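A claim like the one above could be created with a manifest along these lines; this is a sketch, where volumeName pins the claim to the static PV and the empty storageClassName keeps dynamic provisioning out of the way.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-ceph-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  storageClassName: ""       # empty string disables dynamic provisioning for this claim
  volumeName: local-ceph-pv  # bind directly to the static PV above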
[root@v86a5soqgn7i23h data1]# kubectl apply -f ceph-pvc-master/
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.extensions/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
serviceaccount/rbd-provisioner created
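For dynamic provisioning, a StorageClass has to reference the provisioner just deployed. The ceph-pvc-master manifests themselves are not shown here, so the sketch below assumes the usual name of the external rbd-provisioner, ceph.com/rbd, and uses placeholder monitor and secret values; only the class name ceph and the pool data are taken from the transcripts.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph
provisioner: ceph.com/rbd             # assumed: default name of the external rbd-provisioner
parameters:
  monitors: 192.168.0.10:6789         # placeholder monitor address
  pool: data
  adminId: admin
  adminSecretName: ceph-admin-secret  # placeholder secret names
  adminSecretNamespace: kube-system
  userId: kube
  userSecretName: ceph-user-secret
  imageFormat: "2"
  imageFeatures: layering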
[root@v86a5soqgn7i23h newstorage]# kubectl get pvc
NAME    STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
claim   Bound    pvc-6b450a20-6d95-4d73-b70e-7f902795b489   1Gi        RWO            ceph           10m
[root@v86a5soqgn7i23h newstorage]# kubectl get pv | grep claim
pvc-6b450a20-6d95-4d73-b70e-7f902795b489   1Gi   RWO   Delete   Bound   default/claim   ceph   11m
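The claim above corresponds to a PVC along these lines; this is a sketch in which only the name, size, access mode, and storage class are taken from the output. Note that no volumeName is set: the ceph StorageClass triggers the rbd-provisioner, which creates the PV and the backing image automatically.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph   # hands the claim to the rbd-provisioner
  resources:
    requests:
      storage: 1Gi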
Check the images in the Ceph data pool
[root@v86a5soqgn7i23h newstorage]# ceph osd lspools
1 data,
[root@v86a5soqgn7i23h newstorage]# rbd ls -p data
data
kubernetes-dynamic-pvc-34daf5fd-8ed0-11ec-bbb2-32d244ea0bdb
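The kubernetes-dynamic-pvc-* image was created by the provisioner to back the dynamically provisioned PV. To cross-check, one could inspect it with rbd info (output omitted); its size should match the 1Gi request and its features should match the imageFeatures set in the StorageClass.

[root@v86a5soqgn7i23h newstorage]# rbd info data/kubernetes-dynamic-pvc-34daf5fd-8ed0-11ec-bbb2-32d244ea0bdb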