1、Create the MDS
[root@node2 ceph-cluster]# pwd
/etc/ceph/ceph-cluster
[root@node2 ceph-cluster]# ceph-deploy mds create node2    # create the MDS on node2 only
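The transcript does not show a verification step; if desired, the MDS state can be queried right away (not part of the original run; exact output varies by release, and the daemon sits in standby until a filesystem exists):
[root@node2 ceph-cluster]# ceph mds stat    # should report one MDS, standby until an fs is created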
2、Create the pools and the fs, then check
[root@node2 ceph-cluster]# ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
[root@node2 ceph-cluster]# ceph osd pool create cephfs_metadata 128
pool 'cephfs_metadata' created
[root@node2 ceph-cluster]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 2 and data pool 1
[root@node2 ceph-cluster]# ceph -s
    cluster 6fd6ad33-621d-48c6-882d-0f364555a16b
     health HEALTH_ERR
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
            64 pgs stuck unclean
            no osds
     monmap e3: 3 mons at {master1=172.16.18.16:6789/0,node1=172.16.18.24:6789/0,node2=172.16.18.22:6789/0}
            election epoch 8, quorum 0,1,2 master1,node2,node1
      fsmap e2: 0/0/1 up
     osdmap e4: 0 osds: 0 up, 0 in
            flags sortbitwise,require_jewel_osds
      pgmap v5: 320 pgs, 3 pools, 0 bytes data, 0 objects    # pool count went from 1 to 3
            0 kB used, 0 kB / 0 kB avail
                 320 creating
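HEALTH_ERR is expected at this point: the pools exist but there are no OSDs yet, so all PGs are stuck creating. A pg_num of 128 is a common round-number starting point; the usual rule of thumb is (OSD count × 100) / replica size, rounded to a power of two (here 9 × 100 / 3 = 300, so 128 or 256 per pool are both reasonable). As a quick sanity check, not shown in the original run, the new fs and pools can be listed:
[root@node2 ceph-cluster]# ceph fs ls          # should show cephfs with cephfs_metadata and cephfs_data
[root@node2 ceph-cluster]# ceph osd lspools    # the two new pools appear alongside the default pool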
3、Create the OSDs
[root@node2 ceph-cluster]# mkfs.xfs /dev/sdl
[root@node2 ceph-cluster]# ceph-deploy osd create node2:/dev/sdl    # prepare and activate in one step
or
[root@node2 ceph-cluster]# ceph-deploy osd prepare node2:/dev/sdl     # prepare the OSD
[root@node2 ceph-cluster]# ceph-deploy osd activate node2:/dev/sdl1   # activate it
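One hedged aside, not in the original run: if the disk carries old partitions or a leftover filesystem, prepare may refuse it; this generation of ceph-deploy can wipe the disk first. Note that this destroys all data on the disk:
[root@node2 ceph-cluster]# ceph-deploy disk zap node2:/dev/sdl    # WARNING: erases the disk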
Check after creation:
[root@node1 ~]# ceph osd tree
ID WEIGHT  TYPE NAME         UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 3.87982 root default
-2 1.29327     host node2
 0 0.43109         osd.0          up  1.00000          1.00000
 1 0.43109         osd.1          up  1.00000          1.00000
 2 0.43109         osd.2          up  1.00000          1.00000
-3 1.29327     host node1
 3 0.43109         osd.3          up  1.00000          1.00000
 4 0.43109         osd.4          up  1.00000          1.00000
 5 0.43109         osd.5          up  1.00000          1.00000
-4 1.29327     host master1
 6 0.43109         osd.6          up  1.00000          1.00000
 7 0.43109         osd.7          up  1.00000          1.00000
 8 0.43109         osd.8          up  1.00000          1.00000
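With all nine OSDs up and in, the PGs created in step 2 can finish peering, so the earlier HEALTH_ERR should clear. A quick recheck (not shown in the original transcript):
[root@node1 ~]# ceph -s    # expect HEALTH_OK once all 320 pgs are active+clean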
4、Mount
Run this on every client:
[root@node2 ceph-cluster]# vim /etc/ceph/admin.secret
AQAdVp1dlPM0BRAA/jR1AQzyP5k3zRU+HtcQdQ==
[root@node2 ceph-cluster]# mount -t ceph node2:6789,node1:6789,master1:6789:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret
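A few follow-ups that are common practice rather than part of the original transcript: the mount point must exist first (mkdir -p /mnt/cephfs), the secret file can be generated from the keyring instead of being pasted into vim, and an /etc/fstab entry makes the mount persistent. The fstab fields below simply mirror the mount command above:
[root@node2 ceph-cluster]# ceph auth get-key client.admin > /etc/ceph/admin.secret   # writes just the bare key
[root@node2 ceph-cluster]# chmod 600 /etc/ceph/admin.secret                          # keep the key unreadable to other users
[root@node2 ceph-cluster]# df -h /mnt/cephfs                                         # confirm the filesystem is mounted
# /etc/fstab entry for mounting at boot (_netdev defers the mount until networking is up):
node2:6789,node1:6789,master1:6789:/  /mnt/cephfs  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0 0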