From your deploy server
If you need to start fresh again and again 🙂
cd /opt/ceph-deploy/
ceph-deploy purge c20 c21 c22
ceph-deploy purgedata c20 c21 c22
mkdir /opt/ceph-deploy/
cd /opt/ceph-deploy/
rm -f *
ceph-deploy new c20 c21 c22
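ceph-deploy new drops a skeleton ceph.conf and the monitor keyring into the current directory, which is why everything here is run from /opt/ceph-deploy/ and why the next step is to edit that generated ceph.conf.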
add the following to ceph.conf:
public_network = 172.29.244.0/22
cluster_network = 172.29.240.0/22
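For context, the [global] section of the generated ceph.conf ends up looking roughly like this once those two lines are added (the fsid and mon addresses below are taken from the cluster output further down, and the auth lines are the stock ones ceph-deploy writes, so treat this as a sketch rather than a copy of my file):

[global]
fsid = d66d8ace-e96c-48c3-8d69-87b3f99ff09c
mon_initial_members = c20, c21, c22
mon_host = 172.29.244.20,172.29.244.21,172.29.244.22
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 172.29.244.0/22
cluster_network = 172.29.240.0/22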
ceph-deploy install c20 c21 c22
ceph-deploy mon create c20 c21 c22
ceph-deploy gatherkeys c20 c21 c22
## if it fails first time, try again :)

# I am not using journals here
ceph-deploy osd --zap-disk create c20:vdb
ceph-deploy osd --zap-disk create c20:vdc
ceph-deploy osd --zap-disk create c20:vdd
ceph-deploy osd --zap-disk create c20:vde
ceph-deploy osd --zap-disk create c20:vdf
ceph-deploy osd --zap-disk create c21:vdb
ceph-deploy osd --zap-disk create c21:vdc
ceph-deploy osd --zap-disk create c21:vdd
ceph-deploy osd --zap-disk create c21:vde
ceph-deploy osd --zap-disk create c21:vdf
ceph-deploy osd --zap-disk create c22:vdb
ceph-deploy osd --zap-disk create c22:vdc
ceph-deploy osd --zap-disk create c22:vdd
ceph-deploy osd --zap-disk create c22:vde
ceph-deploy osd --zap-disk create c22:vdf
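Typing fifteen nearly identical OSD lines gets old; a small shell loop over the same hosts and disks does exactly the same thing:

for host in c20 c21 c22; do
  for disk in vdb vdc vdd vde vdf; do
    ceph-deploy osd --zap-disk create ${host}:${disk}
  done
done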
ssh c20 ceph osd tree
# id    weight  type name       up/down reweight
-1      2.85    root default
-2      0.9499          host c20
0       0.19                    osd.0   up      1
1       0.19                    osd.1   up      1
2       0.19                    osd.2   up      1
3       0.19                    osd.3   up      1
4       0.19                    osd.4   up      1
-3      0.9499          host c21
5       0.19                    osd.5   up      1
6       0.19                    osd.6   up      1
7       0.19                    osd.7   up      1
8       0.19                    osd.8   up      1
9       0.19                    osd.9   up      1
-4      0.9499          host c22
10      0.19                    osd.10  up      1
11      0.19                    osd.11  up      1
12      0.19                    osd.12  up      1
13      0.19                    osd.13  up      1
14      0.19                    osd.14  up      1
ceph health might show some stuck PGs, errors, warnings etc. We do not need any of the default pools, so delete them \o/
ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pool delete metadata metadata --yes-i-really-really-mean-it
ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
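If you want an explicit confirmation that they are gone, listing the pools should come back empty now (the ceph status below shows 0 pools as well):

ceph osd lspools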
root@c20:~# ceph status
    cluster d66d8ace-e96c-48c3-8d69-87b3f99ff09c
     health HEALTH_WARN too few pgs per osd (0 < min 20)
     monmap e1: 3 mons at {c20=172.29.244.20:6789/0,c21=172.29.244.21:6789/0,c22=172.29.244.22:6789/0}, election epoch 10, quorum 0,1,2 c20,c21,c22
     osdmap e80: 15 osds: 15 up, 15 in
      pgmap v139: 0 pgs, 0 pools, 0 bytes data, 0 objects
            516 MB used, 2923 GB / 2923 GB avail
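That HEALTH_WARN is expected: with the default pools gone there are zero pools and zero PGs, so of course there are too few PGs per OSD. It clears as soon as we create our own pools.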
ceph osd pool create cinder-volumes 256
ceph osd pool create glance-images 256
ceph osd pool create ephemeral-vms 256
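If you are wondering where 256 comes from: a common rule of thumb is (number of OSDs x 100) / replica size, so (15 x 100) / 3 = 500 PGs for the whole cluster; split that over three pools (~170 each) and round up to the next power of two and you land on 256 per pool. That is my reading of the numbers, the exact value is not critical as long as it is in that ballpark.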
add the user accounts:
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=cinder-volumes, allow rwx pool=ephemeral-vms, allow rx pool=glance-images'
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=glance-images'
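While you are at it, you can dump the two keys into keyring files so they are ready to hand over to the cinder and glance hosts later (the paths below are just the conventional ones, adjust to taste):

ceph auth get-or-create client.cinder -o /etc/ceph/ceph.client.cinder.keyring
ceph auth get-or-create client.glance -o /etc/ceph/ceph.client.glance.keyring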
root@c20:~# ceph status
    cluster d66d8ace-e96c-48c3-8d69-87b3f99ff09c
     health HEALTH_OK
     monmap e1: 3 mons at {c20=172.29.244.20:6789/0,c21=172.29.244.21:6789/0,c22=172.29.244.22:6789/0}, election epoch 10, quorum 0,1,2 c20,c21,c22
     osdmap e85: 15 osds: 15 up, 15 in
      pgmap v146: 1024 pgs, 4 pools, 0 bytes data, 0 objects
            544 MB used, 2923 GB / 2923 GB avail
                1024 active+clean
root@c20:~# ceph df
GLOBAL:
    SIZE      AVAIL     RAW USED     %RAW USED
    2923G     2923G     552M         0.02
POOLS:
    NAME               ID     USED     %USED     MAX AVAIL     OBJECTS
    cinder-volumes     3      0        0         974G          0
    glance-images      4      0        0         974G          0
    ephemeral-vms      5      0        0         974G          0