You can configure multiple back ends in Cinder at the same time. With openstack-ansible, each back end is defined under `cinder_backends` in the `storage_hosts` section of `/etc/openstack_deploy/openstack_user_config.yml`:
```yaml
# Do this for all three controllers.
# Example: three Ceph pools and one QNAP pool.
# You can add any other supported back end (LVM, NFS, etc.).
storage_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        ceph-ssd:
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: CEPH-SSD
          rbd_pool: volumes-ssd
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
          rbd_flatten_volume_from_snapshot: 'false'
          rbd_max_clone_depth: 5
          rbd_store_chunk_size: 4
          rados_connect_timeout: -1
        ceph-hdd:
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: CEPH-HDD
          rbd_pool: volumes-hdd
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
          rbd_flatten_volume_from_snapshot: 'false'
          rbd_max_clone_depth: 5
          rbd_store_chunk_size: 4
          rados_connect_timeout: -1
        ceph-nvme:
          volume_driver: cinder.volume.drivers.rbd.RBDDriver
          volume_backend_name: CEPH-NVME
          rbd_pool: volumes-nvme
          rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: "{{ cinder_ceph_client }}"
          rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
          rbd_flatten_volume_from_snapshot: 'false'
          rbd_max_clone_depth: 5
          rbd_store_chunk_size: 4
          rados_connect_timeout: -1
        qnap-hdd:
          volume_backend_name: "QNAP-HDD"
          volume_driver: cinder.volume.drivers.qnap.QnapISCSIDriver
          qnap_management_url: http://10.11.12.10:8080
          qnap_poolname: "Storage Pool 1"
          qnap_storage_protocol: iscsi
          qnap_server_port: 8080
          iscsi_ip_address: 172.29.240.21
          san_login: username
          san_password: password
          san_thin_provision: True
  # infra2: ... repeat the above for infra2 and infra3 as well
```
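After updating the config, re-run the Cinder playbook and create one volume type per back end so users can target a specific pool. A minimal sketch, assuming a standard openstack-ansible deployment and the back-end names above (the volume type names are arbitrary; what matters is that the `volume_backend_name` property matches the value set in `cinder_backends`):

```bash
# Deploy the new back-end configuration (run from the
# openstack-ansible playbooks directory on the deployment host)
openstack-ansible os-cinder-install.yml

# Create a volume type per back end and map it to the
# matching volume_backend_name from the config above
openstack volume type create ceph-ssd
openstack volume type set --property volume_backend_name=CEPH-SSD ceph-ssd
openstack volume type create ceph-hdd
openstack volume type set --property volume_backend_name=CEPH-HDD ceph-hdd
openstack volume type create ceph-nvme
openstack volume type set --property volume_backend_name=CEPH-NVME ceph-nvme
openstack volume type create qnap-hdd
openstack volume type set --property volume_backend_name=QNAP-HDD qnap-hdd

# Verify that every cinder-volume back end reports as up
openstack volume service list

# Create a test volume on a specific back end
openstack volume create --type ceph-ssd --size 10 test-ssd
```

The scheduler routes each new volume to the back end whose `volume_backend_name` matches the requested volume type, so a single cinder-volume service can serve all four pools side by side.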