
|   | Disclaimer: This is an informal lab guide, provided as-is; your mileage may vary. | 
Prerequisites
- x86_64 system with virtualization technology enabled in the BIOS
- Fedora 23 as hypervisor OS (certainly something else can be used, but YMMV then with these instructions)
  - Packages: libvirt-client, virt-install, virt-viewer, virsh (a quick install/verification sketch follows this list)
- Red Hat subscriptions for access to Gluster and Ceph repos
- Latest RHGS 3.1 release ISO
- 50GB +/- hard drive space
  - This should be an empty block device partition (we assume /dev/sdb1 below)
  - SSD drive preferred
- $firewall_magic
- terminator terminal emulator (not strictly required, but makes executing commands across multiple systems easy breezy)
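For a quick sanity check of the virtualization prerequisite and to pull in the tooling listed above, something like the following should work on the Fedora hypervisor (a sketch; virsh is provided by the libvirt-client package):
grep -cE 'vmx|svm' /proc/cpuinfo   # non-zero output means VT-x/AMD-V is exposed to the OS
dnf -y install libvirt-client virt-install virt-viewer terminator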
Prepare the Hypervisor System
Set the tuned Profile
systemctl enable tuned
tuned-adm profile virtual-host
Configure Hypervisor Network
See Appendix A
Create a Thin LV Pool and Volumes
|   | The assumption here is that we are using block device /dev/sdb1 | 
pvcreate /dev/sdb1
vgcreate vg_lab /dev/sdb1
lvcreate -l 100%FREE -T vg_lab/lv_thinpool
lvchange --zero n vg_lab/lv_thinpool
Create two thin volumes in the pool:
- One OS volume with a 50GB virtual size
- One data volume with a 256TB virtual size (yes really; you can certainly make this whatever arbitrary size you want, but for the purposes of this lab this is the recommended size)
lvcreate -T -V 50G -n vm_storagelab-n0 vg_lab/lv_thinpool
lvcreate -T -V 256T -n vm_storagelab-n0-data vg_lab/lv_thinpool
Deploy Node n0
Create the Virtual Machine
|   | The assumption is that your ISO file is located at /isos/rhgs-3.1-u2-rhel-7-x86_64-dvd-1.iso | 
virt-install command
virt-install -n storagelab-n0 --os-type=Linux --os-variant=rhel7 --ram=2048 --vcpus=2 --disk path=/dev/vg_lab/vm_storagelab-n0,bus=virtio --disk path=/dev/vg_lab/vm_storagelab-n0-data,bus=virtio --network network=storagelab --graphics spice --input tablet,bus=usb --input mouse,bus=ps2 --input keyboard,bus=ps2 --location /isos/rhgs-3.1-u2-rhel-7-x86_64-dvd-1.iso
RHGS Installer
- In the Software Selection section, select the RH-Gluster-NFS-Ganesha add-on
- In the Installation Destination section, select the 50GiB Virtio Block Device
- In the Network & Hostname section, set the Host name to n0.storage.lab
- Begin the install; set the root password to something you will remember
Setup Networking
/etc/sysconfig/network-scripts/ifcfg-eth0 file
netfile="/etc/sysconfig/network-scripts/ifcfg-eth0"
sed -i s/^BOOTPROTO.*/BOOTPROTO=static/ $netfile
sed -i s/^UUID.*// $netfile
sed -i s/^ONBOOT.*/ONBOOT=yes/ $netfile
cat << EOF >> $netfile
IPADDR=10.11.12.100
NETMASK=255.255.255.0
GATEWAY=10.11.12.1
DNS1=10.11.12.1
EOF
ifup eth0
hosts entries
for i in {0..6}; do
echo "10.11.12.10${i} n${i} n${i}.storage.lab" >> /etc/hosts
done
Add the clonefix.sh Script to /root
See Appendix B
Register w/ Subscription Manager and Update
subscription-manager register
subscription-manager list --available
subscription-manager attach --pool <pool_id>
subscription-manager repos --disable=*
subscription-manager repos --enable=rhel-7-server-rpms
yum -y update
subscription-manager repos --enable=rh-gluster-3-for-rhel-7-server-rpms --enable=rh-gluster-3-nfs-for-rhel-7-server-rpms
|   | Latest Gluster Packages as of 2016-05-03 | 
subscription-manager repos --enable=rhel-7-server-rhceph-1.3-calamari-rpms --enable=rhel-7-server-rhceph-1.3-installer-rpms --enable=rhel-7-server-rhceph-1.3-tools-rpms --enable=rhel-7-server-rhceph-1.3-mon-rpms --enable=rhel-7-server-rhceph-1.3-osd-rpms
Synchronize Time
systemctl stop ntpd.service
ntpdate 0.rhel.pool.ntp.org
systemctl start ntpd.service
systemctl enable ntpd.service
Configure the Firewall
firewall-cmd --zone=public --add-service=glusterfs --add-service=nfs --add-service=rpc-bind --add-service=mountd --add-service=high-availability
firewall-cmd --zone=public --add-service=glusterfs --add-service=nfs --add-service=rpc-bind --add-service=mountd --add-service=high-availability --permanent
|   | The current 3.1.2 documentation does not cover opening the RQuota port 4501/tcp & 4501/udp, the NLM port 32803/tcp & 32803/udp, or the STATD port 662/tcp & 662/udp. Note that in the 3.1.3 release the default RQuota port will be changed to 875/tcp & 875/udp. | 
|   | Documentation Bug: BZ 1300175 is tracking fixes for both the default port assignments in the config files as well as the firewall rules. | 
firewall-cmd --zone=public --add-port=4501/tcp --add-port=4501/udp --add-port=32803/tcp --add-port=32803/udp --add-port=662/tcp --add-port=662/udp
firewall-cmd --zone=public --add-port=4501/tcp --add-port=4501/udp --add-port=32803/tcp --add-port=32803/udp --add-port=662/tcp --add-port=662/udp --permanent
|   | The documentation also does not cover the client ports that need to be opened. | 
|   | Documentation Bug: Same BZ 1300175 as noted above. | 
firewall-cmd --zone=public --add-port=662/tcp --add-port=662/udp --add-port=32803/tcp --add-port=32769/udp --add-port=892/tcp --add-port=892/udp
firewall-cmd --zone=public --add-port=662/tcp --add-port=662/udp --add-port=32803/tcp --add-port=32769/udp --add-port=892/tcp --add-port=892/udp --permanent
|   | We are being indiscriminate below about what type of Ceph node will run and are opening up ports for management, deployment, monitors, and OSDs. | 
firewall-cmd --zone=public --add-port=80/tcp --add-port=2003/tcp --add-port=4505-4506/tcp --add-port=6789/tcp --add-port=6800-7300/tcp
firewall-cmd --zone=public --add-port=80/tcp --add-port=2003/tcp --add-port=4505-4506/tcp --add-port=6789/tcp --add-port=6800-7300/tcp --permanent
Set the Tuned Performance Profile
tuned-adm profile rhgs-sequential-io
Setup the cephdeploy user
useradd cephdeploy
echo redhat | passwd --stdin cephdeploy
cat << EOF >/etc/sudoers.d/cephdeploy
cephdeploy ALL = (root) NOPASSWD:ALL
Defaults:cephdeploy !requiretty
EOF
chmod 0440 /etc/sudoers.d/cephdeploy
sudo -u cephdeploy ssh-keygen -f ~cephdeploy/.ssh/id_rsa -t rsa -N ''
sudo -u cephdeploy cat ~cephdeploy/.ssh/id_rsa.pub > ~cephdeploy/.ssh/authorized_keys
chown cephdeploy:cephdeploy ~cephdeploy/.ssh/authorized_keys
Setup Gluster Brick Data Filesystem
|   | Disk alignment configurations are ignored for this example. NOTE: The mkfs.xfs command below can take 3 minutes or more to complete for this 256TB virtual block device. | 
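For reference only (alignment is skipped in this lab because the backing device is virtual), on physical hardware you would typically align the PV and the XFS geometry to the underlying RAID stripe. A hedged sketch, assuming a hypothetical RAID 6 LUN with a 256KB stripe unit across 10 data disks:
pvcreate --dataalignment 2560k /dev/sdX   # hypothetical device; alignment = 256KB stripe unit x 10 data disks
mkfs.xfs -i size=512 -n size=8192 -d su=256k,sw=10 /dev/rhgs_vg/rhgs_lv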
pvcreate /dev/vdb
vgcreate rhgs_vg /dev/vdb
lvcreate -l 100%FREE -T rhgs_vg/rhgs_thinpool
lvchange --zero n rhgs_vg/rhgs_thinpool
lvcreate -V 256T -T rhgs_vg/rhgs_thinpool -n rhgs_lv
mkfs.xfs -i size=512 -n size=8192 /dev/rhgs_vg/rhgs_lv
mkdir -p /rhgs/bricks
echo "/dev/rhgs_vg/rhgs_lv   /rhgs/bricks   xfs   rw,inode64,noatime,nouuid   1 2" >> /etc/fstab
mount /rhgs/bricks
semanage fcontext -a -t glusterd_brick_t /rhgs/bricks
restorecon -Rv /rhgs/bricks
|   | After completing the above steps, shut down the n0 virtual machine. | 
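Before shutting n0 down, you can optionally confirm the brick filesystem and its SELinux label (a quick check, not part of the original procedure):
df -h /rhgs/bricks
ls -dZ /rhgs/bricks   # should show the glusterd_brick_t context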
Create the Lab Virtual Machines
|   | On the hypervisor, snapshot the n0 thin LVs for each of the 6 new virtual machines. Snapshot LVs must be manually activated; this is included in the commands below. | 
for i in {1..6}; do lvcreate -s --name vm_storagelab-n${i} vg_lab/vm_storagelab-n0; lvcreate -s --name vm_storagelab-n${i}-data vg_lab/vm_storagelab-n0-data; lvchange -ay -K vg_lab/vm_storagelab-n${i}; lvchange -ay -K vg_lab/vm_storagelab-n${i}-data; done
for i in {1..6}; do virt-install -n storagelab-n${i} --os-type=Linux --os-variant=rhel7 --ram=2048 --vcpus=2 --disk path=/dev/vg_lab/vm_storagelab-n${i},bus=virtio --disk path=/dev/vg_lab/vm_storagelab-n${i}-data,bus=virtio --network network=storagelab --graphics spice --input tablet,bus=usb --input mouse,bus=ps2 --input keyboard,bus=ps2 --noautoconsole --import; done
for i in {4..6}; do for j in {c..e}; do lvcreate -T -V 342T -n vm_storagelab-n${i}-osd${j} vg_lab/lv_thinpool; virsh attach-disk --live --config storagelab-n${i} /dev/vg_lab/vm_storagelab-n${i}-osd${j} vd${j} --targetbus virtio; done; done
Log into each VM console and run ~/clonefix.sh node N where N is the node number. Reboot after running the script.
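Before logging into the consoles, an optional quick check from the hypervisor confirms the clones and their thin snapshot volumes exist:
virsh list --all
lvs vg_lab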
Prepare the Systems
|   | You should now be able to ssh into all of the nodes from your hypervisor at the 10.11.12.10{1..6} IPs. I highly recommend the terminator tool to ease the command input and monitoring of multiple nodes simultaneously. | 
Setup the Core Gluster Services
Establish the Gluster Pool
for i in {2..6}; do gluster peer probe n${i}; done
gluster pool list
Create the Gluster Disperse Volume
gluster volume create ec01 disperse 6 redundancy 2 n1:/rhgs/bricks/ec01 n2:/rhgs/bricks/ec01 n3:/rhgs/bricks/ec01 n4:/rhgs/bricks/ec01 n5:/rhgs/bricks/ec01 n6:/rhgs/bricks/ec01
gluster volume start ec01
gluster volume info ec01
Ganesha-HA Prerequisites
Enable Shared Storage
gluster volume set all cluster.enable-shared-storage enable
Setup Cluster Services
|   | The docs say here to start the pacemaker service (though they do not say to enable it), but the service won’t start because of a corosync failure. This is because the corosync config is managed by the nfs-ganesha service. | 
|   | Documentation Bug: BZ 1324649 | 
systemctl enable pcsd.service
systemctl start pcsd.service
|   | Possible Documentation Bug: Insert bug tracker here | 
echo redhat | passwd --stdin hacluster
|   | You may need to run the below command several times. An undiagnosed problem (possibly related to timeouts from overloading the hypervisor) seems to cause Unable to communicate errors. As long as you eventually see Authorized messages for each node, you should be good (even if subsequent runs display the Unable to communicate error). | 
for i in {1..6}; do pcs cluster auth n${i} -u hacluster -p redhat; done
|   | The docs don’t give a great example or explanation for the above pcs cluster auth command. This is easiest done as above, executed from one node (after starting the pcsd service on all nodes), listing all cluster nodes on one command line. | 
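If the Unable to communicate errors keep recurring, a simple retry loop (a sketch, not from the product docs) saves re-running the command by hand:
for i in {1..6}; do
  until pcs cluster auth n${i} -u hacluster -p redhat | grep -qi authorized; do
    echo "retrying auth for n${i}..."; sleep 5
  done
done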
ssh-keygen -f /var/lib/glusterd/nfs/secret.pem -t rsa -N ''
for i in {1..6}; do ssh-copy-id -i /var/lib/glusterd/nfs/secret.pem.pub root@n${i}; done
|   | Yes, you do need to ssh-copy-id to the local node from which the ssh-copy-id is run. This ensures that the shared public key is in the /root/.ssh/authorized_keys file on all nodes. | 
|   | The docs only have you setup one-way passwordless ssh, from a "primary" node to the other nodes. See this gluster.org blog for Kaleb’s notes which include copying the keypair to the other nodes. | 
|   | Documentation BugBZ 1324941 | 
for i in {2..6}; do scp -i /var/lib/glusterd/nfs/secret.pem /var/lib/glusterd/nfs/secret.* root@n${i}:/var/lib/glusterd/nfs/; done
|   | No, you don’t need to copy these files to the local node from which you are sourcing the copy. The above command loop assumes you are copying from node n1. | 
|   | Without universal passwordless ssh with this specific keypair on all nodes, you will be unable to execute some NFS-Ganesha administrative commands from all nodes. | 
Configure and Start NFS-Ganesha
Define Service Ports
sed -i '/NFS_Core_Param/a \ \ \ \ \ \ \ \ MNT_Port = 20048' /etc/ganesha/ganesha.conf
|   | The below line in the documentation is unclear. RQUOTAD is a typo, and it is the Rquota_Port setting that is being referred to, which is set by default in the config to 4501. This port is changing to 875 in the 3.1.3 release. The NLM_Port needs to be defined and match the firewall rules set above, but there is no default value implied in the docs or the default config file. The STATD_Port needs to be defined in the /etc/sysconfig/nfs file on the server and the client, and should also match the firewall rules above. | 
|   | Documentation Bug: Same BZ 1300175 as noted above. | 
sed -i '/NFS_Core_Param/a \ \ \ \ \ \ \ \ NLM_Port = 32803' /etc/ganesha/ganesha.conf
sed -i '/STATD_PORT/s/^#//' /etc/sysconfig/nfs
sed -i '/LOCKD_TCPPORT/s/^#//' /etc/sysconfig/nfs
sed -i '/LOCKD_UDPPORT/s/^#//' /etc/sysconfig/nfs
sed -i '/MOUNTD_PORT/s/^#//' /etc/sysconfig/nfs
Setup Ganesha for HA
cat << EOF > /etc/ganesha/ganesha-ha.conf
HA_NAME="ganesha-ha-360"
HA_VOL_SERVER="n1"
HA_CLUSTER_NODES="n1,n2,n3,n4,n5,n6"
VIP_n1="10.11.12.201"
VIP_n2="10.11.12.202"
VIP_n3="10.11.12.203"
VIP_n4="10.11.12.204"
VIP_n5="10.11.12.205"
VIP_n6="10.11.12.206"
EOF
gluster nfs-ganesha enable
|   | It seems that the gluster nfs-ganesha enable command run above will in fact start the nfs-ganesha and pacemaker services, so at runtime everything will begin operating. However, on a reboot neither the nfs-ganesha nor the pacemaker services start (they are not enabled with systemctl), so the cluster does not start and NFS does not serve exports. The nfs-ganesha service not starting is by design per BZ 1236017. The pacemaker service should be enabled with systemctl to ensure the HA cluster re-forms at boot time. After this, the VIP will fail back to the server when the nfs-ganesha service is manually started. | 
|   | Documentation Bug: BZ 1324655 | 
systemctl enable pacemaker.service
Validate the Configuration
systemctl status {nfs-ganesha,pcsd,pacemaker,corosync}
/usr/libexec/ganesha/ganesha-ha.sh --status
pcs status
gluster volume set ec01 ganesha.enable on
showmount -e
Important Patching
An upstream patch set is needed to correct a problem with VIP failover during a hard outage. This is tracked in BZ 1278332 and the fix is expected in RHGS 3.1.3.
The patch set only changes scripts, so this can be easily tested by downloading the patch file and extracting the ganesha_{grace,mon,nfsd} files to /usr/lib/ocf/resource.d/heartbeat and the ganesha-ha.sh file to /usr/libexec/ganesha.
|   | Without this patch all VIPs become unavailable during a hard outage. | 
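One way to drop the patched files into place might look like the following, run on every node; this sketch assumes you have already downloaded the patched ganesha_grace, ganesha_mon, ganesha_nfsd, and ganesha-ha.sh files to /tmp (the download location and packaging are not specified here):
cp /tmp/ganesha_grace /tmp/ganesha_mon /tmp/ganesha_nfsd /usr/lib/ocf/resource.d/heartbeat/
cp /tmp/ganesha-ha.sh /usr/libexec/ganesha/
chmod 755 /usr/lib/ocf/resource.d/heartbeat/ganesha_{grace,mon,nfsd} /usr/libexec/ganesha/ganesha-ha.sh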
Gluster NFS Client Testing
|   | Commands in this section are run on a client node. The hypervisor node may be used effectively as the client. | 
Mount the ec01 volume with NFS using one of the VIPs
mkdir -p /gluster/ec01
mount -t nfs 10.11.12.201:/ec01 /gluster/ec01
df -h /gluster/ec01
file="/gluster/ec01/testfile" ; watch -d "dd if=/dev/urandom of=$file bs=1k count=1k >/dev/null 2>&1; echo md5sum:; md5sum $file"
Gracefully shut down node n1 (or whichever node hosts the VIP you are connected to).
|   | If you’ve applied the patches above, you can also stop or pause the VM abruptly instead of gracefully stopping the nfs-ganesha service. If you have not applied those patches, here be dragons. | 
You should notice a short disruption at the client while your dd writes hang. When the cluster renegotiates and the VIP is migrated, you can see which node is hosting the VIP in the output of the pcs status command. You can also check each node with the ip a command.
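For example, to spot where a VIP landed (assuming the ganesha-ha VIP resources use the standard IPaddr resource agent):
pcs status | grep IPaddr          # run on any surviving cluster node
ip a | grep '10\.11\.12\.20'      # run on an individual node to see whether it holds a VIP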
Gracefully shut down a second node.
When you have identified to which node your VIP has migrated, gracefully shut down that node as well. Again you will experience a short disruption at your client system. At this point, your client connection has survived the failure of two of the clustered NFS servers, and your Gluster disperse (erasure code) volume has survived the failure of two bricks. During the outage, your client has retained access to the full 1PB volume without data loss.
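For reference, the usable capacity works out as (6 - 2) bricks x 256TB = 1024TB, i.e. roughly 1PB, since a disperse 6 / redundancy 2 volume dedicates two bricks' worth of capacity to redundancy.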
Setup Ceph Core Services
|   | Assume all commands from here on are run as root on node n1 unless specified otherwise. | 
Install ceph-deploy and calamari on n1
yum -y install ceph-deploy calamari-server calamari-clients
calamari-ctl initialize
Setup the Ceph Cluster
|   | Below commands should be run as the cephdeploy user. The osd prepare command will take a while to complete. | 
su - cephdeploy
mkdir ~/ceph-config; cd ~/ceph-config
ceph-deploy new n1 n2 n3
ceph-deploy install --mon n{1..3}
ceph-deploy install --osd n{4..6}
ceph-deploy pkg --install ceph-selinux n{1..6}
ceph-deploy mon create-initial
ceph-deploy install --cli n1
ceph-deploy admin n1
sudo ceph osd crush tunables optimal
ceph-deploy disk zap n{4..6}:/dev/vd{c..e}
ceph-deploy osd prepare n{4..6}:/dev/vd{c..e}
ceph-deploy osd activate n{4..6}:/dev/vd{c..e}1
ceph-deploy calamari connect --master 'n1' n{1..6}
|   | Below commands should be run as the root user. | 
ceph osd crush add-bucket row1 row
ceph osd crush add-bucket rack1 rack
ceph osd crush add-bucket rack2 rack
ceph osd crush add-bucket rack3 rack
ceph osd crush move row1 root=default
ceph osd crush move rack1 row=row1
ceph osd crush move rack2 row=row1
ceph osd crush move rack3 row=row1
ceph osd crush move n4 rack=rack1
ceph osd crush move n5 rack=rack2
ceph osd crush move n6 rack=rack3
ceph osd tree
ceph osd pool create mypool 128 128 replicated replicated_ruleset
The ceph health output shows a warning for too few PGs. This is because the default rbd pool doesn’t have a PG count that is optimal for our 9 OSD cluster. We can easily correct this.
ceph health
ceph -s
ceph osd lspools
ceph osd pool get rbd pg_num
ceph osd pool set rbd pg_num 128
watch -n1 -d ceph -s
ceph osd pool set rbd pgp_num 128
watch -n1 -d ceph -s
ceph df
su - cephdeploy
cd ~/ceph-config
ceph-deploy install --rgw n1
ceph-deploy rgw create n1
sudo lsof | grep LISTEN | grep 7480
sudo firewall-cmd --add-port 7480/tcp
sudo firewall-cmd --add-port 7480/tcp --permanent
curl http://10.11.12.101:7480
radosgw-admin user create --uid="rgwuser" --display-name="RGW User"
radosgw-admin subuser create --uid=rgwuser --subuser=rgwuser:swift --access=full
radosgw-admin key create --subuser=rgwuser:swift --key-type=swift --gen-secret
From the above key create command output, note the secret_key under the swift_keys section for use below at the client.
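If you prefer to capture the key programmatically instead of copying it by hand, something like this should work (a sketch; it assumes the stock python on RHEL 7 and that rgwuser has a single swift key):
swift_key=$(radosgw-admin user info --uid=rgwuser | python -c 'import json,sys; print(json.load(sys.stdin)["swift_keys"][0]["secret_key"])')
echo $swift_key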
radosgw-admin zone get
curl -s -i -H "X-Auth-User: rgwuser:swift" -H "X-Auth-Key: <swift_secret_key>" http://10.11.12.101:7480/auth
token=`curl -s -i -H "X-Auth-User: rgwuser:swift" -H "X-Auth-Key: <swift_secret_key>" http://10.11.12.101:7480/auth | grep Storage-Token | awk '{print $2}'`
url=`curl -s -i -H "X-Auth-User: rgwuser:swift" -H "X-Auth-Key: <swift_secret_key>" http://10.11.12.101:7480/auth | grep Storage-Url | awk '{print $2}'`
url=${url%$'\r'}
curl -i -H "X-Auth-Token: $token" $url
curl -i -X PUT -H "X-Auth-Token: $token" $url/mybucket
curl -i -H "X-Auth-Token: $token" $url
curl -i -H "X-Auth-Token: $token" $url/mybucket
watch -d "curl -s -i -H 'X-Auth-Token: $token' -T /gluster/ec01/testfile $url/mybucket/testfile"
curl -i -H "X-Auth-Token: $token" $url/mybucket
The .rgw.buckets and .rgw.buckets.index pools have been automatically created.
rados lspools
Find the testfile file on the Ceph system
rados ls -p .rgw.buckets | grep testfile
ceph osd map .rgw.buckets <testfile_object>
ceph pg map <placement_group>
ceph osd tree
curl -s -i -H "X-Auth-Token: $token" -T /etc/passwd $url/mybucket/passwd
curl -i -H "X-Auth-Token: $token" $url/mybucket
Find the passwd file on the Ceph system
rados ls -p .rgw.buckets | grep passwd
ceph osd map .rgw.buckets <passwd_object>
ceph osd tree
Find the passwd file on one of the OSDs on node n6
df -h
find /var/lib/ceph/osd/ceph-<osd_num> -name '*passwd*'
myosd=`find /var/lib/ceph/osd/ceph-<osd_num> -name '*passwd*'`
ls -l $myosd
file $myosd
cat $myosd
md5sum $myosd
find /var/lib/ceph/osd/ceph-<other_osd_num> -name '*passwd*'
md5sum at the client
md5sum /etc/passwd
From n1, watch the cluster while you shut down node n6
ceph -w
Shut down node n6. Watch for the gluster and ceph clients to resume activity.
ceph osd map .rgw.buckets <testfile_object>
Reveal: the lab was run entirely on a laptop with an external SSD hard drive.
Bonus: Primer on LVM thin provisioning…
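As a quick illustration of why the 256TB virtual volumes above fit on a roughly 50GB disk: thin LVs only consume physical extents from the pool as data is actually written. A minimal sketch reusing the lab's vg_lab pool (lv_demo is a throwaway name, not part of the lab):
lvcreate -T -V 1T -n lv_demo vg_lab/lv_thinpool   # 1TB virtual size, almost no physical space consumed
lvs vg_lab                                        # LSize shows the virtual size; Data% shows real pool usage
lvremove -f vg_lab/lv_demo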
Appendix A - hypervisor network config
/etc/libvirt/qemu/networks/storagelab.xml file for the virtual network definition
cat << EOF > /etc/libvirt/qemu/networks/storagelab.xml
<network>
  <name>storagelab</name>
  <uuid>b8c296e3-b563-4e4b-bca1-a273ab670bf1</uuid>
  <forward mode='nat'/>
  <bridge name='virbr1' stp='on' delay='0'/>
  <mac address='52:54:00:13:51:c4'/>
  <domain name='lab-rhgs'/>
  <ip address='10.11.12.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='10.11.12.10' end='10.11.12.99'/>
    </dhcp>
  </ip>
</network>
EOF
virsh net-create /etc/libvirt/qemu/networks/storagelab.xml
Appendix B - clonefix.sh
cat << 'EOF' > clonefix.sh
#!/bin/bash
function usage {
 echo "Usage: $0 [node|client] [<host number>]"
 exit 1
}
function doit {
 #echo "Correcting eth0 device name in udev rule..."
 #udevfile="/etc/udev/rules.d/70-persistent-net.rules"
 #sed -i '/eth0/d' $udevfile
 #sed -i s/eth1/eth0/ $udevfile
 echo "Correcting hostname..."
 netfile="/etc/hostname"
 sed -i s/${a}0/${a}${b}/ $netfile
 echo "Correcting ifconfig..."
 ifcfgfile="/etc/sysconfig/network-scripts/ifcfg-eth0"
 #sed -i '/HWADDR/d' $ifcfgfile
 sed -i '/UUID/d' $ifcfgfile
 #echo HWADDR=\"`ifconfig eth1 | head -1 | awk '{print $5}'`\" >> $ifcfgfile
 sed -i s/\.12\.${ip}0/\.12\.${ip}${b}/ $ifcfgfile
 echo "Correcting glusterd.info..."
 glusterdinfo="/var/lib/glusterd/glusterd.info"
 sed -i s/^UUID.*/UUID\=`uuidgen`/ $glusterdinfo
 echo "Done!"
}
if [ $# -lt 2 ]; then
  usage
fi
if [ $1 = "node" ]; then
 a="n"
 ip="10"
elif [ $1 = "client" ]; then
 a="client"
 ip="11"
else
 usage
fi
re='^[0-9]+$'
if [[ $2 =~ $re ]]; then
 b=$2
else
 usage
fi
echo
echo "  !!Careful!!"
echo "  !!Careful!! This is only meant to be run on a newly-cloned node."
echo "  !!Careful!! It'll break things otherwise."
echo "  !!Careful!!"
echo
echo "This host will be reconfigured as \"${a}${b}\""
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
 doit
else
 echo "Aborted!"
 exit 1
fi
EOF
chmod 755 clonefix.sh
Appendix C - Lab Reset (from hypervisor)
umount /gluster/ec01
for i in {1..6}; do virsh destroy storagelab-n${i}; done
for i in `lvs vg_lab | grep storagelab | awk '{print $1}' | grep -v n0`; do lvremove -f vg_lab/$i; done
for i in `ls /etc/libvirt/qemu/storagelab* | grep -v n0`; do rm -f $i; done ; systemctl restart libvirtd
sed -i s/^10.11.12.*//g ~/.ssh/known_hosts