Prerequisites
- access to a full set of all gluster brick backups
- A freshly provisioned cluster with glfs server and client daemonsets running (empty globalfs) provisioned from the same inventory - minimally the same gluster global fs configuration and container versions
Post-Condition
- a fully HA gluster globalfs on the new cluster with the state from the time of the backup
Process
- master: kubectl get ds glfs-server-global --namespace=kube-system -o yaml > /tmp/server.yml
- master: kubectl get ds glfs-client-global --namespace=kube-system -o yaml > /tmp/client.yml
- master: kubectl delete -f /tmp/client.yml
- master: kubectl exec -it <server-pod> --namespace=kube-system -- bash
- <glpod>: gluster vol stop global
- <glpod>: gluster vol del global
- master: kubectl delete -f /tmp/server.yml
- for each glfs{1,2,3,4}:
- ssh <host>:
- <ssh>: sudo rm -rf /media/brick0/brick
- <ssh>: sudo -s
- <su>: copy (ftp, scp, sshfs, or similar) the brick xfs dump to /media/brick0
- exit
- <ssh>: docker run --privileged --rm -it -v /var/lib/glusterd:/var/lib/glusterd -v /var/log/glusterfs:/var/log/glusterfs -v /etc/glusterfs:/etc/glusterfs -v /media/brick0:/media/brick0 ndslabs/cluster-backup bash
- <docker>: cd /media/brick0
- <docker>: xfsrestore -f <file> .
- <docker>: exit
- <su>: exit
- master: kubectl create -f /tmp/server.yml
- master: kubectl exec -it --namespace=kube-system <glfs-server-global-rh7hs> -- bash
- <glpod>: . /etc/glconfig/glfs-config-global
- <glpod>: for i in ${PEERS}; do gluster peer probe $i; done
- <glpod>: gluster vol create global replica 2 transport tcp $(for i in ${PEERS}; do echo $i:/media/brick0/brick; done) force
- <glpod>: gluster vol set global nfs.disable on
- <glpod>: gluster vol start global
- <glpod>: gluster vol quota global enable
- <glpod>: exit
- master: kubectl create -f /tmp/client.yml
- <any-glfs-client>: sudo du -hc --max-depth=1 /var/glfs/global/ # force glfs to reset size and quota metadata