common-ha : Fixing add node operation

Resource creation for the added node referenced a variable, new_node,
that was never passed in, so it expanded to an empty string. This
produced a wrong schema type in the CIB file, and the added node
therefore always ended up in a failed state. In addition, resources
were wrongly created twice, which caused further errors. This change
fixes the variable name and removes the duplicate invocation of the
recreate_resources function.
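
For illustration only (hypothetical node name, not part of the patch),
the failure comes from the usual shell behaviour of an unset variable
expanding to an empty string, which drops the node name from the
generated pcs command:

    # Sketch of the bug: ${newnode} mirrors the variable the old code
    # read but was never given a value, so it expands to "".
    add_node="node4"
    unset newnode
    echo "pcs constraint location nfs_start-${add_node} prefers ${newnode}=INFINITY"
    # prints "... prefers =INFINITY" -- no node name, so the entry
    # written to the CIB is malformed and the added node stays failed.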

The new node also has to be added to the existing ganesha-ha config
file so that subsequent add-node operations behave correctly, and the
edited file has to be copied to all the other cluster nodes. This
change fixes that as well.
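
A minimal sketch of that behaviour (node name, VIP and peer list are
hypothetical; the key is the one used by the patch and /etc/ganesha is
assumed as the config directory):

    # Record the new node's VIP in the local ganesha-ha.conf ...
    HA_CONFDIR=/etc/ganesha
    node="node4"
    vip="10.70.44.4"
    echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
    # ... and push the updated file to every other node in the cluster.
    for peer in node1 node2 node3; do
        scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no \
            -i /var/lib/glusterd/nfs/secret.pem \
            ${HA_CONFDIR}/ganesha-ha.conf ${peer}:${HA_CONFDIR}/
    done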

Change-Id: Ie55138e2657d22298d89db1c08f2e17930686bd6
BUG: 1233246
Signed-off-by: Meghana M <mmadhusu@redhat.com>
Reviewed-on: http://review.gluster.org/11316
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Reviewed-by: soumya k <skoduri@redhat.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Author:    Meghana M <mmadhusu@redhat.com>
Date:      2015-06-18 06:39:41 +05:30
Committer: Kaleb KEITHLEY
commit e19552c8cc (parent 8ab6608acc)
2 files changed, 39 insertions(+), 27 deletions(-)

cli/src/cli-rpc-ops.c

@@ -1971,7 +1971,7 @@ gf_cli_ganesha_cbk (struct rpc_req *req, struct iovec *iov,
}
else {
cli_out("ganesha enable : success ");
cli_out("nfs-ganesha : success ");
}
ret = rsp.op_ret;

extras/ganesha/scripts/ganesha-ha.sh

@@ -53,11 +53,15 @@ determine_service_manager () {
manage_service ()
{
local action=${1}
local new_node=${2}
if [ "$SERVICE_MAN" == "/usr/sbin/systemctl" ]
then
$SERVICE_MAN $1 nfs-ganesha
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
/var/lib/glusterd/nfs/secret.pem root@${new_node} "$SERVICE_MAN ${action} nfs-ganesha"
else
$SERVICE_MAN nfs-ganesha $1
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
/var/lib/glusterd/nfs/secret.pem root@${new_node} "$SERVICE_MAN nfs-ganesha ${action}"
fi
}
@@ -171,7 +175,8 @@ setup_copy_config()
if [ -e /var/lib/glusterd/nfs/secret.pem ]; then
while [[ ${1} ]]; do
if [ ${short_host} != ${1} ]; then
scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/nfs/secret.pem /etc/ganesha/ganesha-ha.conf ${1}:/etc/ganesha/
scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
/var/lib/glusterd/nfs/secret.pem ${1}:${HA_CONFDIR}/ganesha-ha.conf ${1}:${HA_CONFDIR}/
if [ $? -ne 0 ]; then
logger "warning: scp ganesha-ha.conf to ${1} failed"
fi
@@ -185,9 +190,11 @@ setup_copy_config()
copy_export_config ()
{
. /etc/ganesha/ganesha.conf
scp $HA_VOL_SERVER:/etc/ganesha.conf ${1}:/etc/ganesha/
scp -r $HA_VOL_SERVER:$2/exports/ ${1}:${2}/
local new_node=${1}
scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
/var/lib/glusterd/nfs/secret.pem ${HA_VOL_SERVER}:${GANESHA_CONF} ${new_node}:/etc/ganesha/
scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
/var/lib/glusterd/nfs/secret.pem ${HA_VOL_SERVER}:${HA_CONFDIR}/exports/ ${new_node}:${HA_CONFDIR}/
}
@@ -547,9 +554,6 @@ clear_resources()
shift
done
recreate_resources ${cibfile} ${add_node} ${add_vip} ${HA_SERVERS}
}
@@ -570,9 +574,9 @@ addnode_create_resources()
logger "warning: pcs -f ${cibfile} resource create nfs_start-${add_node} ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} failed"
fi
pcs -f ${cibfile} constraint location nfs_start-${add_node} prefers ${newnode}=INFINITY
pcs -f ${cibfile} constraint location nfs_start-${add_node} prefers ${add_node}=INFINITY
if [ $? -ne 0 ]; then
logger "warning: pcs -f ${cibfile} constraint location nfs_start-${add_node} prefers ${newnode}=INFINITY failed"
logger "warning: pcs -f ${cibfile} constraint location nfs_start-${add_node} prefers ${add_node}=INFINITY failed"
fi
pcs -f ${cibfile} constraint order nfs_start-${add_node} then nfs-mon-clone
@@ -802,24 +806,34 @@ main()
logger "adding ${node} with ${vip} to ${HA_NAME}"
copy_export_config ${node} ${HA_CONFDIR}
determine_service_manager
manage_service "start" ${node}
determine_servers "add"
pcs cluster node add ${node}
if [ $? -ne 0 ]; then
pcs cluster node add ${node}
if [ $? -ne 0 ]; then
logger "warning: pcs cluster node add ${node} failed"
fi
fi
addnode_create_resources ${node} ${vip}
addnode_create_resources ${node} ${vip}
#Subsequent add-node recreates resources for all the nodes
#that already exist in the cluster. The nodes are picked up
#from the entries in the ganesha-ha.conf file. Adding the
#newly added node to the file so that the resources specfic
#to this node is correctly recreated in the future.
echo "VIP_$node=\"$vip\"" >> ${HA_CONFDIR}/ganesha-ha.conf
setup_state_volume ${node}
NEW_NODES="$HA_CLUSTER_NODES,$node"
setup_copy_config ${node}
sed -i s/HA_CLUSTER_NODES.*/"HA_CLUSTER_NODES=\"$NEW_NODES\""/ \
$HA_CONFDIR/ganesha-ha.conf
HA_SERVERS="${HA_SERVERS} ${node}"
copy_export_config ${node} ${HA_CONFDIR}
determine_service_manager
manage_service "start"
setup_copy_config ${HA_SERVERS}
;;
delete | --delete)
@@ -838,8 +852,6 @@ main()
# TODO: delete node's directory in shared state
teardown_clean_etccluster ${node}
determine_service_manager
manage-service "stop"
@@ -856,9 +868,9 @@ main()
help | --help)
echo "Usage : add|delete|status"
echo "Add-node : ganesha-ha.sh --add <HA_CONF_DIR> \
echo "Add-node : ganesha-ha.sh --add <HA_CONFDIR> \
<NODE-IP/HOSTNAME> <NODE-VIP>"
echo "Delete-node: ganesha-ha.sh --delete <HA_CONF_DIR> \
echo "Delete-node: ganesha-ha.sh --delete <HA_CONFDIR> \
<NODE-IP/HOSTNAME>"
;;
*)