geo-replication: fix the logic of choosing the remote node to sync

Change-Id: Ie15636357d89e94b6bfad0e168b1fcad53508c47
BUG: 1003807
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Signed-off-by: Venky Shankar <vshankar@redhat.com>
Reviewed-on: http://review.gluster.org/5759
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Avra Sengupta <asengupt@redhat.com>
Tested-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-by: Anand Avati <avati@redhat.com>
Author:    Venky Shankar <vshankar@redhat.com>
Date:      2013-08-28 01:33:11 +05:30
Committer: Anand Avati <avati@redhat.com>
parent b2e8a4517d
commit eebd4e1485

@@ -248,11 +248,20 @@ def distribute(*resources):
             slaves = [ 'ssh://' + rap.remote_addr + ':' + v for v in slavevols ]
         else:
             slaves = slavevols
+    # get the proper index of local bricks in master volume,
+    # and setup the geo replication with appropriate slave node
+    mbricks = [ b['dir'] for b in mvol.bricks ]
+    mbricks.sort()
     locmbricks.sort()
     slaves.sort()
     workerspex = []
-    for i in range(len(locmbricks)):
-        workerspex.append((locmbricks[i], slaves[i % len(slaves)]))
+    locbidx = 0
+    for idx, brick in enumerate(mbricks):
+        if brick == locmbricks[locbidx]:
+            workerspex.append((locmbricks[locbidx], slaves[idx % len(slaves)]))
+            locbidx += 1
     logging.info('worker specs: ' + repr(workerspex))
     return workerspex, suuid
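
For illustration only (not part of the commit), the sketch below contrasts the old and new brick-to-slave pairing in a standalone Python snippet. The names mbricks, locmbricks, slaves and workerspex mirror the variables in the diff, but the two-node, four-brick master layout, the slave URLs and the pair_old/pair_new helpers are invented for this example, and the bounds check on locbidx is added here only so the standalone loop cannot run past the end of the local brick list.

# Minimal sketch of the pairing change in distribute(); all sample data is invented.
mbricks = ['/bricks/m1', '/bricks/m2', '/bricks/m3', '/bricks/m4']   # all master bricks, sorted
slaves = ['ssh://slave-a::svol', 'ssh://slave-b::svol']              # slave URLs, sorted
local_bricks_by_node = {                                             # hypothetical brick placement
    'node1': ['/bricks/m1', '/bricks/m3'],
    'node2': ['/bricks/m2', '/bricks/m4'],
}

def pair_old(locmbricks):
    # Old logic: the slave index comes from the brick's position in the *local*
    # list, so every master node independently starts counting from slaves[0].
    return [(b, slaves[i % len(slaves)]) for i, b in enumerate(locmbricks)]

def pair_new(locmbricks):
    # New logic (as in the commit): walk the full sorted master brick list and
    # use the brick's global index to pick the slave, so the mapping no longer
    # depends on which node the brick happens to live on.
    workerspex = []
    locbidx = 0
    for idx, brick in enumerate(mbricks):
        # bounds check added for this standalone sketch
        if locbidx < len(locmbricks) and brick == locmbricks[locbidx]:
            workerspex.append((locmbricks[locbidx], slaves[idx % len(slaves)]))
            locbidx += 1
    return workerspex

for node, bricks in sorted(local_bricks_by_node.items()):
    print(node, 'old:', pair_old(sorted(bricks)))
    print(node, 'new:', pair_new(sorted(bricks)))

Running the sketch shows the difference: the old scheme picks a slave from a brick's position among that node's local bricks alone, while the new scheme derives it from the brick's position in the full sorted master brick list, so the brick-to-slave assignment comes out consistent across all master nodes.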