object-storage: Removed the redundant REMOTE_CLUSTER option.
The Gluster CLI uses the --remote-host option to connect to glusterd, and by default it connects to the glusterd on localhost. The UFO code therefore now passes the --remote-host option on every connection to glusterd.

Change-Id: I5a684d3c43fe9bdc9cc0b7c472a9d8145f9e1fd4
BUG: 878663
Signed-off-by: Mohammed Junaid <junaid@redhat.com>
Reviewed-on: http://review.gluster.org/4690
Reviewed-by: Peter Portante <pportant@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
parent 5d19cddd60
commit 790cbb82a1
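Because mount_ip defaults to localhost, always passing --remote-host preserves the old behaviour for local setups; the following two invocations talk to the same local glusterd:

    gluster volume info
    gluster --remote-host=localhost volume info

For a remote cluster, mount_ip is simply set to that server's address instead.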
@@ -3,10 +3,6 @@
 # local host.
 mount_ip = localhost
 
-# The GlusterFS server need not be local, a remote server can also be used
-# by setting "remote_cluster = yes".
-remote_cluster = no
-
 # By default it is assumed the Gluster volumes can be accessed using other
 # methods besides UFO (not object only), which disables caching
 # optimizations in order to keep in sync with file system changes.
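With remote_cluster gone, pointing UFO at a non-local GlusterFS server is done through mount_ip alone. A minimal fs.conf sketch, where 192.0.2.10 stands in for a hypothetical remote glusterd address:

    [DEFAULT]
    # hypothetical remote glusterd; the default is localhost
    mount_ip = 192.0.2.10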
@@ -24,7 +24,6 @@ from gluster.swift.common.fs_utils import mkdirs
 #
 _fs_conf = ConfigParser()
 MOUNT_IP = 'localhost'
-REMOTE_CLUSTER = False
 OBJECT_ONLY = False
 RUN_DIR='/var/run/swift'
 SWIFT_DIR = '/etc/swift'
@@ -33,10 +32,6 @@ if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
         MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', 'localhost')
     except (NoSectionError, NoOptionError):
         pass
-    try:
-        REMOTE_CLUSTER = _fs_conf.get('DEFAULT', 'remote_cluster', False) in TRUE_VALUES
-    except (NoSectionError, NoOptionError):
-        pass
     try:
         OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
     except (NoSectionError, NoOptionError):
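The module keeps its hard-coded default whenever fs.conf or the option is absent. The same read-with-fallback pattern in isolation (a sketch, Python 2 style as the module itself uses):

    from ConfigParser import ConfigParser, NoSectionError, NoOptionError

    _fs_conf = ConfigParser()
    MOUNT_IP = 'localhost'          # hard-coded fallback
    if _fs_conf.read('/etc/swift/fs.conf'):
        try:
            MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip')
        except (NoSectionError, NoOptionError):
            pass                    # keep the localhost default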
@@ -106,19 +101,12 @@ def unmount(full_mount_path):
         logging.error('Unable to unmount %s %s' % (full_mount_path, NAME))
 
 def _get_export_list():
-    if REMOTE_CLUSTER:
-        cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
-    else:
-        cmnd = 'gluster volume info'
+    cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP
 
     export_list = []
 
     if os.system(cmnd + ' >> /dev/null'):
-        if REMOTE_CLUSTER:
-            logging.error('Getting volume info failed for %s, make sure '\
-                'gluster --remote-host=%s works', NAME, MOUNT_IP)
-        else:
-            logging.error('Getting volume info failed for %s', NAME)
+        logging.error('Getting volume info failed for %s', NAME)
     else:
         fp = os.popen(cmnd)
         while True:
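After the change, listing volumes always goes through --remote-host. A self-contained sketch of the same flow, using subprocess rather than the module's os.system/os.popen calls, and assuming the "Volume Name:" lines that gluster volume info prints:

    import subprocess

    def get_export_list(mount_ip='localhost'):
        # One unconditional command now covers local and remote glusterd alike.
        cmnd = ['gluster', '--remote-host=%s' % mount_ip, 'volume', 'info']
        try:
            out = subprocess.check_output(cmnd)
        except (OSError, subprocess.CalledProcessError):
            return []  # the module logs an error and yields no exports here
        # Each exported volume appears on a line such as "Volume Name: vol0".
        return [line.split(':', 1)[1].strip()
                for line in out.decode().splitlines()
                if line.startswith('Volume Name:')]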
@@ -38,6 +38,6 @@ class TestRing(unittest.TestCase):
             assert node[0]['device'] == 'test'
             part, node = self.ring.get_nodes('test2')
             assert node
-            assert node[0]['device'] == 'volume'
+            assert node[0]['device'] == 'volume_not_in_ring'
         finally:
             self.ring._devs = __devs