cluster/ec: Provide an option to enable/disable eager lock
Problem: When a fop takes a lock and completes its operation, it waits for 1 second before releasing the lock. However, if ec finds any lock contention within this time period, it releases the lock immediately, before the timer expires. Because the lock is taken on the first brick, for a few operations, like read, discovery of lock contention might take a long time and can degrade performance.

Solution: Provide an option to enable/disable eager lock. If eager lock is disabled, the lock will be released as soon as the fop completes.

gluster v set <VOLUME NAME> disperse.eager-lock on
gluster v set <VOLUME NAME> disperse.eager-lock off

Change-Id: I000985a787eba3c190fdcd5981dfbf04e64af166
BUG: 1314649
Signed-off-by: Ashish Pandey <aspandey@redhat.com>
Reviewed-on: http://review.gluster.org/13605
Smoke: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Tested-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
parent: b145cd15de
commit: 23ccabbeb7
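A quick way to exercise the new option, sketched in the style of the regression tests in this patch (the 2+1 disperse volume layout below is only an illustrative assumption; $CLI, $V0, $H0 and $B0 follow the usual test-framework conventions):

TEST glusterd
TEST pidof glusterd
# Hypothetical 2+1 disperse volume, used only for illustration.
TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume start $V0
# Disable eager lock: locks are released as soon as each fop completes.
TEST $CLI volume set $V0 disperse.eager-lock off
# Restore the default: the lock is held briefly so it can be reused.
TEST $CLI volume set $V0 disperse.eager-lock on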
@@ -66,6 +66,8 @@
 #define GD_OP_VERSION_3_7_9 30709 /* Op-version for GlusterFS 3.7.9 */
 
+#define GD_OP_VERSION_3_7_10 30710 /* Op-version for GlusterFS 3.7.10 */
+
 #define GD_OP_VERSION_4_0_0 40000 /* Op-version for GlusterFS 4.0.0 */
 
 #define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
@@ -13,7 +13,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
 TEST $CLI volume set $V0 post-op-delay-secs 3
 TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume set $V0 cluster.data-self-heal off
@@ -8,7 +8,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
 TEST $CLI volume set $V0 cluster.self-heal-daemon off
 TEST $CLI volume set $V0 performance.quick-read off
 TEST $CLI volume set $V0 performance.open-behind off
@@ -3,7 +3,7 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../volume.rc
 
-# This test writes to same file with 2 fds and tests that eager-lock is not
+# This test writes to same file with 2 fds and tests that cluster.eager-lock is not
 # causing extra delay because of post-op-delay-secs
 cleanup;
@@ -14,7 +14,7 @@ function write_to_file {
 TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
 TEST $CLI volume set $V0 post-op-delay-secs 3
 TEST $CLI volume set $V0 client-log-level DEBUG
 TEST $CLI volume start $V0
@@ -4,7 +4,7 @@
 . $(dirname $0)/../../volume.rc
 . $(dirname $0)/../../nfs.rc
 
-#This tests if eager-lock blocks metadata operations on nfs/fuse mounts.
+#This tests if cluster.eager-lock blocks metadata operations on nfs/fuse mounts.
 #If it is not woken up, INODELK from the next command waits
 #for post-op-delay secs.
@@ -20,7 +20,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
 TEST $CLI volume set $V0 ensure-durability off
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
 TEST $CLI volume set $V0 flush-behind off
 TEST $CLI volume start $V0
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
@@ -15,7 +15,7 @@ TEST glusterd
 TEST pidof glusterd
 TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
 TEST $CLI volume set $V0 ensure-durability on
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
 TEST $CLI volume start $V0
 TEST $CLI volume profile $V0 start
 TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
@@ -2034,11 +2034,13 @@ void ec_flush_size_version(ec_fop_data_t * fop)
 void ec_lock_reuse(ec_fop_data_t *fop)
 {
     ec_cbk_data_t *cbk;
+    ec_t *ec = NULL;
     int32_t i, count;
     gf_boolean_t release = _gf_false;
 
+    ec = fop->xl->private;
     cbk = fop->answer;
-    if (cbk != NULL) {
+    if (ec->eager_lock && cbk != NULL) {
         if (cbk->xdata != NULL) {
             if ((dict_get_int32(cbk->xdata, GLUSTERFS_INODELK_COUNT,
                                 &count) == 0) && (count > 1)) {
@@ -2050,7 +2052,8 @@ void ec_lock_reuse(ec_fop_data_t *fop)
             }
         }
     } else {
-        /* If we haven't get an answer with enough quorum, we always release
+        /* If eager lock is disabled or If we haven't get
+         * an answer with enough quorum, we always release
          * the lock. */
         release = _gf_true;
     }
@@ -261,6 +261,8 @@ reconfigure (xlator_t *this, dict_t *options)
                       failed);
     GF_OPTION_RECONF ("iam-self-heal-daemon", ec->shd.iamshd, options,
                       bool, failed);
+    GF_OPTION_RECONF ("eager-lock", ec->eager_lock, options,
+                      bool, failed);
     GF_OPTION_RECONF ("background-heals", background_heals, options,
                       uint32, failed);
     GF_OPTION_RECONF ("heal-wait-qlength", heal_wait_qlen, options,
@@ -601,6 +603,7 @@ init (xlator_t *this)
     ec_method_initialize();
     GF_OPTION_INIT ("self-heal-daemon", ec->shd.enabled, bool, failed);
     GF_OPTION_INIT ("iam-self-heal-daemon", ec->shd.iamshd, bool, failed);
+    GF_OPTION_INIT ("eager-lock", ec->eager_lock, bool, failed);
     GF_OPTION_INIT ("background-heals", ec->background_heals, uint32, failed);
     GF_OPTION_INIT ("heal-wait-qlength", ec->heal_wait_qlen, uint32, failed);
     ec_configure_background_heal_opts (ec, ec->background_heals,
@@ -1321,6 +1324,12 @@ struct volume_options options[] =
                      "translator is running as part of self-heal-daemon "
                      "or not."
     },
+    { .key = {"eager-lock"},
+      .type = GF_OPTION_TYPE_BOOL,
+      .default_value = "on",
+      .description = "This option will enable/diable eager lock for"
+                     "disperse volume "
+    },
     { .key = {"background-heals"},
       .type = GF_OPTION_TYPE_INT,
       .min = 0,/*Disabling background heals*/
@@ -54,6 +54,7 @@ struct _ec
     gf_lock_t lock;
     gf_timer_t * timer;
     gf_boolean_t shutdown;
+    gf_boolean_t eager_lock;
     uint32_t background_heals;
     uint32_t heal_wait_qlen;
     struct list_head pending_fops;
@@ -1075,6 +1075,11 @@ struct volopt_map_entry glusterd_volopt_map[] = {
      .op_version = 1,
      .flags = OPT_FLAG_CLIENT_OPT
    },
+   { .key = "disperse.eager-lock",
+     .voltype = "cluster/disperse",
+     .op_version = GD_OP_VERSION_3_7_10,
+     .flags = OPT_FLAG_CLIENT_OPT
+   },
    { .key = "cluster.quorum-type",
      .voltype = "cluster/replicate",
      .option = "quorum-type",