glusterd: Better op-version values and ranges

Until now, the op-version has been a plain integer, incremented by 1 for
every Y release (using the X.Y.Z release numbering). This is not
flexible enough to handle backports of features into Z releases.

Going forward, from the upcoming 3.6.0 release, op-versions will be
multi-digit integers derived from the version number, instead of a
simple incrementing integer. An X.Y.Z release will have XYZ as its
op-version, with Y and Z always 2 digits wide and padded with 0 if
required; 3.6.0 thus gets op-version 30600. Bumping op-versions this way
leaves gaps between subsequent Y releases, and those gaps allow features
from newer Y releases to be backported into older Z releases (a feature
backported into 3.6.1, for example, can be gated on op-version 30601,
which sits below 3.7.0's 30700).

Change-Id: I463f82902d997ec07e76dae58ac935f33e6393c2
BUG: 1104997
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/7963
Reviewed-by: Niels de Vos <ndevos@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
Kaushal M authored on 2014-06-03 16:14:35 +05:30; committed by Krishnan Parthasarathi
commit 66b99406a7 (parent 211785f299)
9 changed files with 60 additions and 51 deletions

libglusterfs/src/globals.h

@@ -19,21 +19,30 @@
 /* Gluster versions - OP-VERSION mapping
  *
- * 3.3.0         - 1
- * 3.4.0         - 2
+ * 3.3.x         - 1
+ * 3.4.x         - 2
  * 3.5.0         - 3
- * 3.next (3.6?) - 4
+ * 3.5.1         - 30501
+ * 3.6.0         - 30600
  *
- * TODO: Change above comment once gluster version is finalised
+ * TODO: Finalize the op-version ranges
+ *
+ * Starting with Gluster v3.6, the op-version will be a multi-digit integer
+ * value based on the GlusterFS version, instead of a simple incrementing
+ * integer value. The op-version for a given X.Y.Z release will be an integer
+ * XYZ, with Y and Z always 2 digits wide, padded with 0 when needed. This
+ * should allow for some gaps between two Y releases for backports of features
+ * in Z releases.
  */
 #define GD_OP_VERSION_MIN  1 /* MIN is the fresh start op-version, mostly
                                 should not change */
-#define GD_OP_VERSION_MAX  4 /* MAX VERSION is the maximum count in VME table,
-                                should keep changing with introduction of newer
-                                versions */
-
-#define GD_OP_VERSION_4    4 /* Op-Version 4 */
-
-#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_4
+#define GD_OP_VERSION_MAX  30600 /* MAX VERSION is the maximum count in VME
+                                    table, should keep changing with
+                                    introduction of newer versions */
+
+#define GD_OP_VERSION_3_6_0 30600 /* Op-Version for GlusterFS 3.6.0 */
+
+#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
 
 #include "xlator.h"

xlators/mgmt/glusterd/src/glusterd-handler.c

@@ -637,7 +637,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
         }
 
         /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
-        if (priv->op_version < GD_OP_VERSION_4) {
+        if (priv->op_version < GD_OP_VERSION_3_6_0) {
                 ret = glusterd_lock (MY_UUID);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
@@ -684,7 +684,7 @@ local_locking_done:
 
         /* If no volname is given as a part of the command, locks will
          * not be held, hence sending stage event. */
-        if (volname || (priv->op_version < GD_OP_VERSION_4))
+        if (volname || (priv->op_version < GD_OP_VERSION_3_6_0))
                 event_type = GD_OP_EVENT_START_LOCK;
         else {
                 txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
@@ -714,7 +714,7 @@ out:
         if (locked && ret) {
                 /* Based on the op-version, we release the
                  * cluster or mgmt_v3 lock */
-                if (priv->op_version < GD_OP_VERSION_4)
+                if (priv->op_version < GD_OP_VERSION_3_6_0)
                         glusterd_unlock (MY_UUID);
                 else {
                         ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
@@ -4339,7 +4339,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                 glusterd_friend_sm_state_name_get (peerinfo->state.state));
 
         if (peerinfo->connected) {
-                if (conf->op_version < GD_OP_VERSION_4) {
+                if (conf->op_version < GD_OP_VERSION_3_6_0) {
                         glusterd_get_lock_owner (&uuid);
                         if (!uuid_is_null (uuid) &&
                             !uuid_compare (peerinfo->uuid, uuid))

xlators/mgmt/glusterd/src/glusterd-op-sm.c

@@ -153,7 +153,7 @@ glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id)
         if (!*txn_id)
                 goto out;
 
-        if (priv->op_version < GD_OP_VERSION_4)
+        if (priv->op_version < GD_OP_VERSION_3_6_0)
                 uuid_copy (**txn_id, priv->global_txn_id);
         else
                 uuid_generate (**txn_id);
@@ -2824,7 +2824,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
                         continue;
 
                 /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
-                if (priv->op_version < GD_OP_VERSION_4) {
+                if (priv->op_version < GD_OP_VERSION_3_6_0) {
                         proc = &peerinfo->mgmt->proctable
                                           [GLUSTERD_MGMT_CLUSTER_LOCK];
                         if (proc->fn) {
@@ -2907,7 +2907,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
 
                 /* Based on the op_version,
                  * release the cluster or mgmt_v3 lock */
-                if (priv->op_version < GD_OP_VERSION_4) {
+                if (priv->op_version < GD_OP_VERSION_3_6_0) {
                         proc = &peerinfo->mgmt->proctable
                                           [GLUSTERD_MGMT_CLUSTER_UNLOCK];
                         if (proc->fn) {
@@ -4403,7 +4403,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
         glusterd_op_clear_errstr ();
 
         /* Based on the op-version, we release the cluster or mgmt_v3 lock */
-        if (priv->op_version < GD_OP_VERSION_4) {
+        if (priv->op_version < GD_OP_VERSION_3_6_0) {
                 ret = glusterd_unlock (MY_UUID);
                 /* unlock cant/shouldnt fail here!! */
                 if (ret)

xlators/mgmt/glusterd/src/glusterd-rpc-ops.c

@@ -1274,7 +1274,7 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
                 goto out;
         }
 
-        if (priv->op_version >= GD_OP_VERSION_4) {
+        if (priv->op_version >= GD_OP_VERSION_3_6_0) {
                 ret = glusterd_add_missed_snaps_to_export_dict (peer_data);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,

xlators/mgmt/glusterd/src/glusterd-sm.c

@@ -686,7 +686,7 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
         /* Compare missed_snapshot list with the peer *
          * if volume comparison is successful */
         if ((op_ret == 0) &&
-            (conf->op_version >= GD_OP_VERSION_4)) {
+            (conf->op_version >= GD_OP_VERSION_3_6_0)) {
                 ret = glusterd_import_friend_missed_snap_list (ev_ctx->vols);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,

xlators/mgmt/glusterd/src/glusterd-store.c

@@ -270,7 +270,7 @@ gd_store_brick_snap_details_write (int fd, glusterd_brickinfo_t *brickinfo)
         GF_VALIDATE_OR_GOTO (this->name, (fd > 0), out);
         GF_VALIDATE_OR_GOTO (this->name, (brickinfo != NULL), out);
 
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = 0;
                 goto out;
         }
@@ -623,7 +623,7 @@ glusterd_volume_write_snap_details (int fd, glusterd_volinfo_t *volinfo)
         GF_VALIDATE_OR_GOTO (this->name, (fd > 0), out);
         GF_VALIDATE_OR_GOTO (this->name, (volinfo != NULL), out);
 
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = 0;
                 goto out;
         }

xlators/mgmt/glusterd/src/glusterd-syncop.c

@@ -1116,7 +1116,7 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
         synctask_barrier_init((&args));
         peer_cnt = 0;
         list_for_each_entry (peerinfo, peers, op_peers_list) {
-                if (conf->op_version < GD_OP_VERSION_4) {
+                if (conf->op_version < GD_OP_VERSION_3_6_0) {
                         /* Reset lock status */
                         peerinfo->locked = _gf_false;
                         gd_syncop_mgmt_lock (peerinfo, &args,
@@ -1378,7 +1378,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
         this = THIS;
         synctask_barrier_init((&args));
         peer_cnt = 0;
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 list_for_each_entry_safe (peerinfo, tmp, peers, op_peers_list) {
                         /* Only unlock peers that were locked */
                         if (peerinfo->locked) {
@@ -1424,7 +1424,7 @@ out:
          * and clear the op */
         glusterd_op_clear_op (op);
 
-        if (conf->op_version < GD_OP_VERSION_4)
+        if (conf->op_version < GD_OP_VERSION_3_6_0)
                 glusterd_unlock (MY_UUID);
         else {
                 if (volname) {
@@ -1595,7 +1595,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
         }
 
         /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = glusterd_lock (MY_UUID);
                 if (ret) {
                         gf_log (this->name, GF_LOG_ERROR,
@@ -1645,7 +1645,7 @@ local_locking_done:
 
         /* If no volname is given as a part of the command, locks will
          * not be held */
-        if (volname || (conf->op_version < GD_OP_VERSION_4)) {
+        if (volname || (conf->op_version < GD_OP_VERSION_3_6_0)) {
                 ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr,
                                         npeers, *txn_id);
                 if (ret) {

xlators/mgmt/glusterd/src/glusterd-utils.c

@@ -2339,7 +2339,7 @@ gd_add_brick_snap_details_to_dict (dict_t *dict, char *prefix,
         GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out);
         GF_VALIDATE_OR_GOTO (this->name, (brickinfo != NULL), out);
 
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = 0;
                 goto out;
         }
@@ -2398,7 +2398,7 @@ gd_add_vol_snap_details_to_dict (dict_t *dict, char *prefix,
         GF_VALIDATE_OR_GOTO (this->name, (volinfo != NULL), out);
         GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out);
 
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = 0;
                 goto out;
         }
@@ -3701,7 +3701,7 @@ gd_import_new_brick_snap_details (dict_t *dict, char *prefix,
         GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out);
         GF_VALIDATE_OR_GOTO (this->name, (brickinfo != NULL), out);
 
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = 0;
                 goto out;
         }
@@ -4014,7 +4014,7 @@ out:
  * Imports the snapshot details of a volume if required and available
  *
  * Snapshot details will be imported only if cluster.op_version is greater than
- * or equal to GD_OP_VERSION_4, the op-version from which volume snapshot is
+ * or equal to GD_OP_VERSION_3_6_0, the op-version from which volume snapshot is
  * supported.
  */
 int
@@ -4037,7 +4037,7 @@ gd_import_volume_snap_details (dict_t *dict, glusterd_volinfo_t *volinfo,
         GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out);
         GF_VALIDATE_OR_GOTO (this->name, (volname != NULL), out);
 
-        if (conf->op_version < GD_OP_VERSION_4) {
+        if (conf->op_version < GD_OP_VERSION_3_6_0) {
                 ret = 0;
                 goto out;
         }

xlators/mgmt/glusterd/src/glusterd-volume-set.c

@@ -613,45 +613,45 @@ struct volopt_map_entry glusterd_volopt_map[] = {
         { .key = "diagnostics.brick-logger",
           .voltype = "debug/io-stats",
           .option = "!logger",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "diagnostics.client-logger",
           .voltype = "debug/io-stats",
           .option = "!logger",
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
           .flags = OPT_FLAG_CLIENT_OPT
         },
         { .key = "diagnostics.brick-log-format",
           .voltype = "debug/io-stats",
           .option = "!log-format",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "diagnostics.client-log-format",
           .voltype = "debug/io-stats",
           .option = "!log-format",
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
           .flags = OPT_FLAG_CLIENT_OPT
         },
         { .key = "diagnostics.brick-log-buf-size",
           .voltype = "debug/io-stats",
           .option = "!log-buf-size",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "diagnostics.client-log-buf-size",
           .voltype = "debug/io-stats",
           .option = "!log-buf-size",
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
           .flags = OPT_FLAG_CLIENT_OPT
         },
         { .key = "diagnostics.brick-log-flush-timeout",
           .voltype = "debug/io-stats",
           .option = "!log-flush-timeout",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "diagnostics.client-log-flush-timeout",
           .voltype = "debug/io-stats",
           .option = "!log-flush-timeout",
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
           .flags = OPT_FLAG_CLIENT_OPT
         },
@@ -946,16 +946,16 @@ struct volopt_map_entry glusterd_volopt_map[] = {
         },
         { .key = "server.manage-gids",
           .voltype = "protocol/server",
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "client.send-gids",
           .voltype = "protocol/client",
           .type = NO_DOC,
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "server.gid-timeout",
           .voltype = "protocol/server",
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
         },
 
         /* Performance xlators enable/disbable options */
@@ -1417,12 +1417,12 @@ struct volopt_map_entry glusterd_volopt_map[] = {
         { .key = "nfs-ganesha.enable",
           .voltype = "nfs/server",
           .option = "!nfs-ganesha.enable",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "nfs-ganesha.host",
           .voltype = "nfs/server",
           .option = "!nfs-ganesha.host",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "nfs.nlm",
           .voltype = "nfs/server",
@@ -1452,13 +1452,13 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .voltype = "nfs/server",
           .option = "nfs.rpc-statd",
           .type = NO_DOC,
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "nfs.log-level",
           .voltype = "nfs/server",
           .option = "nfs.log-level",
           .type = NO_DOC,
-          .op_version = 4,
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "nfs.server-aux-gids",
           .voltype = "nfs/server",
@@ -1526,7 +1526,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
         },
         { .key = "storage.xattr-user-namespace-mode",
           .voltype = "storage/posix",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "storage.owner-uid",
           .voltype = "storage/posix",
@@ -1549,7 +1549,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
         { .option = "update-link-count-parent",
           .key = "storage.build-pgfid",
           .voltype = "storage/posix",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "storage.bd-aio",
           .voltype = "storage/bd",
@@ -1605,16 +1605,16 @@ struct volopt_map_entry glusterd_volopt_map[] = {
         { .key = "features.barrier",
           .voltype = "features/barrier",
           .value = "disable",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "features.barrier-timeout",
           .voltype = "features/barrier",
           .value = "120",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = "cluster.op-version",
           .voltype = "mgmt/glusterd",
-          .op_version = 4
+          .op_version = GD_OP_VERSION_3_6_0,
         },
         { .key = NULL
         }