All: run codespell on the code and fix issues.
Please review; it's not always just the comments that were fixed. Of course, I had to revert all calls to creat() that were changed to create() ... Only compile-tested!

Change-Id: I7d02e82d9766e272a7fd9cc68e51901d69e5aab5
updates: bz#1193929
Signed-off-by: Yaniv Kaul <ykaul@redhat.com>
parent c0e76377d0
commit 621138ce76
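For context, a change like this is normally produced by letting codespell rewrite the tree in place and then reverting the false positives by hand. The exact command used here is not recorded in the commit, so the Python sketch below is only an assumption; the skip list and the ignore list (which keeps the real creat() syscall off the fix list) are hypothetical choices, not part of this change.

    #!/usr/bin/env python
    # Hypothetical helper (not part of this commit): run codespell over the
    # current tree and write the suggested fixes in place.
    import subprocess
    import sys

    cmd = [
        "codespell",
        "--write-changes",               # apply fixes directly to the files
        "--skip", ".git,*.po,*.patch",   # leave git metadata, translations and patches alone
        "--ignore-words-list", "creat",  # creat() is a real syscall, not a typo
        ".",
    ]
    sys.exit(subprocess.call(cmd))

Even with an ignore list, anything codespell touches inside identifiers or user-visible strings still needs manual review, which is why the creat() -> create() call-site changes had to be reverted.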
@@ -1282,7 +1282,7 @@ test_handleops (int argc, char *argv[])
/* extract handle and create from handle test */
printf ("glfs_h_extract_handle and glfs_h_create_from_handle tests: In Progress\n");
/* TODO: Change the lookup to creat below for a GIFD recovery falure,
/* TODO: Change the lookup to create below for a GIFD recovery failure,
* that needs to be fixed */
leaf = glfs_h_lookupat (fs, parent, leaf_name1, &sb, 0);
if (leaf == NULL) {

@@ -1417,7 +1417,7 @@ test_handleops (int argc, char *argv[])
}
peek_stat (&sb);
/* TODO: creat op on a FIFO node hangs, need to check and fix
/* TODO: create op on a FIFO node hangs, need to check and fix
tmp = glfs_h_creat (fs, parent, newnod_name, O_CREAT, 0644, &sb);
if (tmp != NULL || errno != EINVAL) {
fprintf (stderr, "glfs_h_creat: node create, tmp = (%p), errno = %s\n",

@@ -52,7 +52,7 @@ glfs_mark_glfd_for_deletion (struct glfs_fd *glfd)
return 0;
}
/* This function is usefull for all async fops. There is chance that glfd is
/* This function is useful for all async fops. There is chance that glfd is
* closed before async fop is completed. When glfd is closed we change the
* state to GLFD_CLOSE.
*

@@ -539,7 +539,7 @@ pub_glfs_creat (struct glfs *fs, const char *path, int flags, mode_t mode)
/* This must be glfs_resolve() and NOT glfs_lresolve().
That is because open("name", O_CREAT) where "name"
is a danging symlink must create the dangling
destinataion.
destination.
*/
retry:
ret = glfs_resolve (fs, subvol, path, &loc, &iatt, reval);

@@ -5299,7 +5299,7 @@ out:
* to be read by the applications.
*
* In case if the application registers a cbk function, that shall
* be called by this routine incase of any event received.
* be called by this routine in case of any event received.
* The cbk fn is responsible for notifying the
* applications the way it desires for each event queued (for eg.,
* can raise a signal or broadcast a cond variable etc.)

@@ -1946,7 +1946,7 @@ glfs_h_poll_cache_invalidation (struct glfs *fs,
up_inode_arg->flags = ca_data->flags;
up_inode_arg->expire_time_attr = ca_data->expire_time_attr;
/* XXX: Update stat as well incase of UP_*_TIMES.
/* XXX: Update stat as well in case of UP_*_TIMES.
* This will be addressed as part of INODE_UPDATE */
if (ca_data->flags & GFAPI_INODE_UPDATE_FLAGS) {
glfs_iatt_to_stat (fs, &ca_data->stat, &up_inode_arg->buf);

@@ -2021,7 +2021,7 @@ void glfs_release_upcall (void *ptr)
/*
* This API is used to poll for upcall events stored in the upcall list.
* Current users of this API is NFS-Ganesha. Incase of any event received, it
* Current users of this API is NFS-Ganesha. In case of any event received, it
* will be mapped appropriately into 'glfs_upcall' along with the handle object
* to be passed to NFS-Ganesha.
*

@@ -2163,13 +2163,13 @@ GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_poll_upcall, 3.7.16);
static gf_boolean_t log_upcall370 = _gf_true; /* log once */
/* The old glfs_h_poll_upcall interface requires intimite knowledge of the
/* The old glfs_h_poll_upcall interface requires intimate knowledge of the
* structures that are returned to the calling application. This is not
* recommended, as the returned structures need to returned correctly (handles
* closed, memory free'd with the unavailable GF_FREE(), and possibly more.)
*
* To the best of our knowledge, only NFS-Ganesha uses the upcall events
* through gfapi. We keep this backwards compatability function around so that
* through gfapi. We keep this backwards compatibility function around so that
* applications using the existing implementation do not break.
*
* WARNING: this function will be removed in the future.

@@ -47,7 +47,7 @@
*
*/
/* Values for valid falgs to be used when using XXXsetattr, to set multiple
/* Values for valid flags to be used when using XXXsetattr, to set multiple
attribute values passed via the related stat structure.
*/
#define GFAPI_SET_ATTR_MODE 0x1

@@ -275,7 +275,7 @@ glfs_h_access (glfs_t *fs, glfs_object_t *object, int mask) __THROW
This API is used to poll for upcall events stored in the
upcall list. Current users of this API is NFS-Ganesha.
Incase of any event received, it will be mapped appropriately
In case of any event received, it will be mapped appropriately
into 'glfs_upcall' along with the handle('glfs_object') to be
passed to NFS-Ganesha.

@@ -170,11 +170,12 @@ struct glfs {
* progress */
xlator_t *next_subvol; /* Any new graph is put to
* next_subvol, the graph in
* next_subvol can either be move to
* mip_subvol (if any IO picks it up
* for migration), or be detroyed (if
* there is a new graph, and this was
* never picked for migration) */
* next_subvol can either be moved
* to mip_subvol (if any IO picks it
* up for migration), or be
* destroyed (if there is a new
* graph, and this was never picked
* for migration) */
xlator_t *old_subvol;
char *oldvolfile;

@@ -319,7 +319,7 @@ glfs_resolve_component (struct glfs *fs, xlator_t *subvol, inode_t *parent,
* parent : output_dir
* component : "dir"
*
* Incase of nameless lookup, both "." and ".." retained
* In case of nameless lookup, both "." and ".." retained
*/
if (strcmp (component, ".") == 0) {

@@ -987,7 +987,7 @@ priv_glfs_subvol_done (struct glfs *fs, xlator_t *subvol)
/* For decrementing subvol->wind ref count we need not check/wait for
* migration-in-progress flag.
* Also glfs_subvol_done is called in call-back path therefore waiting
* fot migration-in-progress flag can lead to dead-lock.
* for migration-in-progress flag can lead to dead-lock.
*/
glfs_lock (fs, _gf_false);
{

@@ -1231,7 +1231,7 @@ glusterfs_ctx_destroy (glusterfs_ctx_t *ctx)
/* Free all the graph structs and its containing xlator_t structs
* from this point there should be no reference to GF_FREE/GF_CALLOC
* as it will try to access mem_acct and the below funtion would
* as it will try to access mem_acct and the below function would
* have freed the same.
*/
list_for_each_entry_safe (trav_graph, tmp, &ctx->graphs, list) {

@@ -1666,7 +1666,7 @@ pub_glfs_upcall_register (struct glfs *fs, uint32_t event_list,
goto out;
}
/* incase other thread does unregister */
/* in case other thread does unregister */
pthread_mutex_lock (&fs->mutex);
{
if (event_list & GLFS_EVENT_INODE_INVALIDATE) {
@@ -336,8 +336,8 @@ glfs_get_volfile (glfs_t *fs, void *buf, size_t len) __THROW
the management server (glusterd) to fetch volume uuid and stores it
in the glusterfs_context linked to the glfs object fs which can be used
in the subsequent calls. Later it parses that UUID to convert it from
cannonical string format into an opaque byte array and copy it into
the volid array. Incase if either of the input parameters, volid or size,
canonical string format into an opaque byte array and copy it into
the volid array. In case if either of the input parameters, volid or size,
is NULL, number of bytes required to copy the volume UUID is returned.
PARAMETERS

@@ -992,7 +992,7 @@ glfs_xreaddirplus_get_stat (glfs_xreaddirp_stat_t *xstat) __THROW
* GFAPI_XREADDIRP_HANDLE
* @ext: Dirent struture to copy the values to
* (though optional recommended to be allocated by application
* esp., in multi-threaded environement)
* esp., in multi-threaded environment)
*
* OUTPUT:
* @res: to store the next dirent value. If NULL and return value is '0',

@@ -1151,7 +1151,7 @@ typedef void (*glfs_upcall_cbk) (glfs_upcall_t *up_arg, void *data);
* Current available values are:
* - GFAPI_UPCALL_INODE_INVALIDATE
*
* @cbk: The cbk routine to be invoked incase of any upcall received
* @cbk: The cbk routine to be invoked in case of any upcall received
* @data: Any opaque pointer provided by caller which shall be using while
* making cbk calls. This pointer may be used by caller for any of its
* internal use while processing upcalls. Can be NULL.

@@ -1252,11 +1252,11 @@ typedef void (*glfs_recall_cbk) (glfs_lease_t lease, void *data);
@data: It is a cookie, this pointer is returned as a part of recall
fn and data field are stored as a part of glfs_fd, hence if there are multiple
glfs_lease calls, each of them updates the fn and data fileds. glfs_recall_cbk
glfs_lease calls, each of them updates the fn and data fields. glfs_recall_cbk
will be invoked with the last updated fn and data
RETURN VALUES
0: Successfull completion
0: Successful completion
<0: Failure. @errno will be set with the type of failure
*/

@@ -1495,7 +1495,7 @@ _limits_set_on_volume (char *volname, int type) {
else
gfid_type = GF_QUOTA_CONF_TYPE_OBJECTS;
/* Try to read atleast one gfid of type 'gfid_type' */
/* Try to read at least one gfid of type 'gfid_type' */
while (1) {
ret = quota_conf_read_gfid (fd, buf, &gfid_type_stored,
version);

@@ -3641,7 +3641,7 @@ print_quota_list_from_mountdir (cli_local_t *local, char *mountdir,
case ENOATTR:
#endif
/* If it's an ENOATTR, quota/inode-quota is
* configured(limit is set atleast for one directory).
* configured(limit is set at least for one directory).
* The user is trying to issue 'list/list-objects'
* command for a directory on which quota limit is
* not set and we are showing the used-space in case

@@ -4060,7 +4060,7 @@ out:
* the results and hangs, because same thread has already holding
* the lock
*
* Broadcasting response in a seperate thread which is not a
* Broadcasting response in a separate thread which is not a
* good fix. This needs to be re-visted with better solution
*/
if (ret == -1) {

@@ -42,7 +42,7 @@ enum gf_task_types {
* }
* #endif
*
* Following the above formate ensures that all xml related code is compliled
* Following the above format ensures that all xml related code is compiled
* only when libxml2 is present, and also keeps the rest of the codebase free
* of #if (HAVE_LIB_XML)
*/

@@ -5082,7 +5082,7 @@ cli_xml_snapshot_info (xmlTextWriterPtr writer, xmlDocPtr doc, dict_t *dict)
snap_driven = dict_get_str_boolean (dict, "snap-driven", _gf_false);
/* If the approach is volume based then we should display orgin volume
/* If the approach is volume based then we should display origin volume
* information first followed by per snap info*/
if (!snap_driven) {
ret = cli_xml_snapshot_info_orig_vol (writer, doc, dict, "");

@@ -416,7 +416,7 @@ cli_opt_parse (char *opt, struct cli_state *state)
}
}
else {
cli_err ("invalide secure-mgmt value (ignored)");
cli_err ("invalid secure-mgmt value (ignored)");
}
return 0;
}

@@ -176,7 +176,7 @@ static const struct uparam_name uparam_names[] =
{ 0, 0, 0 }
};
/* Read user options from the environment, and fill in UPARAMS appropiately. */
/* Read user options from the environment, and fill in UPARAMS appropriately.*/
static void
fill_in_uparams (const struct argp_state *state)
{

@@ -286,11 +286,11 @@ fill_in_uparams (const struct argp_state *state)
-xARG, -yARG, --long1=ARG, --long2=ARG Documentation...
Where ARG will be omitted if there's no argument, for this option, or
will be surrounded by "[" and "]" appropiately if the argument is
optional. The documentation string is word-wrapped appropiately, and if
will be surrounded by "[" and "]" appropriately if the argument is
optional. The documentation string is word-wrapped appropriately, and if
the list of options is long enough, it will be started on a separate line.
If there are no short options for a given option, the first long option is
indented slighly in a way that's supposed to make most long options appear
indented slightly in a way that's supposed to make most long options appear
to be in a separate column.
For example, the following output (from ps):

@@ -745,7 +745,7 @@ hol_entry_cmp (const struct hol_entry *entry1,
if (entry1->cluster != entry2->cluster)
{
/* The entries are not within the same cluster, so we can't compare them
directly, we have to use the appropiate clustering level too. */
directly, we have to use the appropriate clustering level too. */
if (! entry1->cluster)
/* ENTRY1 is at the `base level', not in a cluster, so we have to
compare it's group number with that of the base cluster in which

@@ -1006,7 +1006,7 @@ filter_doc (const char *doc, int key, const struct argp *argp,
return doc;
}
/* Prints STR as a header line, with the margin lines set appropiately, and
/* Prints STR as a header line, with the margin lines set appropriately, and
notes the fact that groups should be separated with a blank line. ARGP is
the argp that should dictate any user doc filtering to take place. Note
that the previous wrap margin isn't restored, but the left margin is reset

@@ -1462,7 +1462,7 @@ argp_args_usage (const struct argp *argp, const struct argp_state *state,
}
/* Print the documentation for ARGP to STREAM; if POST is false, then
everything preceeding a `\v' character in the documentation strings (or
everything preceding a `\v' character in the documentation strings (or
the whole string, for those with none) is printed, otherwise, everything
following the `\v' character (nothing for strings without). Each separate
bit of documentation is separated a blank line, and if PRE_BLANK is true,

@@ -1555,7 +1555,7 @@ argp_doc (const struct argp *argp, const struct argp_state *state,
}
/* Output a usage message for ARGP to STREAM. If called from
argp_state_help, STATE is the relevent parsing state. FLAGS are from the
argp_state_help, STATE is the relevant parsing state. FLAGS are from the
set ARGP_HELP_*. NAME is what to use wherever a `program name' is
needed. */
@@ -208,7 +208,7 @@ struct group
/* Which argp this group is from. */
const struct argp *argp;
/* The number of non-option args sucessfully handled by this parser. */
/* The number of non-option args successfully handled by this parser. */
unsigned args_processed;
/* This group's parser's parent's group. */

@@ -263,7 +263,7 @@ struct argp
};
/* Possible KEY arguments to a help filter function. */
#define ARGP_KEY_HELP_PRE_DOC 0x2000001 /* Help text preceeding options. */
#define ARGP_KEY_HELP_PRE_DOC 0x2000001 /* Help text preceding options. */
#define ARGP_KEY_HELP_POST_DOC 0x2000002 /* Help text following options. */
#define ARGP_KEY_HELP_HEADER 0x2000003 /* Option header string. */
#define ARGP_KEY_HELP_EXTRA 0x2000004 /* After all other documentation;

@@ -477,7 +477,7 @@ extern void __argp_help (__const struct argp *__restrict __argp,
parsing routine (thus taking an argp_state structure as the first
argument). They may or may not print an error message and exit, depending
on the flags in STATE -- in any case, the caller should be prepared for
them *not* to exit, and should return an appropiate error after calling
them *not* to exit, and should return an appropriate error after calling
them. [argp_usage & argp_error should probably be called argp_state_...,
but they're used often enough that they should be short] */

@@ -51,7 +51,7 @@ NtAllocateUuids_2000)(
//
// Nice, but instead of including ntddk.h ot winnt.h
// Nice, but instead of including ntddk.h or winnt.h
// I should define it here because they MISSED __stdcall in those headers.
//

@@ -22,7 +22,7 @@ gen_header_type = sys.argv[1]
# When adding new keys add it to the END
keys = (
# user driven events
#peer and volume managment events
#peer and volume management events
"EVENT_PEER_ATTACH",
"EVENT_PEER_DETACH",
"EVENT_VOLUME_CREATE",

@@ -408,8 +408,8 @@ def webhook_monitor(proc_queue, webhooks):
class WebhookThreadPool(object):
def start(self):
# Seperate process to emit messages to webhooks
# which maintains one thread per webhook. Seperate
# Separate process to emit messages to webhooks
# which maintains one thread per webhook. Separate
# process is required since on reload we need to stop
# and start the thread pool. In Python Threads can't be stopped
# so terminate the process and start again. Note: In transit

@@ -37,7 +37,7 @@ def socket_instance(address_family):
elif address_family.upper() == 'ipv6'.upper():
return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
Log.error("Invalid IP addess family")
Log.error("Invalid IP address family")
sys.exit(1)

@@ -128,7 +128,7 @@ class GitBranchDiff:
exit(status_tbr)
def check_author_exist (self):
" defend to check given author exist, format incase of multiple"
" defend to check given author exist, format in case of multiple"
contrib_list = ['', '*', 'all', 'All', 'ALL', 'null', 'Null', 'NULL']
if self.g_author in contrib_list:
self.g_author = ""

@@ -190,7 +190,7 @@ class GitBranchDiff:
'--author',
help = 'default: git config name/email, '
'to provide multiple specify comma'
' seperated values',
' separated values',
default = author,
dest = 'author')
self.parser.add_argument('-p',
@@ -3,7 +3,7 @@
# 1. Display xattr of entire FS tree in a human readable form
# 2. Display all the directory where contri and size mismatch.
# (If there are any directory with contri and size mismatch that are not dirty
# then that highlights a propogation issue)
# then that highlights a propagation issue)
# The script takes only one input LOG _FILE generated from the command,
# find <brick_path> | xargs getfattr -d -m. -e hex > log_gluster_xattr

@@ -15,7 +15,7 @@ from hurry.filesize import size
if len(sys.argv) < 2:
sys.exit('Usage: %s log_gluster_xattr \n'
'to genereate log_gluster_xattr use: \n'
'to generate log_gluster_xattr use: \n'
'find <brick_path> | xargs getfattr -d -m. -e hex > log_gluster_xattr'
% sys.argv[0])
LOG_FILE=sys.argv[1]

@@ -730,7 +730,7 @@ def perform_operation(args):
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED,
status="Successfuly Disabled")
status="Successfully Disabled")
else:
gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED,
error=print_error(ret))

@@ -765,7 +765,7 @@ def perform_operation(args):
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLED,
status="Successfuly Enabled")
status="Successfully Enabled")
else:
gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED,
error=print_error(ret))

@@ -777,7 +777,7 @@ def perform_operation(args):
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED,
status="Successfuly Disabled")
status="Successfully Disabled")
else:
gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED,
error=print_error(ret))

@@ -797,7 +797,7 @@ def perform_operation(args):
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED,
status="Successfuly added job "+args.jobname)
status="Successfully added job "+args.jobname)
else:
gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED,
status="Failed to add job "+args.jobname,

@@ -813,7 +813,7 @@ def perform_operation(args):
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED,
status="Successfuly deleted job "+args.jobname)
status="Successfully deleted job "+args.jobname)
else:
gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED,
status="Failed to delete job "+args.jobname,

@@ -829,7 +829,7 @@ def perform_operation(args):
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED,
status="Successfuly edited job "+args.jobname)
status="Successfully edited job "+args.jobname)
else:
gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED,
status="Failed to edit job "+args.jobname,

@@ -50,7 +50,7 @@ class Gconf(object):
self.args = args
self.extra_tmpl_args = extra_tmpl_args
self.override_from_args = override_from_args
# Store default values only if overwriten, Only for JSON/CLI output
# Store default values only if overwritten, Only for JSON/CLI output
self.default_values = {}
self._load()

@@ -14,7 +14,7 @@ from ctypes import CDLL, create_string_buffer, get_errno
class Xattr(object):
"""singleton that wraps the extended attribues system
"""singleton that wraps the extended attributes system
interface for python using ctypes
Just implement it to the degree we need it, in particular

@@ -192,7 +192,7 @@ class NormalMixin(object):
vi = vi.copy()
vi['timeout'] = int(time.time()) + timo
else:
# send keep-alives more frequently to
# send keep-alive more frequently to
# avoid a delay in announcing our volume info
# to slave if it becomes established in the
# meantime

@@ -529,7 +529,7 @@ class GMasterCommon(object):
# If crawlwrap is called when partial history available,
# then it sets register_time which is the time when geo-rep
# worker registerd to changelog consumption. Since nsec is
# worker registered to changelog consumption. Since nsec is
# not considered in register time, their are chances of skipping
# changes detection in xsync crawl. This limit will be reset when
# crawlwrap is called again.

@@ -540,7 +540,7 @@ class GMasterCommon(object):
# no need to maintain volinfo state machine.
# in a cascading setup, each geo-replication session is
# independent (ie. 'volume-mark' and 'xtime' are not
# propogated). This is because the slave's xtime is now
# propagated). This is because the slave's xtime is now
# stored on the master itself. 'volume-mark' just identifies
# that we are in a cascading setup and need to enable
# 'geo-replication.ignore-pid-check' option.

@@ -919,6 +919,8 @@ class GMasterChangelogMixin(GMasterCommon):
if fix_entry_ops:
# Process deletions of entries whose gfids are mismatched
failures1 = self.slave.server.entry_ops(fix_entry_ops)
if not failures1:
logging.info("Successfully fixed entry ops with gfid mismatch")
return (failures1, fix_entry_ops)

@@ -1563,7 +1565,7 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
self.history_crawl_start_time = int(time.time())
self.crawl()
else:
# This exeption will be catched in resource.py and
# This exception will be caught in resource.py and
# fallback to xsync for the small gap.
raise PartialHistoryAvailable(str(actual_end))

@@ -126,7 +126,7 @@ class Monitor(object):
raise
def exit_signalled(s):
""" child teminated due to receipt of SIGUSR1 """
""" child terminated due to receipt of SIGUSR1 """
return (os.WIFSIGNALED(s) and (os.WTERMSIG(s) == signal.SIGUSR1))
def exit_status(s):
@@ -193,7 +193,7 @@ class RepceClient(object):
"""RePCe client is callabe, calling it implements a synchronous
remote call.
We do a .push with a cbk which does a wakeup upon receiving anwser,
We do a .push with a cbk which does a wakeup upon receiving answer,
then wait on the RepceJob.
"""
rjob = self.push(

@@ -638,7 +638,7 @@ class Server(object):
# So both validations are necessary to decide src doesn't
# exist. We can't rely on only gfid stat as hardlink could
# be present and we can't rely only on name as name could
# exist with differnt gfid.
# exist with different gfid.
if not matching_disk_gfid(gfid, entry):
if e['stat'] and not stat.S_ISDIR(e['stat']['mode']):
if stat.S_ISLNK(e['stat']['mode']) and \

@@ -1400,7 +1400,7 @@ class SSH(object):
'--local-node-id', rconf.args.resource_remote_id] + \
[
# Add all config arguments here, slave gsyncd will not use
# config file in slave side, so all overridding options should
# config file in slave side, so all overriding options should
# be sent as arguments
'--slave-timeout', str(gconf.get("slave-timeout")),
'--slave-log-level', gconf.get("slave-log-level"),

@@ -935,7 +935,7 @@ class VolinfoFromGconf(object):
# Glusterd will generate following config items before Geo-rep start
# So that Geo-rep need not run gluster commands from inside
# Volinfo object API/interface kept as is so that caller need not
# change anything exept calling this instead of Volinfo()
# change anything except calling this instead of Volinfo()
#
# master-bricks=
# master-bricks=NODEID:HOSTNAME:PATH,..

@@ -287,7 +287,7 @@ set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options)
cmd_args = &ctx->cmd_args;
/* Check if mount-point is absolute path,
* if not convert to absolute path by concating with CWD
* if not convert to absolute path by concatenating with CWD
*/
if (cmd_args->mount_point[0] != '/') {
if (getcwd (cwd, PATH_MAX) != NULL) {

@@ -1536,7 +1536,7 @@ cleanup_and_exit (int signum)
* But in another thread (epoll thread), upon poll error in the
* socket the transports are cleaned up where again rpcsvc object
* is accessed (which is already freed by the below function).
* Since the process is about to be killed dont execute the function
* Since the process is about to be killed don't execute the function
* below.
*/
/* if (ctx->listener) { */

@@ -88,7 +88,7 @@ glfsh_init ()
int
glfsh_end_op_granular_entry_heal (int op_ret, char *op_errstr)
{
/* If error sting is available, give it higher precedence.*/
/* If error string is available, give it higher precedence.*/
if (op_errstr) {
printf ("%s\n", op_errstr);

@@ -1509,7 +1509,7 @@ cleanup (glfs_t *fs)
return;
#if 0
/* glfs fini path is still racy and crashing the program. Since
* this program any way has to die, we are not gonna call fini
* this program any way has to die, we are not going to call fini
* in the released versions. i.e. final builds. For all
* internal testing lets enable this so that glfs_fini code
* path becomes stable. */

@@ -482,7 +482,7 @@ __client_ctx_set_int (client_t *client, void *key, void *value)
if (!client->scratch_ctx.ctx[index].ctx_key) {
if (set_idx == -1)
set_idx = index;
/* dont break, to check if key already exists
/* don't break, to check if key already exists
further on */
}
if (client->scratch_ctx.ctx[index].ctx_key == key) {

@@ -2371,7 +2371,7 @@ valid_ipv4_address (char *address, int length, gf_boolean_t wildcard_acc)
/*
* To prevent cases where last character is '.' and which have
* consecutive dots like ".." as strtok ignore consecutive
* delimeters.
* delimiters.
*/
if (length <= 0 ||
(strstr (address, "..")) ||

@@ -2410,7 +2410,7 @@ out:
/**
* valid_ipv4_subnetwork() takes the pattern and checks if it contains
* a valid ipv4 subnetwork pattern i.e. xx.xx.xx.xx/n. IPv4 address
* part (xx.xx.xx.xx) and mask bits lengh part (n). The mask bits lengh
* part (xx.xx.xx.xx) and mask bits length part (n). The mask bits length
* must be in 0-32 range (ipv4 addr is 32 bit). The pattern must be
* in this format.
*

@@ -5014,7 +5014,7 @@ close_fds_except (int *fdv, size_t count)
*
* gf_getgrouplist allocates a gid_t buffer which is big enough to
* hold the list of auxiliary group ids for user, up to the GF_MAX_AUX_GROUPS
* threshold. Upon succesfull invocation groups will be pointed to that buffer.
* threshold. Upon successful invocation groups will be pointed to that buffer.
*
* @return success: the number of auxiliary group ids retrieved
* failure: -1

@@ -68,7 +68,7 @@
#ifndef HAVE_LLISTXATTR
/* This part is valid only incase of old glibc which doesn't support
/* This part is valid only in case of old glibc which doesn't support
* 'llistxattr()' system calls.
*/

@@ -11,7 +11,7 @@
/* libglusterfs/src/defaults.c:
This file contains functions, which are used to fill the 'fops', 'cbk'
structures in the xlator structures, if they are not written. Here, all the
function calls are plainly forwared to the first child of the xlator, and
function calls are plainly forwarded to the first child of the xlator, and
all the *_cbk function does plain STACK_UNWIND of the frame, and returns.
This function also implements *_resume () functions, which does same
@@ -2919,7 +2919,7 @@ out:
*
* @this: dict to serialize
* @buf: buffer to serialize into. This must be
* atleast dict_serialized_length (this) large
* at least dict_serialized_length (this) large
*
* @return: success: 0
* failure: -errno

@@ -3039,7 +3039,7 @@ out:
*
* @this: dict to serialize
* @buf: buffer to serialize into. This must be
* atleast dict_serialized_length (this) large
* at least dict_serialized_length (this) large
*
* @return: success: 0
* failure: -errno

@@ -3273,7 +3273,7 @@ out:
* @delimiter : the delimiter to separate the values
*
* @return : 0 -> success
* : -errno -> faliure
* : -errno -> failure
*/
int
dict_serialize_value_with_delim_lk (dict_t *this, char *buf, int32_t *serz_len,

@@ -323,7 +323,7 @@ event_register_epoll (struct event_pool *event_pool, int fd,
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
/* TODO: Even with the below check, there is a possiblity of race,
/* TODO: Even with the below check, there is a possibility of race,
* What if the destroy mode is set after the check is done.
* Not sure of the best way to prevent this race, ref counting
* is one possibility.

@@ -700,7 +700,7 @@ event_dispatch_epoll (struct event_pool *event_pool)
gf_common_mt_event_pool);
if (!ev_data) {
if (i == 0) {
/* Need to suceed creating 0'th
/* Need to succeed creating 0'th
* thread, to joinable and wait */
break;
} else {

@@ -173,7 +173,7 @@ event_pool_new_poll (int count, int eventthreadcount)
"thread count (%d) ignored", eventthreadcount);
}
/* although, eventhreadcount for poll implementaiton is always
/* although, eventhreadcount for poll implementation is always
* going to be 1, eventthreadcount needs to be set to 1 so that
* rpcsvc_request_handler() thread scaling works flawlessly in
* both epoll and poll models

@@ -196,8 +196,8 @@ out:
* Note: to be called before event_pool_destroy is called.
* The order in which cleaning is performed:
* - Register a pipe fd(this is for waking threads in poll()/epoll_wait())
* - Set the destroy mode, which this no new event registration will succede
* - Reconfigure the thread count to 0(this will succede only in destroy mode)
* - Set the destroy mode, which this no new event registration will succeed
* - Reconfigure the thread count to 0(this will succeed only in destroy mode)
* - Wake up all the threads in poll() or epoll_wait(), so that they can
* destroy themselves.
* - Wait for the thread to join(which will happen only after all the other

@@ -756,7 +756,7 @@ __fd_anonymous (inode_t *inode, int32_t flags)
/* if (fd); then we already have increased the refcount in
__fd_lookup_anonymous(), so no need of one more fd_ref().
if (!fd); then both create and bind wont bump up the ref
if (!fd); then both create and bind won't bump up the ref
count, so we have to call fd_ref() after bind. */
if (!fd) {
fd = __fd_create (inode, 0);

@@ -867,7 +867,7 @@ __fd_ctx_set (fd_t *fd, xlator_t *xlator, uint64_t value)
if (!fd->_ctx[index].key) {
if (set_idx == -1)
set_idx = index;
/* dont break, to check if key already exists
/* don't break, to check if key already exists
further on */
}
if (fd->_ctx[index].xl_key == xlator) {

@@ -115,7 +115,7 @@ add_connection_node (gfdb_conn_node_t *_conn_node) {
/*TODO What if the unlock fails.
* Will it lead to deadlock?
* Most of the gluster code
* no check for unlock or destory of mutex!*/
* no check for unlock or destroy of mutex!*/
}
ret = 0;
out:

@@ -163,7 +163,7 @@ delete_conn_node (gfdb_conn_node_t *_conn_node)
/*TODO What if the unlock fails.
* Will it lead to deadlock?
* Most of the gluster code
* no check for unlock or destory of mutex!*/
* no check for unlock or destroy of mutex!*/
ret = -1;
goto out;
}

@@ -241,7 +241,7 @@ init_db (dict_t *args, gfdb_db_type_t gfdb_db_type)
goto alloc_failed;
}
/*Init the list component of db conneciton object*/
/*Init the list component of db connection object*/
INIT_LIST_HEAD (&_conn_node->conn_list);

@@ -355,7 +355,7 @@ out:
* link of inode from GF_FLINK_TB and
* GFDB_FOP_UNDEL_ALL to delete all the records from
* GF_FLINK_TB and GF_FILE_TB.
* TODO: Should seperate this function into the
* TODO: Should separate this function into the
* delete_record function
* Refer CTR Xlator features/changetimerecorder for usage
* Arguments:

@@ -756,7 +756,7 @@ clear_files_heat (gfdb_conn_node_t *conn_node)
* this variable. The freeing of the memory should be done by
* the caller.
* Return:
* On success return the lenght of the version string that is
* On success return the length of the version string that is
* extracted.
* On failure return -1
* */
@@ -72,7 +72,7 @@ typedef int (*fini_db_t) (gfdb_conn_node_t *_conn_node);
* link of inode from GF_FLINK_TB and
* GFDB_FOP_UNDEL_ALL to delete all the records from
* GF_FLINK_TB and GF_FILE_TB.
* TODO: Should seperate this function into the
* TODO: Should separate this function into the
* delete_record function
* Refer CTR Xlator features/changetimerecorder for usage
* Arguments:

@@ -289,7 +289,7 @@ typedef int (*get_db_version_t)(gfdb_conn_node_t *_conn_node,
* extracted. This function will allocate memory
* to pragma_value. The caller should free the memory.
* Return:
* On success return the lenght of the param value that is
* On success return the length of the param value that is
* extracted.
* On failure return -1
* */

@@ -446,7 +446,7 @@ gfdb_query_record_deserialize (char *in_buffer,
list_add_tail (&link_info->list,
&(ret_qrecord->link_list));
/* Reseting link_info */
/* Resetting link_info */
link_info = NULL;
}

@@ -72,7 +72,7 @@ typedef gfdb_query_record_t *
/* Fuction to add linkinfo to query record */
/* Function to add linkinfo to query record */
int
gfdb_add_link_to_query_record (gfdb_query_record_t *gfdb_query_record,
uuid_t pgfid,

@@ -255,7 +255,7 @@ typedef struct gfdb_db_record {
/*Time of change or access*/
gfdb_time_t gfdb_wind_change_time;
gfdb_time_t gfdb_unwind_change_time;
/* For crash consistancy while inserting/updating hard links */
/* For crash consistency while inserting/updating hard links */
gf_boolean_t islinkupdate;
/* For link consistency we do a double update i.e mark the link
* during the wind and during the unwind we update/delete the link.

@@ -149,7 +149,7 @@ gf_sql_str2sync_t (const char *sync_str)
}
/*TODO replace GF_CALLOC by mem_pool or iobuff if required for performace */
/*TODO replace GF_CALLOC by mem_pool or iobuff if required for performance */
static char *
sql_stmt_init ()
{

@@ -168,7 +168,7 @@ out:
return sql_stmt;
}
/*TODO replace GF_FREE by mem_pool or iobuff if required for performace */
/*TODO replace GF_FREE by mem_pool or iobuff if required for performance */
static void
sql_stmt_fini (char **sql_stmt)
{

@@ -627,7 +627,7 @@ gf_get_basic_query_stmt (char **out_stmt)
/*
* Find All files recorded in the DB
* Input:
* query_callback : query callback fuction to handle
* query_callback : query callback function to handle
* result records from the query
* */
int

@@ -699,7 +699,7 @@ out:
/*
* Find recently changed files from the DB
* Input:
* query_callback : query callback fuction to handle
* query_callback : query callback function to handle
* result records from the query
* from_time : Time to define what is recent
* */

@@ -799,7 +799,7 @@ out:
/*
* Find unchanged files from a specified time from the DB
* Input:
* query_callback : query callback fuction to handle
* query_callback : query callback function to handle
* result records from the query
* for_time : Time from where the file/s are not changed
* */

@@ -903,7 +903,7 @@ out:
* Find recently changed files with a specific frequency from the DB
* Input:
* db_conn : db connection object
* query_callback : query callback fuction to handle
* query_callback : query callback function to handle
* result records from the query
* from_time : Time to define what is recent
* freq_write_cnt : Frequency thresold for write

@@ -1048,7 +1048,7 @@ out:
/*
* Find unchanged files from a specified time, w.r.t to frequency, from the DB
* Input:
* query_callback : query callback fuction to handle
* query_callback : query callback function to handle
* result records from the query
* for_time : Time from where the file/s are not changed
* freq_write_cnt : Frequency thresold for write

@@ -1255,7 +1255,7 @@ out:
* this variable. The freeing of the memory should be done by
* the caller.
* Return:
* On success return the lenght of the version string that is
* On success return the length of the version string that is
* extracted.
* On failure return -1
* */

@@ -1310,7 +1310,7 @@ out:
* extracted. This function will allocate memory
* to pragma_value. The caller should free the memory
* Return:
* On success return the lenght of the pragma/setting value that is
* On success return the length of the pragma/setting value that is
* extracted.
* On failure return -1
* */

@@ -1415,7 +1415,7 @@ out:
* Input:
* void *db_conn : Sqlite connection
* gf_boolean_t compact_active : Is compaction on?
* gf_boolean_t compact_mode_switched : Did we just flip the compaction swtich?
* gf_boolean_t compact_mode_switched : Did we just flip the compaction switch?
* Return:
* On success return 0
* On failure return -1
@@ -229,7 +229,7 @@ gfdb_set_sql_params(char *comp_name, dict_t *from_dict, dict_t *to_dict)
GF_ASSERT (from_dict);
GF_ASSERT (to_dict);
/*Extact and Set of the sql params from page_size*/
/*Extract and Set of the sql params from page_size*/
for (sql_index = sql_pagesize_ix; sql_index < sql_index_max;
sql_index++) {
_val_str = NULL;

@@ -292,7 +292,7 @@ int gf_sqlite3_clear_files_heat (void *db_conn);
* this variable. The freeing of the memory should be done by
* the caller.
* Return:
* On success return the lenght of the version string that is
* On success return the length of the version string that is
* extracted.
* On failure return -1
* */

@@ -306,7 +306,7 @@ int gf_sqlite3_version (void *db_conn, char **version);
* extracted. This function will allocate memory
* to pragma_value. The caller should free the memory
* Return:
* On success return the lenght of the pragma/setting value that is
* On success return the length of the pragma/setting value that is
* extracted.
* On failure return -1
* */

@@ -328,7 +328,7 @@ gf_sqlite3_set_pragma (void *db_conn, char *pragma_key, char *pragma_value);
* Input:
* void *db_conn : Sqlite connection
* gf_boolean_t compact_active : Is compaction on?
* gf_boolean_t compact_mode_switched : Did we just flip the compaction swtich?
* gf_boolean_t compact_mode_switched : Did we just flip the compaction switch?
* Return:
* On success return 0
* On failure return -1

@@ -631,14 +631,14 @@ gf_update_time (gf_sql_connection_t *sql_conn,
freq_cntr_str = (record_counter) ?
", WRITE_FREQ_CNTR = WRITE_FREQ_CNTR + 1" : "";
/*Prefectly safe as we will not go array of bound*/
/*Perfectly safe as we will not go array of bound*/
sprintf (update_str, "UPDATE "
GF_FILE_TABLE
" SET W_SEC = ?, W_MSEC = ? "
" %s"/*place for read freq counters*/
" WHERE GF_ID = ? ;", freq_cntr_str);
} else {
/*Prefectly safe as we will not go array of bound*/
/*Perfectly safe as we will not go array of bound*/
sprintf (update_str, "UPDATE "
GF_FILE_TABLE
" SET UW_SEC = ?, UW_MSEC = ? ;");

@@ -651,14 +651,14 @@ gf_update_time (gf_sql_connection_t *sql_conn,
freq_cntr_str = (record_counter) ?
", READ_FREQ_CNTR = READ_FREQ_CNTR + 1" : "";
/*Prefectly safe as we will not go array of bound*/
/*Perfectly safe as we will not go array of bound*/
sprintf (update_str, "UPDATE "
GF_FILE_TABLE
" SET W_READ_SEC = ?, W_READ_MSEC = ? "
" %s"/*place for read freq counters*/
" WHERE GF_ID = ? ;", freq_cntr_str);
} else {
/*Prefectly safe as we will not go array of bound*/
/*Perfectly safe as we will not go array of bound*/
sprintf (update_str, "UPDATE "
GF_FILE_TABLE
" SET UW_READ_SEC = ?, UW_READ_MSEC = ? ;");

@@ -794,7 +794,7 @@ gf_sql_insert_wind (gf_sql_connection_t *sql_conn,
* gf_file_tb is deleted but the link record
* still exist. Lookup heal will attempt a heal
* with create_wind set. The link heal will fail
* as there is already a record and if we dont
* as there is already a record and if we don't
* ignore the error we will not heal the
* gf_file_tb.
* 2) Rename file in cold tier: During a rename

@@ -803,7 +803,7 @@ gf_sql_insert_wind (gf_sql_connection_t *sql_conn,
* linkto file. When the file gets heated and
* moves to hot tier there will be attempt from
* ctr lookup heal to create link and file
* record and If we dont ignore the error we
* record and If we don't ignore the error we
* will not heal the gf_file_tb.
* */
}

@@ -187,7 +187,7 @@
/*
* Always append entries to end of the enum, do not delete entries.
* Currently dict_set_flag allows to set upto 256 flag, if the enum
* Currently dict_set_flag allows to set up to 256 flag, if the enum
* needs to grow beyond this dict_set_flag has to be changed accordingly
*/
enum gf_internal_fop_indicator {

@@ -1796,7 +1796,7 @@ inode_table_destroy (inode_table_t *inode_table) {
* also not free its inode context and this could leak a lot of
* memory, force free the inodes by changing the ref to 0.
* The problem with this is that any reference to inode after this
* calling this funtion will lead to a crash.
* calling this function will lead to a crash.
* 3. Knowing there could be leakes, just free the inode contexts of
* all the inodes. and let the inodes be alive. This way the major
* memory consumed by the inode contexts are freed, but there can

@@ -342,8 +342,8 @@ iobuf_pool_destroy (struct iobuf_pool *iobuf_pool)
__iobuf_arena_destroy (iobuf_pool, iobuf_arena);
}
/* If there are no iobuf leaks, there shoould be
* no standard alloced arenas, iobuf_put will free such
* arenas.
* no standard allocated arenas, iobuf_put will free
* such arenas.
* TODO: Free the stdalloc arenas forcefully if present?
*/
}
@@ -444,7 +444,7 @@ __iobuf_arena_prune (struct iobuf_pool *iobuf_pool,
GF_VALIDATE_OR_GOTO ("iobuf", iobuf_pool, out);
/* code flow comes here only if the arena is in purge list and we can
* free the arena only if we have atleast one arena in 'arenas' list
* free the arena only if we have at least one arena in 'arenas' list
* (ie, at least few iobufs free in arena), that way, there won't
* be spurious mmap/unmap of buffers
*/

@@ -856,7 +856,7 @@ iobref_new ()
return NULL;
}
iobref->alloced = 16;
iobref->allocated = 16;
iobref->used = 0;
LOCK_INIT (&iobref->lock);

@@ -885,7 +885,7 @@ iobref_destroy (struct iobref *iobref)
GF_VALIDATE_OR_GOTO ("iobuf", iobref, out);
for (i = 0; i < iobref->alloced; i++) {
for (i = 0; i < iobref->allocated; i++) {
iobuf = iobref->iobrefs[i];
iobref->iobrefs[i] = NULL;

@@ -924,11 +924,11 @@ iobref_clear (struct iobref *iobref)
GF_VALIDATE_OR_GOTO ("iobuf", iobref, out);
for (; i < iobref->alloced; i++) {
for (; i < iobref->allocated; i++) {
if (iobref->iobrefs[i] != NULL) {
iobuf_unref (iobref->iobrefs[i]);
} else {
/** iobuf's are attched serially */
/** iobuf's are attached serially */
break;
}
}

@@ -947,12 +947,12 @@ __iobref_grow (struct iobref *iobref)
int i = 0;
newptr = GF_REALLOC (iobref->iobrefs,
iobref->alloced * 2 * (sizeof (*iobref->iobrefs)));
iobref->allocated * 2 * (sizeof (*iobref->iobrefs)));
if (newptr) {
iobref->iobrefs = newptr;
iobref->alloced *= 2;
iobref->allocated *= 2;
for (i = iobref->used; i < iobref->alloced; i++)
for (i = iobref->used; i < iobref->allocated; i++)
iobref->iobrefs[i] = NULL;
}
}

@@ -967,16 +967,16 @@ __iobref_add (struct iobref *iobref, struct iobuf *iobuf)
GF_VALIDATE_OR_GOTO ("iobuf", iobref, out);
GF_VALIDATE_OR_GOTO ("iobuf", iobuf, out);
if (iobref->used == iobref->alloced) {
if (iobref->used == iobref->allocated) {
__iobref_grow (iobref);
if (iobref->used == iobref->alloced) {
if (iobref->used == iobref->allocated) {
ret = -ENOMEM;
goto out;
}
}
for (i = 0; i < iobref->alloced; i++) {
for (i = 0; i < iobref->allocated; i++) {
if (iobref->iobrefs[i] == NULL) {
iobref->iobrefs[i] = iobuf_ref (iobuf);
iobref->used++;

@@ -1021,7 +1021,7 @@ iobref_merge (struct iobref *to, struct iobref *from)
LOCK (&from->lock);
{
for (i = 0; i < from->alloced; i++) {
for (i = 0; i < from->allocated; i++) {
iobuf = from->iobrefs[i];
if (!iobuf)

@@ -1075,7 +1075,7 @@ iobref_size (struct iobref *iobref)
LOCK (&iobref->lock);
{
for (i = 0; i < iobref->alloced; i++) {
for (i = 0; i < iobref->allocated; i++) {
if (iobref->iobrefs[i])
size += iobuf_size (iobref->iobrefs[i]);
}

@@ -41,7 +41,7 @@
/* each unit hosts @page_size bytes of memory */
struct iobuf;
/* one region of memory MMAPed from the operating system */
/* one region of memory mapped from the operating system */
/* each region MMAPs @arena_size bytes of memory */
/* each arena hosts @arena_size / @page_size IOBUFs */
struct iobuf_arena;

@@ -152,7 +152,7 @@ struct iobref {
gf_lock_t lock;
gf_atomic_t ref;
struct iobuf **iobrefs;
int alloced;
int allocated;
int used;
};

@@ -2530,7 +2530,7 @@ _do_slog_format (const char *event, va_list inp, char **msg) {
break;
}
/* Get number of times % is used in input for formating, */
/* Get number of times % is used in input for formatting, */
/* this count will be used to skip those many args from the */
/* main list and will be used to format inner format */
num_format_chars = 0;
@ -211,37 +211,37 @@ int _gf_log_eh (const char *function, const char *fmt, ...);
|
||||
#define FMT_WARN(fmt...) do { if (0) printf (fmt); } while (0)
|
||||
|
||||
/* Interface to log messages with message IDs */
|
||||
#define gf_msg(dom, levl, errnum, msgid, fmt...) do { \
|
||||
#define gf_msg(dom, level, errnum, msgid, fmt...) do { \
|
||||
_gf_msg (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
levl, errnum, 0, msgid, ##fmt); \
|
||||
level, errnum, 0, msgid, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
/* no frills, no thrills, just a vanilla message, used to print the graph */
|
||||
#define gf_msg_plain(levl, fmt...) do { \
|
||||
_gf_msg_plain (levl, ##fmt); \
|
||||
#define gf_msg_plain(level, fmt...) do { \
|
||||
_gf_msg_plain (level, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
#define gf_msg_plain_nomem(levl, msg) do { \
|
||||
_gf_msg_plain_nomem (levl, msg); \
|
||||
#define gf_msg_plain_nomem(level, msg) do { \
|
||||
_gf_msg_plain_nomem (level, msg); \
|
||||
} while (0)
|
||||
|
||||
#define gf_msg_vplain(levl, fmt, va) do { \
|
||||
_gf_msg_vplain (levl, fmt, va); \
|
||||
#define gf_msg_vplain(level, fmt, va) do { \
|
||||
_gf_msg_vplain (level, fmt, va); \
|
||||
} while (0)
|
||||
|
||||
#define gf_msg_backtrace_nomem(level, stacksize) do { \
|
||||
_gf_msg_backtrace_nomem (level, stacksize); \
|
||||
} while (0)
|
||||
|
||||
#define gf_msg_callingfn(dom, levl, errnum, msgid, fmt...) do { \
|
||||
_gf_msg (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
levl, errnum, 1, msgid, ##fmt); \
|
||||
#define gf_msg_callingfn(dom, level, errnum, msgid, fmt...) do { \
|
||||
_gf_msg (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
level, errnum, 1, msgid, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
/* No malloc or calloc should be called in this function */
|
||||
#define gf_msg_nomem(dom, levl, size) do { \
|
||||
_gf_msg_nomem (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
levl, size); \
|
||||
#define gf_msg_nomem(dom, level, size) do { \
|
||||
_gf_msg_nomem (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
level, size); \
|
||||
} while (0)
|
||||
|
||||
/* Debug or trace messages do not need message IDs as these are more developer
|
||||
@ -256,27 +256,27 @@ int _gf_log_eh (const char *function, const char *fmt, ...);
|
||||
GF_LOG_TRACE, errnum, 0, 0, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
#define gf_log(dom, levl, fmt...) do { \
|
||||
#define gf_log(dom, level, fmt...) do { \
|
||||
FMT_WARN (fmt); \
|
||||
_gf_log (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
levl, ##fmt); \
|
||||
level, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
#define gf_log_eh(fmt...) do { \
|
||||
FMT_WARN (fmt); \
|
||||
_gf_log_eh (__FUNCTION__, ##fmt); \
|
||||
_gf_log_eh (__FUNCTION__, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
#define gf_log_callingfn(dom, levl, fmt...) do { \
|
||||
FMT_WARN (fmt); \
|
||||
#define gf_log_callingfn(dom, level, fmt...) do { \
|
||||
FMT_WARN (fmt); \
|
||||
_gf_log_callingfn (dom, __FILE__, __FUNCTION__, __LINE__, \
|
||||
levl, ##fmt); \
|
||||
level, ##fmt); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* Log once in GF_UNIVERSAL_ANSWER times */
|
||||
#define GF_LOG_OCCASIONALLY(var, args...) if (!(var++%GF_UNIVERSAL_ANSWER)) { \
|
||||
gf_log (args); \
|
||||
gf_log (args); \
|
||||
}
|
||||
|
||||
struct _glusterfs_ctx;
|
||||
@ -340,14 +340,14 @@ _gf_smsg (const char *domain, const char *file, const char *function,
uint64_t msgid, const char *event, ...);

/* Interface to log messages with message IDs */
#define gf_smsg(dom, levl, errnum, msgid, event...) do { \
_gf_smsg (dom, __FILE__, __FUNCTION__, __LINE__, \
levl, errnum, 0, msgid, ##event); \
#define gf_smsg(dom, level, errnum, msgid, event...) do { \
_gf_smsg (dom, __FILE__, __FUNCTION__, __LINE__, \
level, errnum, 0, msgid, ##event); \
} while (0)

#define gf_slog(dom, levl, event...) do { \
_gf_slog (dom, __FILE__, __FUNCTION__, __LINE__, \
levl, ##event); \
#define gf_slog(dom, level, event...) do { \
_gf_slog (dom, __FILE__, __FUNCTION__, __LINE__, \
level, ##event); \
} while (0)

#endif /* __LOGGING_H__ */
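The logging.h hunks above only rename the macro parameter from "levl" to "level"; the macros themselves define the calling convention of the message-ID based logging interface. For readers unfamiliar with it, here is a minimal standalone sketch of that call pattern. The stand-in macros below simply forward to printf so the snippet compiles without the GlusterFS headers (the same trick run.h uses further down in this diff); the domain string, message ID value and GF_UNIVERSAL_ANSWER constant are illustrative assumptions, not values taken from the tree.

    #include <stdio.h>
    #include <errno.h>

    /* Simplified stand-ins for the real logging entry points (illustration
     * only, not the GlusterFS implementation). */
    #define GF_LOG_ERROR        3
    #define GF_UNIVERSAL_ANSWER 42

    #define gf_msg(dom, level, errnum, msgid, fmt, ...) \
            printf ("[%s] lvl=%d err=%d id=%d: " fmt "\n", \
                    dom, level, errnum, msgid, ##__VA_ARGS__)

    /* Same rate-limiting idea as GF_LOG_OCCASIONALLY: emit one message out
     * of every GF_UNIVERSAL_ANSWER calls made with the same counter. */
    #define LOG_OCCASIONALLY(var, ...) \
            if (!((var)++ % GF_UNIVERSAL_ANSWER)) { gf_msg (__VA_ARGS__); }

    int
    main (void)
    {
            static int counter = 0;
            int i;

            gf_msg ("example-xl", GF_LOG_ERROR, ENOMEM, 10042,
                    "failed to allocate %zu bytes", (size_t) 128);

            for (i = 0; i < 100; i++)
                    LOG_OCCASIONALLY (counter, "example-xl", GF_LOG_ERROR, 0, 0,
                                      "still retrying (i=%d)", i);
            return 0;
    }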
@ -21,7 +21,7 @@ _gf_ref_get (gf_ref_t *ref)
/* if cnt == 0, we're in a fatal position, the object will be free'd
*
* There is a race when two threads do a _gf_ref_get(). Only one of
* them may get a 0 returned. That is acceptible, because one
* them may get a 0 returned. That is acceptable, because one
* _gf_ref_get() returning 0 should be handled as a fatal problem and
* when correct usage/locking is used, it should never happen.
*/

@ -53,7 +53,7 @@ _gf_ref_get (gf_ref_t *ref);
unsigned int
_gf_ref_put (gf_ref_t *ref);

/* _gf_ref_init -- initalize an embedded refcount object
/* _gf_ref_init -- initialize an embedded refcount object
*
* @release: function to call when the refcount == 0
* @data: parameter to be passed to @release
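The refcount.h hunks describe the embedded-refcount contract: _gf_ref_init() sets up the object with a release callback, _gf_ref_get() returning 0 is a fatal usage error, and the callback runs when _gf_ref_put() drops the count to zero. A self-contained sketch of that pattern using C11 atomics follows; the types and names are illustrative stand-ins, since the real gf_ref_t internals are not part of this diff.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*release_fn) (void *data);

    typedef struct {
            atomic_uint cnt;
            release_fn  release;   /* called when the count reaches 0 */
            void       *data;      /* passed to release */
    } ref_t;

    static void ref_init (ref_t *ref, release_fn release, void *data)
    {
            atomic_init (&ref->cnt, 1);
            ref->release = release;
            ref->data = data;
    }

    /* Returns the previous count; 0 means the object is already dying and,
     * as the comment above says, must be treated as a fatal problem. */
    static unsigned int ref_get (ref_t *ref)
    {
            unsigned int old = atomic_load (&ref->cnt);
            do {
                    if (old == 0)
                            return 0;
            } while (!atomic_compare_exchange_weak (&ref->cnt, &old, old + 1));
            return old;
    }

    static unsigned int ref_put (ref_t *ref)
    {
            unsigned int left = atomic_fetch_sub (&ref->cnt, 1) - 1;
            if (left == 0 && ref->release)
                    ref->release (ref->data);
            return left;
    }

    struct obj { ref_t ref; int payload; };
    static void obj_free (void *data) { free (data); }

    int main (void)
    {
            struct obj *o = calloc (1, sizeof (*o));
            ref_init (&o->ref, obj_free, o);
            if (ref_get (&o->ref) == 0)     /* would be a fatal bug */
                    return 1;
            ref_put (&o->ref);
            ref_put (&o->ref);              /* count hits 0, obj_free runs */
            printf ("done\n");
            return 0;
    }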
@ -32,7 +32,7 @@
#define RLIST_IOV_MELDED_ALLOC_SIZE (RBUF_IOVEC_SIZE + ROT_BUFF_ALLOC_SIZE)

/**
* iovec list is not shrinked (deallocated) if usage/total count
* iovec list is not shrunk (deallocated) if usage/total count
* falls in this range. this is the fast path and should satisfy
* most of the workloads. for the rest shrinking iovec list is
* generous.

@ -73,7 +73,7 @@ struct rlist_iter {
--((riter)->iter))

/**
* Sequence number assigment routine is called during buffer
* Sequence number assignment routine is called during buffer
* switch under rbuff ->lock.
*/
typedef void (sequence_fn) (rbuf_list_t *, void *);
@ -33,11 +33,11 @@
* on glusterfs:
* $ cc -DRUN_STANDALONE -c run.c
*
* Compiling a demo progam that exercises bits of run.c
* Compiling a demo program that exercises bits of run.c
* functionality (linking to glusterfs):
* $ cc -DRUN_DO_DEMO -orun run.c `pkg-config --libs --cflags glusterfs-api`
*
* Compiling a demo progam that exercises bits of run.c
* Compiling a demo program that exercises bits of run.c
* functionality (with no dependence on glusterfs):
*
* $ cc -DRUN_DO_DEMO -DRUN_STANDALONE -orun run.c

@ -54,7 +54,7 @@ int close_fds_except (int *fdv, size_t count);
#define gf_strdup(s) strdup(s)
#define gf_vasprintf(p, f, va) vasprintf(p, f, va)
#define gf_loglevel_t int
#define gf_msg_callingfn(dom, levl, errnum, msgid, fmt, args...) printf("LOG: " fmt "\n", ##args)
#define gf_msg_callingfn(dom, level, errnum, msgid, fmt, args...) printf("LOG: " fmt "\n", ##args)
#define LOG_DEBUG 0
#ifdef RUN_STANDALONE
#include <stdbool.h>

@ -330,7 +330,7 @@ runner_start (runner_t *runner)
}

if (ret != -1) {
/* save child from inheriting our singal handling */
/* save child from inheriting our signal handling */
sigemptyset (&set);
sigprocmask (SIG_SETMASK, &set, NULL);
|
||||
|
||||
ret = statvfs (path, buf);
|
||||
#ifdef __FreeBSD__
|
||||
/* FreeBSD doesn't return the expected vaule in buf->f_bsize. It
|
||||
/* FreeBSD doesn't return the expected value in buf->f_bsize. It
|
||||
* contains the optimal I/O size instead of the file system block
|
||||
* size. Gluster expects that this field contains the block size.
|
||||
*/
|
||||
@ -438,7 +438,7 @@ sys_fstatvfs (int fd, struct statvfs *buf)
|
||||
|
||||
ret = fstatvfs (fd, buf);
|
||||
#ifdef __FreeBSD__
|
||||
/* FreeBSD doesn't return the expected vaule in buf->f_bsize. It
|
||||
/* FreeBSD doesn't return the expected value in buf->f_bsize. It
|
||||
* contains the optimal I/O size instead of the file system block
|
||||
* size. Gluster expects this field to contain the block size.
|
||||
*/
|
||||
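Both sys_statvfs() and sys_fstatvfs() carry the same comment: on FreeBSD, statvfs() reports the optimal I/O size in f_bsize while callers expect the file system block size there. The actual adjustment is outside the visible hunk; one plausible sketch of the idea, assuming the fragment size (f_frsize) is the value to fall back to, is:

    #include <stdio.h>
    #include <sys/statvfs.h>

    /* Hedged sketch: normalise f_bsize so callers always see a block size,
     * whatever the platform reports in that field. */
    static int my_statvfs (const char *path, struct statvfs *buf)
    {
            int ret = statvfs (path, buf);

    #ifdef __FreeBSD__
            if (ret == 0)
                    buf->f_bsize = buf->f_frsize;  /* assumption: use frsize */
    #endif
            return ret;
    }

    int main (void)
    {
            struct statvfs buf;

            if (my_statvfs ("/", &buf) == 0)
                    printf ("block size: %lu, free blocks: %lu\n",
                            (unsigned long) buf.f_bsize,
                            (unsigned long) buf.f_bfree);
            return 0;
    }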
@ -485,7 +485,7 @@ sys_fdatasync (int fd)
void
gf_add_prefix(const char *ns, const char *key, char **newkey)
{
/* if we dont have any namespace, append USER NS */
/* if we don't have any namespace, append USER NS */
if (strncmp(key, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
strncmp(key, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
strncmp(key, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
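The gf_add_prefix() hunk shows only the start of the check: when an xattr key carries none of the recognised namespaces, the "user." namespace is prepended. A small standalone sketch of that idea (the prefix list and helper name are illustrative; the real prefixes come from the GlusterFS headers):

    #define _GNU_SOURCE            /* for asprintf */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define USER_PREFIX "user."    /* assumption: mirrors XATTR_USER_PREFIX */

    /* Return a newly allocated key that is guaranteed to carry a namespace. */
    static char *add_user_prefix_if_missing (const char *key)
    {
            static const char *known[] = { "user.", "trusted.", "security.",
                                           "system.", NULL };
            char *newkey = NULL;
            int i;

            for (i = 0; known[i]; i++)
                    if (strncmp (key, known[i], strlen (known[i])) == 0)
                            return strdup (key);      /* already namespaced */

            if (asprintf (&newkey, "%s%s", USER_PREFIX, key) < 0)
                    return NULL;
            return newkey;
    }

    int main (void)
    {
            char *k = add_user_prefix_if_missing ("myattr");
            printf ("%s\n", k ? k : "(alloc failed)"); /* prints user.myattr */
            free (k);
            return 0;
    }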
@ -205,7 +205,7 @@ rpc_transport_load (glusterfs_ctx_t *ctx, dict_t *options, char *trans_name)
"\"socket\"");
} else {
{
/* Backword compatibility to handle * /client,
/* Backward compatibility to handle * /client,
* * /server.
*/
char *tmp = strchr (type, '/');
@ -563,7 +563,7 @@ out:

//give negative values to skip setting that value
//this function asserts if both the values are negative.
//why call it if you dont set it.
//why call it if you don't set it.
int
rpc_transport_keepalive_options_set (dict_t *options, int32_t interval,
int32_t time, int32_t timeout)

@ -2859,7 +2859,7 @@ rpcsvc_transport_privport_check (rpcsvc_t *svc, char *volname, uint16_t port)
return ret;

gf_log (GF_RPCSVC, GF_LOG_TRACE, "Client port: %d", (int)port);
/* If the port is already a privileged one, dont bother with checking
/* If the port is already a privileged one, don't bother with checking
* options.
*/
if (port <= 1024) {
@ -2973,7 +2973,7 @@ rpcsvc_match_subnet_v4 (const char *addrtok, const char *ipaddr)
*slash = '\0';
/*
* Find the IPv4 network mask in network byte order.
* IMP: String slash+1 is already validated, it cant have value
* IMP: String slash+1 is already validated, it can't have value
* more than IPv4_ADDR_SIZE (32).
*/
prefixlen = (uint32_t) atoi (slash + 1);
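rpcsvc_match_subnet_v4() parses the "addr/prefixlen" form and builds a network-byte-order mask from the prefix length before comparing the client address against the allowed subnet. A self-contained sketch of that computation (the real function also validates its input, which is omitted here; the helper name is illustrative):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return 1 when ip falls inside net/prefixlen, 0 otherwise. */
    static int match_subnet_v4 (const char *net, uint32_t prefixlen,
                                const char *ip)
    {
            struct in_addr a, n;
            uint32_t mask;

            if (inet_pton (AF_INET, net, &n) != 1 ||
                inet_pton (AF_INET, ip, &a) != 1 || prefixlen > 32)
                    return 0;

            /* network-byte-order mask with the top `prefixlen` bits set */
            mask = prefixlen ? htonl (~0u << (32 - prefixlen)) : 0;

            return (a.s_addr & mask) == (n.s_addr & mask);
    }

    int main (void)
    {
            printf ("%d\n", match_subnet_v4 ("10.0.0.0", 24, "10.0.0.42")); /* 1 */
            printf ("%d\n", match_subnet_v4 ("10.0.0.0", 24, "10.0.1.42")); /* 0 */
            return 0;
    }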
@ -91,7 +91,7 @@ loop:
port--;
}

/* Incase if all the secure ports are exhausted, we are no more
/* In case if all the secure ports are exhausted, we are no more
* binding to secure ports, hence instead of getting a random
* port, lets define the range to restrict it from getting from
* ports reserved for bricks i.e from range of 49152 - 65535

@ -4052,7 +4052,7 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc)
* have to account for the quota used while sending
* first msg (which may or may not be returned to pool
* at this point) while deriving peer.quota from
* header->rm_credit. Hence the arithmatic below,
* header->rm_credit. Hence the arithmetic below,
* instead of directly setting it to header->rm_credit.
*/
priv->peer.quota = header->rm_credit

@ -79,7 +79,7 @@ loop:
port--;
}

/* Incase if all the secure ports are exhausted, we are no more
/* In case if all the secure ports are exhausted, we are no more
* binding to secure ports, hence instead of getting a random
* port, lets define the range to restrict it from getting from
* ports reserved for bricks i.e from range of 49152 - 65535

@ -3084,7 +3084,7 @@ socket_server_event_handler (int fd, int idx, int gen, void *data,

/* event_register() could have failed for some
* reason, implying that the new_sock cannot be
* added to the epoll set. If we wont get any
* added to the epoll set. If we won't get any
* more notifications for new_sock from epoll,
* then we better remove the corresponding
* new_trans object from the RPCSVC service list.
@ -3313,7 +3313,7 @@ socket_connect (rpc_transport_t *this, int port)
goto unlock;
}

/* Cant help if setting socket options fails. We can continue
/* Can't help if setting socket options fails. We can continue
* working nonetheless.
*/
if (priv->windowsize != 0) {
@ -3340,7 +3340,7 @@ socket_connect (rpc_transport_t *this, int port)

/* Make sure we are not vulnerable to someone setting
* net.ipv6.bindv6only to 1 so that gluster services are
* avalable over IPv4 & IPv6.
* available over IPv4 & IPv6.
*/
#ifdef IPV6_DEFAULT
int disable_v6only = 0;
@ -3562,7 +3562,7 @@ err:
/* Cleaup requires to send notification to upper layer which
intern holds the big_lock. There can be dead-lock situation
if big_lock is already held by the current thread.
So transfer the ownership to seperate thread for cleanup.
So transfer the ownership to separate thread for cleanup.
*/
arg = GF_CALLOC (1, sizeof (*arg),
gf_sock_connect_error_state_t);
@ -3645,7 +3645,7 @@ socket_listen (rpc_transport_t *this)
goto unlock;
}

/* Cant help if setting socket options fails. We can continue
/* Can't help if setting socket options fails. We can continue
* working nonetheless.
*/
if (priv->windowsize != 0) {
@ -725,7 +725,8 @@ dict_to_xdr (dict_t *this, gfx_dict *dict)

/* This is done for backward compatibility as dict is
heavily used for transporting data over wire.
Ideally, whereever there is an issue, fix and move on */
Ideally, wherever there is an issue, fix and
move on */
xpair->value.gfx_value_u.other.other_val =
dpair->value->data;
xpair->value.gfx_value_u.other.other_len =

@ -87,7 +87,7 @@ main (int argc, char *argv[])

ret = glfs_set_volfile_server (fs, "tcp", argv[1], 24007);
if (ret != 0) {
fprintf (stderr, "glfs_set_volfile_server: retuned %d\n", ret);
fprintf (stderr, "glfs_set_volfile_server: returned %d\n", ret);
goto out;
}
ret = glfs_set_logging (fs, "/tmp/ec-fgetxattr.log", 7);

@ -1,6 +1,6 @@
/* Pre-requisites:-
*
* 1. Make sure that peformance translators are switched off while running this test.
* 1. Make sure that performance translators are switched off while running this test.
* 2. Perform the following volume set operation:
* # gluster volume set <VOLNAME> locks.mandatory-locking optimal
* 3. For installation under non-standard paths, export LD_LIBRARY_PATH to

@ -123,7 +123,7 @@ main (int argc, char *argv[])
LOG_ERR ("glfs_write", ret);
} else {
fprintf (stderr,
"glfs_write suceeded\n");
"glfs_write succeeded\n");
}
free(writebuf);
} else {
@ -175,7 +175,7 @@ main (int argc, char *argv[])
reason, object, flags, expire);
} else {
fprintf (stderr,
"Didnt receive upcall notify event");
"Didn't receive upcall notify event");
ret = -1;
goto err;
}

@ -178,7 +178,7 @@ main (int argc, char *argv[])
ret = glfs_init (fs);
LOG_ERR("glfs_init", ret);

/* Intialize fs2 */
/* Initialize fs2 */
fs2 = glfs_new (volname);
if (!fs2) {
fprintf (stderr, "glfs_new fs2: returned NULL\n");
@ -199,7 +199,7 @@ main (int argc, char *argv[])

/* Check if the return mask contains the event */
if (!(ret & GLFS_EVENT_INODE_INVALIDATE)) {
fprintf (stderr, "glfs_upcall_register return doesnt contain"
fprintf (stderr, "glfs_upcall_register return doesn't contain"
" upcall event\n");
return -1;
}
@ -208,7 +208,7 @@ main (int argc, char *argv[])

/* Check if the return mask contains the event */
if ((ret < 0) || !(ret & GLFS_EVENT_INODE_INVALIDATE)) {
fprintf (stderr, "glfs_upcall_register return doesnt contain"
fprintf (stderr, "glfs_upcall_register return doesn't contain"
" upcall event\n");
return -1;
}
@ -233,7 +233,7 @@ main (int argc, char *argv[])

/* Check if the return mask contains the event */
if ((ret < 0) || !(ret & GLFS_EVENT_INODE_INVALIDATE)) {
fprintf (stderr, "glfs_upcall_register return doesnt contain"
fprintf (stderr, "glfs_upcall_register return doesn't contain"
" upcall event\n");
return -1;
}
@ -242,7 +242,7 @@ main (int argc, char *argv[])

/* Check if the return mask contains the event */
if ((ret < 0) || !(ret & GLFS_EVENT_INODE_INVALIDATE)) {
fprintf (stderr, "glfs_upcall_register return doesnt contain"
fprintf (stderr, "glfs_upcall_register return doesn't contain"
" upcall event\n");
return -1;
}

@ -23,7 +23,7 @@ main (int argc, char *argv[])

ret = glfs_set_volfile_server (fs, "tcp", argv[1], 24007);
if (ret != 0) {
fprintf (stderr, "glfs_set_volfile_server: retuned %d\n", ret);
fprintf (stderr, "glfs_set_volfile_server: returned %d\n", ret);
goto out;
}
ret = glfs_set_logging (fs, argv[4], 7);

@ -23,7 +23,7 @@ main (int argc, char *argv[])

ret = glfs_set_volfile_server (fs, "tcp", argv[1], 24007);
if (ret != 0) {
fprintf (stderr, "glfs_set_volfile_server: retuned %d\n", ret);
fprintf (stderr, "glfs_set_volfile_server: returned %d\n", ret);
goto out;
}
ret = glfs_set_logging (fs, "/dev/null", 7);

@ -25,7 +25,7 @@ main (int argc, char *argv[])

ret = glfs_set_volfile_server (fs, "tcp", argv[1], 24007);
if (ret != 0) {
fprintf (stderr, "glfs_set_volfile_server: retuned %d\n", ret);
fprintf (stderr, "glfs_set_volfile_server: returned %d\n", ret);
goto out;
}

@ -22,7 +22,7 @@ main (int argc, char *argv[])

ret = glfs_set_volfile_server (fs, "tcp", argv[1], 24007);
if (ret != 0) {
fprintf (stderr, "glfs_set_volfile_server: retuned %d\n", ret);
fprintf (stderr, "glfs_set_volfile_server: returned %d\n", ret);
goto out;
}
ret = glfs_set_logging (fs, argv[4], 7);

@ -61,7 +61,7 @@ main (int argc, char *argv[])

ret = glfs_set_volfile_server (fs, "tcp", argv[1], 24007);
if (ret != 0) {
fprintf (stderr, "glfs_set_volfile_server: retuned %d\n", ret);
fprintf (stderr, "glfs_set_volfile_server: returned %d\n", ret);
goto out;
}

@ -199,7 +199,7 @@ do {
do { \
if (ret != value) { \
fprintf (log_file, "\n Testcase %d failed, ret = %d, value=%d\n", test_case, ret, value); \
goto error; /*test unsuccesfull*/ \
goto error; /*test unsuccessful*/ \
} \
fprintf (log_file, "\n Testcase %d Succeeded\n", test_case); \
} while (0) \
@ -210,7 +210,7 @@ recall_cbk (struct glfs_lease lease, void *data)
int ret = -1;
char ld[GLFS_LEASE_ID_SIZE] = "";

fprintf (log_file, "\nRECALL recieved on lease_id:(%s)", lease.lease_id);
fprintf (log_file, "\nRECALL received on lease_id:(%s)", lease.lease_id);
memcpy (ld, lease.lease_id, GLFS_LEASE_ID_SIZE);
ret = unlk_write_lease ((glfs_fd_t *)data, ld);
VERIFY_RESULT (500, ret, SHUD_PASS);

@ -31,7 +31,7 @@

/*
* FTW_ACTIONRETVAL is a GNU libc extension. It is used here to skip
* hiearchies. On other systems we will still walk the tree, ignoring
* hierarchies. On other systems we will still walk the tree, ignoring
* entries.
*/
#ifndef FTW_ACTIONRETVAL
@ -482,7 +482,7 @@ process_entry (const char *path, const struct stat *sb,

This mainly helps in calculating the checksum of network filesystems
(client-server), where the server might have some hidden directories
for managing the filesystem. So to calculate the sanity of filesytem
for managing the filesystem. So to calculate the sanity of filesystem
one has to get the checksum of the client and then the export directory
of server by telling arequal to ignore some of the directories which
are not part of the namespace.

@ -65,7 +65,7 @@ if __name__ == '__main__':
" them. Valid values of [en] are `text`, `hex`,"
" and `base64`. Values encoded as text strings are"
" enclosed in double quotes (\"), while strings"
" encoded as hexidecimal and base64 are prefixed with"
" encoded as hexadecimal and base64 are prefixed with"
" 0x and 0s, respectively.")
parser.add_option("-m", action="store", dest="pattern", type="string",
help="Only include attributes with names matching the"

@ -15,7 +15,7 @@ from ctypes import CDLL, c_int, create_string_buffer

class Xattr(object):

"""singleton that wraps the extended attribues system
"""singleton that wraps the extended attributes system
interface for python using ctypes

Just implement it to the degree we need it, in particular

@ -381,7 +381,7 @@ def _get_args():
action=StoreAbsPath)
parser_query.add_argument("--since-time", help="UNIX epoch time since "
"which listing is required", type=int)
parser_query.add_argument("--end-time", help="UNIX epoch time upto "
parser_query.add_argument("--end-time", help="UNIX epoch time up to "
"which listing is required", type=int)
parser_query.add_argument("--no-encode",
help="Do not encode path in output file",
@ -912,7 +912,7 @@ afr_set_split_brain_choice (int ret, call_frame_t *frame, void *opaque)
/* If timer cancel failed here it means that the
* previous cbk will be executed which will set
* spb_choice to -1. So we can consider the
* 'valid to -1' case to be a sucess
* 'valid to -1' case to be a success
* (i.e. ret = 0) and goto unlock.
*/
goto unlock;
@ -4722,7 +4722,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int child_index = (long)cookie;
int call_count = 0;
gf_boolean_t failed = _gf_false;
gf_boolean_t succeded = _gf_false;
gf_boolean_t succeeded = _gf_false;
int i = 0;
afr_private_t *priv = NULL;

@ -4742,7 +4742,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
* return error else return success unless all the subvolumes
* failed.
* TODO: In case of failure, we need to unregister the xattrs
* from the other subvolumes where it succeded (once upcall
* from the other subvolumes where it succeeded (once upcall
* fixes the Bz-1371622)*/
for (i = 0; i < priv->child_count; i++) {
if (!local->replies[i].valid)
@ -4762,7 +4762,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
break;
}
if (local->replies[i].op_ret == 0) {
succeded = _gf_true;
succeeded = _gf_true;
local->op_ret = 0;
local->op_errno = 0;
if (!local->xdata_rsp && local->replies[i].xdata) {
@ -4772,7 +4772,7 @@ afr_ipc_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
}
}

if (!succeded && !failed) {
if (!succeeded && !failed) {
local->op_ret = -1;
local->op_errno = ENOTCONN;
}
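The afr_ipc_cbk() hunks show a common AFR aggregation pattern: walk the per-child replies, treat a real error as failure, mark success if any child returned 0, and fall back to ENOTCONN only when no child produced a usable reply. A condensed sketch of that logic over simplified stand-in types (not the real afr structures):

    #include <errno.h>
    #include <stdio.h>

    struct reply { int valid; int op_ret; int op_errno; };

    /* Returns 0 on success; otherwise -1 with *op_errno set. */
    static int aggregate_replies (const struct reply *replies, int children,
                                  int *op_errno)
    {
            int i, succeeded = 0, failed = 0, err = 0;

            for (i = 0; i < children; i++) {
                    if (!replies[i].valid)
                            continue;
                    if (replies[i].op_ret < 0 &&
                        replies[i].op_errno != ENOTCONN) {
                            failed = 1;            /* a real error wins */
                            err = replies[i].op_errno;
                            break;
                    }
                    if (replies[i].op_ret == 0)
                            succeeded = 1;
            }

            if (failed) {
                    *op_errno = err;
                    return -1;
            }
            if (!succeeded) {
                    *op_errno = ENOTCONN;          /* no usable reply at all */
                    return -1;
            }
            return 0;
    }

    int main (void)
    {
            struct reply r[3] = { {1, 0, 0}, {1, -1, ENOTCONN}, {0, 0, 0} };
            int err = 0;
            printf ("ret=%d\n", aggregate_replies (r, 3, &err));   /* ret=0 */
            return 0;
    }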
@ -5314,7 +5314,7 @@ __afr_handle_child_down_event (xlator_t *this, xlator_t *child_xlator,
if (down_children == priv->child_count) {
gf_msg (this->name, GF_LOG_ERROR, 0, AFR_MSG_SUBVOLS_DOWN,
"All subvolumes are down. Going "
"offline until atleast one of them "
"offline until at least one of them "
"comes back up.");
gf_event (EVENT_AFR_SUBVOLS_DOWN, "subvol=%s", this->name);
} else {
@ -5364,7 +5364,7 @@ afr_notify (xlator_t *this, int32_t event,
priv->did_discovery = _gf_false;


/* parent xlators dont need to know about every child_up, child_down
/* parent xlators don't need to know about every child_up, child_down
* because of afr ha. If all subvolumes go down, child_down has
* to be triggered. In that state when 1 subvolume comes up child_up
* needs to be triggered. dht optimizes revalidate lookup by sending

@ -735,7 +735,7 @@ afr_getxattr_node_uuid_cbk (call_frame_t *frame, void *cookie,

/**
* _current_ becomes _next_
* If done with all childs and yet no success; give up !
* If done with all children and yet no success; give up !
*/
curr_call_child = (int) ((long)cookie);
if (++curr_call_child == priv->child_count)

@ -102,7 +102,7 @@ __afr_inode_write_finalize (call_frame_t *frame, xlator_t *this)
below is important.

- Highest precedence: largest op_ret
- Next precendence: if all op_rets are equal, read subvol
- Next precedence: if all op_rets are equal, read subvol
- Least precedence: any succeeded subvol
*/
if ((local->op_ret < local->replies[i].op_ret) ||
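__afr_inode_write_finalize() picks which child's reply becomes the fop result using the precedence spelled out in the comment: the largest op_ret wins, ties go to the read subvolume, and any successful child beats no reply at all. A simplified sketch of that selection, with the read-subvolume bookkeeping reduced to a single index (assumption; the real code tracks considerably more state):

    #include <stdio.h>

    struct reply { int valid; int op_ret; };

    /* Returns the index of the reply that should become the fop result,
     * or -1 when no child succeeded. */
    static int pick_source (const struct reply *replies, int children,
                            int read_subvol)
    {
            int i, best = -1;

            for (i = 0; i < children; i++) {
                    if (!replies[i].valid || replies[i].op_ret < 0)
                            continue;
                    if (best == -1 ||
                        replies[i].op_ret > replies[best].op_ret ||   /* largest op_ret   */
                        (replies[i].op_ret == replies[best].op_ret &&
                         i == read_subvol))                           /* tie: read subvol */
                            best = i;
            }
            return best;
    }

    int main (void)
    {
            struct reply r[3] = { {1, 4096}, {1, 4096}, {1, 1024} };
            printf ("source=%d\n", pick_source (r, 3, 1));  /* prints source=1 */
            return 0;
    }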
@ -601,7 +601,7 @@ is_blocking_locks_count_sufficient (call_frame_t *frame, xlator_t *this)
"gfid:%s.", uuid_utoa (local->inode->gfid));
return _gf_false;
} else {
/*inodelk succeded on atleast one child. */
/*inodelk succeeded on at least one child. */
return _gf_true;
}

@ -611,7 +611,7 @@ is_blocking_locks_count_sufficient (call_frame_t *frame, xlator_t *this)
return _gf_false;
}
/* For FOPS that take multiple sets of locks (mkdir, rename),
* there must be atleast one brick on which the locks from
* there must be at least one brick on which the locks from
* all lock sets were successful. */
for (child = 0; child < priv->child_count; child++) {
ret = _gf_true;

@ -309,7 +309,7 @@ afr_pending_xattrs_init (afr_private_t *priv, xlator_t *this)
child_count = priv->child_count;
if (priv->thin_arbiter_count) {
/* priv->pending_key[THIN_ARBITER_BRICK_INDEX] is used as the
* name of the thin arbiter file for persistance across add/
* name of the thin arbiter file for persistence across add/
* removal of DHT subvols.*/
child_count++;
}
@ -621,7 +621,7 @@ fini (xlator_t *this)
UNLOCK (&priv->lock);
this->private = NULL;
afr_priv_destroy (priv);
//if (this->itable);//I dont see any destroy func
//if (this->itable);//I don't see any destroy func

return 0;
}
@ -1967,7 +1967,7 @@ cont:
DHT_STRIP_PHASE1_FLAGS (&local->stbuf);
dht_set_fixed_dir_stat (&local->postparent);

/* local->stbuf is udpated only from subvols which have a layout
/* local->stbuf is updated only from subvols which have a layout
* The reason is to avoid choosing attr heal source from newly
* added bricks. In case e.g we have only one subvol and for
* some reason layout is not present on it, then local->stbuf
@ -2454,7 +2454,7 @@ dht_lookup_everywhere_done (call_frame_t *frame, xlator_t *this)
*
* Performing deletion of stale link file when
* setting key in dict fails, may cause the data
* loss becase of the above mentioned race.
* loss because of the above mentioned race.
*/


@ -3255,9 +3255,9 @@ dht_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
"Entry %s missing on subvol %s",
loc->path, prev->name);

/* lookup-optimize supercedes lookup-unhashed settings,
/* lookup-optimize supersedes lookup-unhashed settings,
* - so if it is set, do not process search_unhashed
* - except, in the case of rebalance deamon, we want to
* - except, in the case of rebalance daemon, we want to
* force the lookup_everywhere behavior */
if (!conf->defrag && conf->lookup_optimize && loc->parent) {
ret = dht_inode_ctx_layout_get (loc->parent, this,
@ -4429,7 +4429,7 @@ dht_vgetxattr_fill_and_set (dht_local_t *local, dict_t **dict, xlator_t *this,
*
* For node-uuid we just don't have all the pretty formatting,
* but since this is a generic routine for pathinfo & node-uuid
* we dont have conditional space allocation and try to be
* we don't have conditional space allocation and try to be
* generic
*/
local->alloc_len += (2 * strlen (this->name))
@ -6122,7 +6122,7 @@ dht_setxattr (call_frame_t *frame, xlator_t *this,
* promotions and demotions are multithreaded
* so the original frame from gf_defrag_start()
* is not carried. A new frame will be created when
* we do syncop_setxattr(). This doesnot have the
* we do syncop_setxattr(). This does not have the
* frame->root->pid of the original frame. So we pass
* this dic key-value when we do syncop_setxattr() to do
* data migration and set the frame->root->pid to
@ -8417,7 +8417,7 @@ dht_link_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}

/* Update parent on success, even if P1/2 checks are positve.
/* Update parent on success, even if P1/2 checks are positive.
* The second call on success will further update the parent */
if (local->loc.parent) {
dht_inode_ctx_time_update (local->loc.parent, this,
@ -9933,7 +9933,7 @@ dht_rmdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto unlock;
}

/* Track if rmdir succeeded on atleast one subvol*/
/* Track if rmdir succeeded on at least one subvol*/
local->fop_succeeded = 1;
dht_iatt_merge (this, &local->preparent, preparent);
dht_iatt_merge (this, &local->postparent, postparent);

@ -470,7 +470,7 @@ dht_subvol_with_free_space_inodes(xlator_t *this, xlator_t *subvol, xlator_t *ig
}


/* Get subvol which has atleast one inode and maximum space */
/* Get subvol which has at least one inode and maximum space */
xlator_t *
dht_subvol_maxspace_nonzeroinode (xlator_t *this, xlator_t *subvol,
dht_layout_t *layout)
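dht_subvol_maxspace_nonzeroinode(), as the renamed comment says, picks the subvolume with the most free space among those that still have at least one free inode. A reduced sketch of that selection over plain structs (illustrative types; the real function works on dht layouts and cached statfs data):

    #include <stdint.h>
    #include <stdio.h>

    struct subvol_usage {
            const char *name;
            uint64_t    avail_space;
            uint64_t    avail_inodes;
    };

    /* Return the subvol with maximum free space that has at least one free
     * inode, or NULL if none qualifies. */
    static const struct subvol_usage *
    pick_maxspace_subvol (const struct subvol_usage *s, int count)
    {
            const struct subvol_usage *best = NULL;
            int i;

            for (i = 0; i < count; i++) {
                    if (s[i].avail_inodes == 0)
                            continue;              /* no inodes left: skip */
                    if (!best || s[i].avail_space > best->avail_space)
                            best = &s[i];
            }
            return best;
    }

    int main (void)
    {
            struct subvol_usage s[] = {
                    { "brick-0", 100, 0 },         /* out of inodes */
                    { "brick-1",  80, 10 },
                    { "brick-2",  90, 5 },
            };
            const struct subvol_usage *b = pick_maxspace_subvol (s, 3);
            printf ("%s\n", b ? b->name : "none"); /* prints brick-2 */
            return 0;
    }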
@ -1322,7 +1322,7 @@ dht_migration_complete_check_task (void *data)
inode = (!local->fd) ? local->loc.inode : local->fd->inode;

/* getxattr on cached_subvol for 'linkto' value. Do path based getxattr
* as root:root. If a fd is already open, access check wont be done*/
* as root:root. If a fd is already open, access check won't be done*/

if (!local->loc.inode) {
ret = syncop_fgetxattr (src_node, local->fd, &dict,
@ -1600,7 +1600,7 @@ dht_rebalance_inprogress_task (void *data)
inode = (!local->fd) ? local->loc.inode : local->fd->inode;

/* getxattr on cached_subvol for 'linkto' value. Do path based getxattr
* as root:root. If a fd is already open, access check wont be done*/
* as root:root. If a fd is already open, access check won't be done*/
if (local->loc.inode) {
SYNCTASK_SETID (0, 0);
ret = syncop_getxattr (src_node, &local->loc, &dict,
@ -815,7 +815,7 @@ __dht_rebalance_create_dst_file (xlator_t *this, xlator_t *to, xlator_t *from,
*server (posix_layer) and binding it in server (incrementing fd count),
*so if in that time-gap, if other process sends unlink considering it
*as a linkto file, because inode->fd count will be 0, so file will be
*unlinked at the backend. And because furthur operations are performed
*unlinked at the backend. And because further operations are performed
*on fd, so though migration will be done but will end with no file
*at the backend.
*/
@ -1017,9 +1017,9 @@ __dht_check_free_space (xlator_t *this, xlator_t *to, xlator_t *from,
During rebalance `migrate-data` - Destination subvol experiences
a `reduction` in 'blocks' of free space, at the same time source
subvol gains certain 'blocks' of free space. A valid check is
necessary here to avoid errorneous move to destination where
necessary here to avoid erroneous move to destination where
the space could be scantily available.
With heterogenous brick support, an actual space comparison could
With heterogeneous brick support, an actual space comparison could
prevent any files being migrated to newly added bricks if they are
smaller then the free space available on the existing bricks.
*/
@ -1119,7 +1119,7 @@ find_new_subvol:
if ((!(*new_subvol)) || (*new_subvol == from)) {
gf_msg (this->name, GF_LOG_WARNING, 0,
DHT_MSG_SUBVOL_INSUFF_SPACE, "Could not find any subvol"
" with space accomodating the file - %s. Consider "
" with space accommodating the file - %s. Consider "
"adding bricks", loc->path);

*target_changed = _gf_false;
@ -3363,7 +3363,7 @@ gf_defrag_get_entry (xlator_t *this, int i, struct dht_container **container,
tmp_container->df_entry->dict =
dict_ref (df_entry->dict);

/*Build Container Structue >> END*/
/*Build Container Structure >> END*/

ret = 0;
goto out;
@ -3670,7 +3670,7 @@ gf_defrag_settle_hash (xlator_t *this, gf_defrag_info_t *defrag,

if (conf->local_subvols_cnt == 0 || !conf->lookup_optimize) {
/* Commit hash updates are only done on local subvolumes and
* only when lookup optmization is needed (for older client
* only when lookup optimization is needed (for older client
* support)
*/
return 0;

@ -1462,7 +1462,7 @@ dht_selfheal_dir_mkdir_lock_cbk (call_frame_t *frame, void *cookie,
if (op_ret < 0) {

/* We get this error when the directory entry was not created
* on a newky attatched tier subvol. Hence proceed and do mkdir
* on a newky attached tier subvol. Hence proceed and do mkdir
* on the tier subvol.
*/
if (op_errno == EINVAL) {
@ -2047,7 +2047,7 @@ dht_selfheal_dir_getafix (call_frame_t *frame, loc_t *loc,
overlaps = local->selfheal.overlaps_cnt;

if (holes || overlaps) {
/* If the layout has anomolies which would change the hash
/* If the layout has anomalies which would change the hash
* ranges, then we need to reset the commit_hash for this
* directory, as the layout would change and things may not
* be in place as expected */

@ -564,7 +564,7 @@ tier_set_migrate_data (dict_t *migrate_data)
* promotions and demotions are multithreaded
* so the original frame from gf_defrag_start()
* is not carried. A new frame will be created when
* we do syncop_setxattr(). This doesnot have the
* we do syncop_setxattr(). This does not have the
* frame->root->pid of the original frame. So we pass
* this dic key-value when we do syncop_setxattr() to do
* data migration and set the frame->root->pid to
@ -964,7 +964,7 @@ tier_migrate_using_query_file (void *_args)
* per_file_status and per_link_status
* 0 : success
* -1 : failure
* 1 : ignore the status and dont count for migration
* 1 : ignore the status and don't count for migration
* */
int per_file_status = 0;
int per_link_status = 0;
@ -2444,7 +2444,7 @@ static void
while (1) {

/*
* Check if a graph switch occured. If so, stop migration
* Check if a graph switch occurred. If so, stop migration
* thread. It will need to be restarted manually.
*/
any = THIS->ctx->active->first;
@ -2489,8 +2489,8 @@ static void

/* To have proper synchronization amongst all
* brick holding nodes, so that promotion and demotions
* start atomicly w.r.t promotion/demotion frequency
* period, all nodes should have thier system time
* start atomically w.r.t promotion/demotion frequency
* period, all nodes should have their system time
* in-sync with each other either manually set or
* using a NTP server*/
ret = gettimeofday (&current_time, NULL);

@ -13,7 +13,7 @@


/******************************************************************************/
/* This is from dht-rebalancer.c as we dont have dht-rebalancer.h */
/* This is from dht-rebalancer.c as we don't have dht-rebalancer.h */
#include "dht-common.h"
#include "xlator.h"
#include <signal.h>
@ -479,7 +479,7 @@ ec_code_space_create(ec_code_t *code, size_t size)
done_close:
/* If everything has succeeded, we already have the memory areas
* mapped. We don't need the file descriptor anymore because the
* backend storage will be there until the mmaped regions are
* backend storage will be there until the mmap()'d regions are
* unmapped. */
sys_close(fd);
done:

@ -1631,7 +1631,7 @@ int32_t ec_get_real_size_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
}

/* This function is used to get the trusted.ec.size xattr from a file when
* no lock is needed on the inode. This is only required to maintan iatt
* no lock is needed on the inode. This is only required to maintain iatt
* structs on fops that manipulate directory entries but do not operate
* directly on the inode, like link, rename, ...
*

@ -41,7 +41,7 @@ enum _ec_xattrop_flags {
};

/* We keep two sets of flags. One to determine what's really providing the
* currect xattrop and the other to know what the parent fop of the xattrop
* current xattrop and the other to know what the parent fop of the xattrop
* needs to proceed. It might happen that a fop needs some information that
* is being already requested by a previous fop. The two sets are stored
* contiguously. */

@ -2933,7 +2933,7 @@ ec_need_data_heal (ec_t *ec, inode_t *inode, default_args_cbk_t *replies,
size = alloca0 (ec->nodes * sizeof (*size));

/* When dd is going on and heal info is called there is a very good
* chance for on disk sizes to mismatch eventhough nothing is wrong
* chance for on disk sizes to mismatch even though nothing is wrong
* we don't need ondisk size check there. But if the file is either
* self-locked or the caller wants a thorough check then make sure to
* perform on disk check also. */

@ -96,7 +96,7 @@ ec_adjust_size_up(ec_t *ec, uint64_t *value, gf_boolean_t scale)
tmp += ec->fragment_size;
} else {
tmp += ec->stripe_size;
/* If no scaling is requested there's a posibility of
/* If no scaling is requested there's a possibility of
* overflow. */
if (tmp < ec->stripe_size) {
tmp = UINT64_MAX;
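ec_adjust_size_up() rounds a size up to the next stripe (or fragment) boundary and, as the fixed comment notes, clamps the result when the intermediate addition wraps around. A small standalone sketch of that round-up-with-overflow-check (a generic version, not the exact ec arithmetic):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Round `value` up to a multiple of `unit`, saturating at UINT64_MAX when
     * the intermediate addition overflows (the case the comment above covers). */
    static uint64_t adjust_size_up (uint64_t value, uint64_t unit)
    {
            uint64_t tmp = value + unit - 1;

            if (tmp < value)                 /* wrapped around */
                    return UINT64_MAX;
            return tmp - (tmp % unit);
    }

    int main (void)
    {
            printf ("%" PRIu64 "\n", adjust_size_up (1000, 512));          /* 1024 */
            printf ("%" PRIu64 "\n", adjust_size_up (UINT64_MAX - 10, 512));
            return 0;
    }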
@ -1167,7 +1167,7 @@ void ec_discard_adjust_offset_size(ec_fop_data_t *fop)
ec_t *ec = fop->xl->private;

fop->user_size = fop->size;
/* If discard length covers atleast a fragment on brick, we will
/* If discard length covers at least a fragment on brick, we will
* perform discard operation(when fop->size is non-zero) else we just
* write zeros.
*/

@ -43,7 +43,7 @@ int32_t ec_lock_check(ec_fop_data_t *fop, uintptr_t *mask)
case EC_LOCK_MODE_NONE:
case EC_LOCK_MODE_ALL:
/* Goal is to treat non-blocking lock as failure
* even if there is a signle EAGAIN*/
* even if there is a single EAGAIN*/
notlocked |= ans->mask;
break;
}
Some files were not shown because too many files have changed in this diff.