synctask: introduce synclocks for co-operative locking

This patch introduces synclocks - co-operative locks for synctasks.
Synctasks yield themselves when a lock cannot be acquired at the time
of the lock call, and the unlocker will wake the yielded locker at
the time of unlock.

The implementation is safe in a multi-threaded syncenv framework.

It is also safe for sharing the lock between non-synctasks. i.e, the
same lock can be used for synchronization between a synctask and
a regular thread. In such a situation, waiting synctasks will yield
themselves while non-synctasks will sleep on a cond variable. The
unlocker (which could be either a synctask or a regular thread) will
wake up any type of lock waiter (synctask or regular).

Usage:

    Declaration and Initialization
    ------------------------------

    synclock_t lock;

    ret = synclock_init (&lock);
    if (ret) {
        /* lock could not be allocated */
    }

   Locking and non-blocking lock attempt
   -------------------------------------

   ret = synclock_trylock (&lock);
   if (ret && (errno == EBUSY)) {
      /* lock is held by someone else */
      return;
   }

   synclock_lock (&lock);
   {
      /* critical section */
   }
   synclock_unlock (&lock);

Change-Id: I081873edb536ddde69a20f4a7dc6558ebf19f5b2
BUG: 763820
Signed-off-by: Anand Avati <avati@redhat.com>
Reviewed-on: http://review.gluster.org/4717
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Raghavendra G <raghavendra@gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
This commit is contained in:
Anand Avati 2013-03-23 13:55:09 -07:00
parent af939370ad
commit 87300be91c
2 changed files with 167 additions and 1 deletions

View File

@ -1,5 +1,5 @@
/*
Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
@ -507,6 +507,154 @@ syncenv_new (size_t stacksize)
}
/* Initialize @lock for use. Sets up the condition variable used by
 * non-synctask waiters, the wait queue used by synctask waiters, and
 * the guard mutex protecting both.
 *
 * Returns 0 on success, -1 if @lock is NULL, or the pthread_mutex_init
 * error code on failure.
 */
int
synclock_init (synclock_t *lock)
{
        if (!lock)
                return -1;

        pthread_cond_init (&lock->cond, 0);
        lock->lock = 0;
        lock->owner = NULL;  /* fix: owner was left uninitialized; the
                                indeterminate pointer could be read by
                                __synclock_unlock's ownership check */
        INIT_LIST_HEAD (&lock->waitq);
        return pthread_mutex_init (&lock->guard, 0);
}
/* Release the pthread resources held by @lock.  The caller must
 * ensure no one is waiting on or holding the lock.
 *
 * Returns 0 on success, -1 if @lock is NULL, or the
 * pthread_mutex_destroy error code.
 */
int
synclock_destroy (synclock_t *lock)
{
        if (lock == NULL)
                return -1;

        (void) pthread_cond_destroy (&lock->cond);

        return pthread_mutex_destroy (&lock->guard);
}
/* Acquire @lock.  Caller MUST already hold lock->guard.
 *
 * While the lock is held by someone else:
 *   - a synctask caller puts itself on lock->waitq, drops the guard,
 *     and yields; when woken by the unlocker it re-takes the guard,
 *     removes itself from the queue, and re-checks (the lock may have
 *     been grabbed by another waiter in between — hence the while loop);
 *   - a plain (non-synctask) caller sleeps on lock->cond instead.
 *
 * On acquisition, records the owning synctask (NULL for a plain
 * thread).  Returns 0 on success, -1 if @lock is NULL.
 */
static int
__synclock_lock (struct synclock *lock)
{
struct synctask *task = NULL;
if (!lock)
return -1;
task = synctask_get ();
while (lock->lock) {
if (task) {
/* called within a synctask */
list_add_tail (&task->waitq, &lock->waitq);
{
/* guard must not be held across the yield, or the
   unlocker could never enter its critical section */
pthread_mutex_unlock (&lock->guard);
synctask_yield (task);
pthread_mutex_lock (&lock->guard);
}
list_del_init (&task->waitq);
} else {
/* called by a non-synctask */
pthread_cond_wait (&lock->cond, &lock->guard);
}
}
lock->lock = _gf_true;
lock->owner = task;
return 0;
}
/* Acquire @lock, blocking until it is available.  A synctask caller
 * yields while waiting; a plain thread sleeps on the lock's cond.
 *
 * Returns 0 on success, -1 on invalid @lock.
 */
int
synclock_lock (synclock_t *lock)
{
        int status = -1;

        pthread_mutex_lock (&lock->guard);
        status = __synclock_lock (lock);
        pthread_mutex_unlock (&lock->guard);

        return status;
}
/* Attempt to acquire @lock without blocking.
 *
 * Returns 0 on success.  Returns -1 with errno set to EBUSY when the
 * lock is held by someone else; -1 with errno left as 0 for an
 * invalid @lock.
 */
int
synclock_trylock (synclock_t *lock)
{
        int status = -1;

        errno = 0;

        pthread_mutex_lock (&lock->guard);
        if (lock->lock)
                errno = EBUSY;       /* already held - do not wait */
        else
                status = __synclock_lock (lock);
        pthread_mutex_unlock (&lock->guard);

        return status;
}
/* Release @lock.  Caller MUST already hold lock->guard.
 *
 * Wakes at most one non-synctask waiter (cond_signal) and at most one
 * synctask waiter (the head of waitq) rather than all waiters, as the
 * comment below explains.  Returns 0 on success, -1 if @lock is NULL.
 *
 * NOTE(review): unlocking by a task other than the recorded owner is
 * currently tolerated silently — see the "warn ?" placeholder.
 */
static int
__synclock_unlock (synclock_t *lock)
{
struct synctask *task = NULL;
struct synctask *curr = NULL;
if (!lock)
return -1;
curr = synctask_get ();
if (lock->owner != curr) {
/* warn ? */
}
lock->lock = _gf_false;
/* There could be both synctasks and non synctasks
   waiting (or none, or either). As a mid-approach
   between maintaining too many waiting counters
   at one extreme and a thundering herd on unlock
   at the other, call a cond_signal (which wakes
   one waiter) and first synctask waiter. So at
   most we have two threads waking up to grab the
   just released lock.
*/
pthread_cond_signal (&lock->cond);
if (!list_empty (&lock->waitq)) {
task = list_entry (lock->waitq.next, struct synctask, waitq);
synctask_wake (task);
}
return 0;
}
/* Release @lock and wake up to one waiter of each kind (one plain
 * thread via the cond, one synctask via its wait queue).
 *
 * Returns 0 on success, -1 on invalid @lock.
 */
int
synclock_unlock (synclock_t *lock)
{
        int status;

        pthread_mutex_lock (&lock->guard);
        status = __synclock_unlock (lock);
        pthread_mutex_unlock (&lock->guard);

        return status;
}
/* FOPS */

View File

@ -69,6 +69,8 @@ struct synctask {
pthread_mutex_t mutex; /* for synchronous spawning of synctask */
pthread_cond_t cond;
int done;
struct list_head waitq; /* can wait only "once" at a time */
};
@ -96,6 +98,15 @@ struct syncenv {
};
/* Co-operative lock shareable between synctasks and plain threads.
 * Synctask waiters park on @waitq and yield; plain threads sleep on
 * @cond.  All members are protected by @guard. */
struct synclock {
pthread_mutex_t guard; /* guard the remaining members, pair @cond */
pthread_cond_t cond; /* waiting non-synctasks */
struct list_head waitq; /* waiting synctasks */
gf_boolean_t lock; /* _gf_true or _gf_false, lock status */
struct synctask *owner; /* NULL if current owner is not a synctask */
};
typedef struct synclock synclock_t;
struct syncargs {
int op_ret;
int op_errno;
@ -220,6 +231,13 @@ void synctask_waitfor (struct synctask *task, int count);
int synctask_setid (struct synctask *task, uid_t uid, gid_t gid);
#define SYNCTASK_SETID(uid, gid) synctask_setid (synctask_get(), uid, gid);
int synclock_init (synclock_t *lock);
/* fix: was misspelled "synclock_destory", leaving the synclock_destroy
 * definition in syncop.c without a matching prototype */
int synclock_destroy (synclock_t *lock);
int synclock_lock (synclock_t *lock);
int synclock_trylock (synclock_t *lock);
int synclock_unlock (synclock_t *lock);
int syncop_lookup (xlator_t *subvol, loc_t *loc, dict_t *xattr_req,
/* out */
struct iatt *iatt, dict_t **xattr_rsp, struct iatt *parent);