Rework condition variable implementation to be consistent with
other primitive implementations.  Additionally ensure that GFP_ATOMIC
is used for allocations when in interrupt context.



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@108 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-05-15 17:10:30 +00:00
parent a97df54e83
commit 4efd41189a
4 changed files with 209 additions and 152 deletions
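
As background, a minimal sketch (not part of this commit) of the allocation
pattern the message describes: atomic allocations must be used in contexts
where sleeping is forbidden.  in_interrupt(), GFP_ATOMIC, and GFP_KERNEL are
standard kernel API; the helper below is hypothetical.  The commit itself
goes through the SPL kmem_alloc() wrapper with a KM_NOSLEEP flag instead
(see __cv_init() below).

    /* Hypothetical helper: duplicate a string safely from any context. */
    static char *spl_strdup_any_ctx(const char *name)
    {
            gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
            char *buf = kmalloc(strlen(name) + 1, flags);

            if (buf != NULL)
                    strcpy(buf, name);

            return buf;
    }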


@@ -7,6 +7,7 @@ extern "C" {
#include <linux/module.h>
#include <linux/wait.h>
#include <sys/mutex.h>
/* The kcondvar_t struct is protected by mutex taken externally before
* calling any of the wait/signal functions, and passed into the wait functions.
@@ -17,165 +18,34 @@ extern "C" {
typedef struct {
int cv_magic;
char *cv_name;
int cv_name_size;
wait_queue_head_t cv_event;
atomic_t cv_waiters;
kmutex_t *cv_mutex; /* only for verification purposes */
kmutex_t *cv_mutex;
spinlock_t cv_lock;
} kcondvar_t;
typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;
static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
ENTRY;
ASSERT(cvp);
ASSERT(type == CV_DEFAULT);
ASSERT(arg == NULL);
extern void __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg);
extern void __cv_destroy(kcondvar_t *cvp);
extern void __cv_wait(kcondvar_t *cvp, kmutex_t *mp);
extern clock_t __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp,
clock_t expire_time);
extern void __cv_signal(kcondvar_t *cvp);
extern void __cv_broadcast(kcondvar_t *cvp);
cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event);
spin_lock_init(&cvp->cv_lock);
atomic_set(&cvp->cv_waiters, 0);
cvp->cv_mutex = NULL;
cvp->cv_name = NULL;
#define cv_init(cvp, name, type, arg) \
({ \
if ((name) == NULL) \
__cv_init(cvp, #cvp, type, arg); \
else \
__cv_init(cvp, name, type, arg); \
})
#define cv_destroy(cvp) __cv_destroy(cvp)
#define cv_wait(cvp, mp) __cv_wait(cvp, mp)
#define cv_timedwait(cvp, mp, t) __cv_timedwait(cvp, mp, t)
#define cv_signal(cvp) __cv_signal(cvp)
#define cv_broadcast(cvp) __cv_broadcast(cvp)
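
A note on the cv_init() wrapper above: when the caller passes a NULL name,
the macro stringifies its first argument with the preprocessor # operator,
so the condition variable is named after the expression used to pass it.
A hypothetical caller:

    kcondvar_t cv;

    cv_init(&cv, NULL, CV_DEFAULT, NULL);    /* cv_name becomes "&cv" */
    cv_init(&cv, "my_cv", CV_DEFAULT, NULL); /* cv_name becomes "my_cv" */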
if (name) {
cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
if (cvp->cv_name)
strcpy(cvp->cv_name, name);
}
EXIT;
}
static __inline__ void
cv_destroy(kcondvar_t *cvp)
{
ENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
ASSERT(atomic_read(&cvp->cv_waiters) == 0);
ASSERT(!waitqueue_active(&cvp->cv_event));
if (cvp->cv_name)
kfree(cvp->cv_name);
memset(cvp, CV_POISON, sizeof(*cvp));
spin_unlock(&cvp->cv_lock);
EXIT;
}
static __inline__ void
cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
{
DEFINE_WAIT(wait);
ENTRY;
ASSERT(cvp);
ASSERT(mtx);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
ASSERT(mutex_owned(mtx));
if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mtx;
/* Ensure the same mutex is used by all callers */
ASSERT(cvp->cv_mutex == mtx);
spin_unlock(&cvp->cv_lock);
prepare_to_wait_exclusive(&cvp->cv_event, &wait,
TASK_UNINTERRUPTIBLE);
atomic_inc(&cvp->cv_waiters);
/* Mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty. */
mutex_exit(mtx);
schedule();
mutex_enter(mtx);
atomic_dec(&cvp->cv_waiters);
finish_wait(&cvp->cv_event, &wait);
EXIT;
}
/* 'expire_time' argument is an absolute wall clock time in jiffies.
* Return value is time left (expire_time - now) or -1 if timeout occurred.
*/
static __inline__ clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
{
DEFINE_WAIT(wait);
clock_t time_left;
ENTRY;
ASSERT(cvp);
ASSERT(mtx);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
ASSERT(mutex_owned(mtx));
if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mtx;
/* Ensure the same mutex is used by all callers */
ASSERT(cvp->cv_mutex == mtx);
spin_unlock(&cvp->cv_lock);
/* XXX - Does not handle jiffy wrap properly */
time_left = expire_time - jiffies;
if (time_left <= 0)
RETURN(-1);
prepare_to_wait_exclusive(&cvp->cv_event, &wait,
TASK_UNINTERRUPTIBLE);
atomic_inc(&cvp->cv_waiters);
/* Mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty. */
mutex_exit(mtx);
time_left = schedule_timeout(time_left);
mutex_enter(mtx);
atomic_dec(&cvp->cv_waiters);
finish_wait(&cvp->cv_event, &wait);
RETURN(time_left > 0 ? time_left : -1);
}
static __inline__ void
cv_signal(kcondvar_t *cvp)
{
ENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
/* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
* waiter will be made runnable with each call to wake_up().
* Additionally, wake_up() holds a spinlock associated with
* the wait queue to ensure we don't race waking up processes. */
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up(&cvp->cv_event);
EXIT;
}
static __inline__ void
cv_broadcast(kcondvar_t *cvp)
{
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ENTRY;
/* wake_up_all() will wake up all waiters, even those which
* have the WQ_FLAG_EXCLUSIVE flag set. */
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up_all(&cvp->cv_event);
EXIT;
}
#endif /* _SPL_CONDVAR_H */
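
For reference, a hedged usage sketch of the interface above; the mutex,
flag, and thread roles are hypothetical.  The associated mutex must be held
across the predicate check, and cv_wait() drops and reacquires it around
the sleep, so the standard while-loop idiom guards against spurious wakeups.

    static kmutex_t m;
    static kcondvar_t cv;
    static int ready = 0;

    /* Waiting side */
    mutex_enter(&m);
    while (!ready)
            cv_wait(&cv, &m);
    /* 'ready' is true and 'm' is held here */
    mutex_exit(&m);

    /* Signaling side */
    mutex_enter(&m);
    ready = 1;
    cv_signal(&cv);
    mutex_exit(&m);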


@@ -24,6 +24,7 @@ spl-objs += spl-generic.o
spl-objs += spl-atomic.o
spl-objs += spl-mutex.o
spl-objs += spl-kstat.o
spl-objs += spl-condvar.o
splmodule := spl.ko
splmoduledir := @kmoduledir@/kernel/lib/

modules/spl/spl-condvar.c (new file, 175 lines)

@@ -0,0 +1,175 @@
#include <sys/condvar.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_CONDVAR
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
int flags = KM_SLEEP;
ENTRY;
ASSERT(cvp);
ASSERT(name);
ASSERT(type == CV_DEFAULT);
ASSERT(arg == NULL);
cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event);
spin_lock_init(&cvp->cv_lock);
atomic_set(&cvp->cv_waiters, 0);
cvp->cv_mutex = NULL;
cvp->cv_name = NULL;
cvp->cv_name_size = strlen(name) + 1;
/* We may be called when there is a non-zero preempt_count or
* interrupts are disabled, in which case we must not sleep.
*/
if (current_thread_info()->preempt_count || irqs_disabled())
flags = KM_NOSLEEP;
cvp->cv_name = kmem_alloc(cvp->cv_name_size, flags);
if (cvp->cv_name)
strcpy(cvp->cv_name, name);
EXIT;
}
EXPORT_SYMBOL(__cv_init);
void
__cv_destroy(kcondvar_t *cvp)
{
ENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
ASSERT(atomic_read(&cvp->cv_waiters) == 0);
ASSERT(!waitqueue_active(&cvp->cv_event));
if (cvp->cv_name)
kmem_free(cvp->cv_name, cvp->cv_name_size);
memset(cvp, CV_POISON, sizeof(*cvp));
spin_unlock(&cvp->cv_lock);
EXIT;
}
EXPORT_SYMBOL(__cv_destroy);
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
DEFINE_WAIT(wait);
ENTRY;
ASSERT(cvp);
ASSERT(mp);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
ASSERT(mutex_owned(mp));
if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mp;
/* Ensure the same mutex is used by all callers */
ASSERT(cvp->cv_mutex == mp);
spin_unlock(&cvp->cv_lock);
prepare_to_wait_exclusive(&cvp->cv_event, &wait,
TASK_UNINTERRUPTIBLE);
atomic_inc(&cvp->cv_waiters);
/* Mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty. */
mutex_exit(mp);
schedule();
mutex_enter(mp);
atomic_dec(&cvp->cv_waiters);
finish_wait(&cvp->cv_event, &wait);
EXIT;
}
EXPORT_SYMBOL(__cv_wait);
/* 'expire_time' argument is an absolute wall clock time in jiffies.
* Return value is time left (expire_time - now) or -1 if timeout occurred.
*/
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
{
DEFINE_WAIT(wait);
clock_t time_left;
ENTRY;
ASSERT(cvp);
ASSERT(mp);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
ASSERT(mutex_owned(mp));
if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mp;
/* Ensure the same mutex is used by all callers */
ASSERT(cvp->cv_mutex == mp);
spin_unlock(&cvp->cv_lock);
/* XXX - Does not handle jiffy wrap properly */
time_left = expire_time - jiffies;
if (time_left <= 0)
RETURN(-1);
prepare_to_wait_exclusive(&cvp->cv_event, &wait,
TASK_UNINTERRUPTIBLE);
atomic_inc(&cvp->cv_waiters);
/* Mutex should be dropped after prepare_to_wait(); this
* ensures we're linked into the waiters list and avoids the
* race where 'cvp->cv_waiters > 0' but the list is empty. */
mutex_exit(mp);
time_left = schedule_timeout(time_left);
mutex_enter(mp);
atomic_dec(&cvp->cv_waiters);
finish_wait(&cvp->cv_event, &wait);
RETURN(time_left > 0 ? time_left : -1);
}
EXPORT_SYMBOL(__cv_timedwait);
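
Because 'expire_time' is absolute, a caller converts a relative timeout by
adding it to the current jiffies value.  A hypothetical wait of up to one
second, reusing the sketch from the header above:

    clock_t remaining;

    mutex_enter(&m);
    while (!ready) {
            remaining = cv_timedwait(&cv, &m, jiffies + HZ);
            if (remaining == -1)
                    break;  /* timed out with 'ready' still false */
    }
    mutex_exit(&m);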
void
__cv_signal(kcondvar_t *cvp)
{
ENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
/* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
* waiter will be made runnable with each call to wake_up().
* Additionally, wake_up() holds a spinlock associated with
* the wait queue to ensure we don't race waking up processes. */
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up(&cvp->cv_event);
EXIT;
}
EXPORT_SYMBOL(__cv_signal);
void
__cv_broadcast(kcondvar_t *cvp)
{
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ENTRY;
/* wake_up_all() will wake up all waiters, even those which
* have the WQ_FLAG_EXCLUSIVE flag set. */
if (atomic_read(&cvp->cv_waiters) > 0)
wake_up_all(&cvp->cv_event);
EXIT;
}
EXPORT_SYMBOL(__cv_broadcast);


@@ -147,6 +147,8 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
kmem_constructor_t constructor;
void *private;
ASSERT(flags & SLAB_CTOR_CONSTRUCTOR);
/* Ensure constructor verifies are not passed to the registered
* constructors. This may not be safe due to the Solaris constructor
* not being aware of how to handle the SLAB_CTOR_VERIFY flag
@@ -154,6 +156,11 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
if (flags & SLAB_CTOR_VERIFY)
return;
if (flags & SLAB_CTOR_ATOMIC)
flags = KM_NOSLEEP;
else
flags = KM_SLEEP;
/* We can be called with interrupts disabled so it is critical that
* this function and the registered constructor never sleep.
*/
@@ -185,6 +192,9 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
kmem_destructor_t destructor;
void *private;
/* No valid destructor flags */
ASSERT(flags == 0);
/* We can be called with interrupts disabled so it is critical that
* this function and the registered destructor never sleep.
*/
@@ -286,6 +296,7 @@ __kmem_cache_create(char *name, size_t size, size_t align,
/* XXX: - Option currently unsupported by shim layer */
ASSERT(!vmp);
ASSERT(flags == 0);
cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (cache_name == NULL)