Update SPL to use the new debug infrastructure. This means:

- Replacing all BUG_ON()s with proper ASSERT()s
- Using the ENTRY, EXIT, GOTO, and RETURN macros to instrument call paths
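For illustration, the conversion follows roughly the pattern sketched below. This is a minimal, hypothetical example, not code from any one file in this change: spl_example_lookup() and do_lookup() are made-up names, and the include locations for the macros are assumed. BUG_ON() checks become ASSERT()s stating the condition that must hold, and each instrumented function gains an ENTRY at the top and exits through RETURN() (or GOTO() on error paths) so the debug log records the call path.

    #include <sys/mutex.h>  /* kmutex_t, km_magic, KM_MAGIC (as used in this tree) */
    #include <sys/debug.h>  /* assumed home of ASSERT, ENTRY, RETURN, GOTO */

    /* Stand-in for whatever real work the function performs */
    static int
    do_lookup(kmutex_t *mp)
    {
            return 0;
    }

    static int
    spl_example_lookup(kmutex_t *mp)
    {
            int rc;
            ENTRY;

            ASSERT(mp);                        /* was: BUG_ON(mp == NULL); */
            ASSERT(mp->km_magic == KM_MAGIC);  /* was: BUG_ON(mp->km_magic != KM_MAGIC); */

            rc = do_lookup(mp);
            if (rc)
                    RETURN(rc);                /* was: return rc; */

            RETURN(0);                         /* was: return 0; */
    }

Void functions are bracketed the same way, using ENTRY/EXIT rather than RETURN().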



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@78 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-04-21 17:29:47 +00:00
parent 2fae1b3d0a
commit 937879f11d
23 changed files with 410 additions and 271 deletions


@ -11,7 +11,7 @@ extern "C" {
#define DEBUG_CALLB #define DEBUG_CALLB
#ifndef DEBUG_CALLB #ifndef DEBUG_CALLB
#define CALLB_CPR_ASSERT(cp) BUG_ON(!(MUTEX_HELD((cp)->cc_lockp))); #define CALLB_CPR_ASSERT(cp) ASSERT(MUTEX_HELD((cp)->cc_lockp));
#else #else
#define CALLB_CPR_ASSERT(cp) #define CALLB_CPR_ASSERT(cp)
#endif #endif


@ -28,9 +28,9 @@ typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;
static __inline__ void static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg) cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{ {
BUG_ON(cvp == NULL); ASSERT(cvp);
BUG_ON(type != CV_DEFAULT); ASSERT(type == CV_DEFAULT);
BUG_ON(arg != NULL); ASSERT(arg == NULL);
cvp->cv_magic = CV_MAGIC; cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event); init_waitqueue_head(&cvp->cv_event);
@ -49,11 +49,11 @@ cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
static __inline__ void static __inline__ void
cv_destroy(kcondvar_t *cvp) cv_destroy(kcondvar_t *cvp)
{ {
BUG_ON(cvp == NULL); ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock); spin_lock(&cvp->cv_lock);
BUG_ON(cvp->cv_magic != CV_MAGIC); ASSERT(atomic_read(&cvp->cv_waiters) == 0);
BUG_ON(atomic_read(&cvp->cv_waiters) != 0); ASSERT(!waitqueue_active(&cvp->cv_event));
BUG_ON(waitqueue_active(&cvp->cv_event));
if (cvp->cv_name) if (cvp->cv_name)
kfree(cvp->cv_name); kfree(cvp->cv_name);
@ -67,16 +67,17 @@ cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
{ {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
BUG_ON(cvp == NULL || mtx == NULL); ASSERT(cvp);
ASSERT(mtx);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock); spin_lock(&cvp->cv_lock);
BUG_ON(cvp->cv_magic != CV_MAGIC); ASSERT(mutex_owned(mtx));
BUG_ON(!mutex_owned(mtx));
if (cvp->cv_mutex == NULL) if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mtx; cvp->cv_mutex = mtx;
/* Ensure the same mutex is used by all callers */ /* Ensure the same mutex is used by all callers */
BUG_ON(cvp->cv_mutex != mtx); ASSERT(cvp->cv_mutex == mtx);
spin_unlock(&cvp->cv_lock); spin_unlock(&cvp->cv_lock);
prepare_to_wait_exclusive(&cvp->cv_event, &wait, prepare_to_wait_exclusive(&cvp->cv_event, &wait,
@ -103,16 +104,17 @@ cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
clock_t time_left; clock_t time_left;
BUG_ON(cvp == NULL || mtx == NULL); ASSERT(cvp);
ASSERT(mtx);
ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock); spin_lock(&cvp->cv_lock);
BUG_ON(cvp->cv_magic != CV_MAGIC); ASSERT(mutex_owned(mtx));
BUG_ON(!mutex_owned(mtx));
if (cvp->cv_mutex == NULL) if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mtx; cvp->cv_mutex = mtx;
/* Ensure the same mutex is used by all callers */ /* Ensure the same mutex is used by all callers */
BUG_ON(cvp->cv_mutex != mtx); ASSERT(cvp->cv_mutex == mtx);
spin_unlock(&cvp->cv_lock); spin_unlock(&cvp->cv_lock);
/* XXX - Does not handle jiffie wrap properly */ /* XXX - Does not handle jiffie wrap properly */
@ -140,8 +142,8 @@ cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
static __inline__ void static __inline__ void
cv_signal(kcondvar_t *cvp) cv_signal(kcondvar_t *cvp)
{ {
BUG_ON(cvp == NULL); ASSERT(cvp);
BUG_ON(cvp->cv_magic != CV_MAGIC); ASSERT(cvp->cv_magic == CV_MAGIC);
/* All waiters are added with WQ_FLAG_EXCLUSIVE so only one /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
* waiter will be set runable with each call to wake_up(). * waiter will be set runable with each call to wake_up().
@ -154,8 +156,8 @@ cv_signal(kcondvar_t *cvp)
static __inline__ void static __inline__ void
cv_broadcast(kcondvar_t *cvp) cv_broadcast(kcondvar_t *cvp)
{ {
BUG_ON(cvp == NULL); ASSERT(cvp);
BUG_ON(cvp->cv_magic != CV_MAGIC); ASSERT(cvp->cv_magic == CV_MAGIC);
/* Wake_up_all() will wake up all waiters even those which /* Wake_up_all() will wake up all waiters even those which
* have the WQ_FLAG_EXCLUSIVE flag set. */ * have the WQ_FLAG_EXCLUSIVE flag set. */


@ -22,6 +22,7 @@ extern unsigned long spl_debug_subsys;
#define S_DEBUG 0x00001000 #define S_DEBUG 0x00001000
#define S_GENERIC 0x00002000 #define S_GENERIC 0x00002000
#define S_PROC 0x00004000 #define S_PROC 0x00004000
#define S_MODULE 0x00008000
#define D_TRACE 0x00000001 #define D_TRACE 0x00000001
#define D_INFO 0x00000002 #define D_INFO 0x00000002
@ -148,9 +149,47 @@ struct page_collection {
#define SBUG() spl_debug_bug(__FILE__, __FUNCTION__, __LINE__); #define SBUG() spl_debug_bug(__FILE__, __FUNCTION__, __LINE__);
#ifdef __ia64__
#define CDEBUG_STACK() (THREAD_SIZE - \
((unsigned long)__builtin_dwarf_cfa() & \
(THREAD_SIZE - 1)))
#else
#define CDEBUG_STACK() (THREAD_SIZE - \
((unsigned long)__builtin_frame_address(0) & \
(THREAD_SIZE - 1)))
# endif /* __ia64__ */
#define __CHECK_STACK(file, func, line) \
do { \
unsigned long _stack = CDEBUG_STACK(); \
unsigned long _soft_limit = (9 * THREAD_SIZE) / 10; \
\
if (unlikely(_stack > _soft_limit && _stack > spl_debug_stack)){\
spl_debug_stack = _stack; \
if (_stack <= THREAD_SIZE) { \
spl_debug_msg(NULL, D_TRACE, D_WARNING, \
file, func, line, "Warning " \
"exceeded 90%% of maximum safe " \
"stack size (%lu/%lu)\n", \
_stack, THREAD_SIZE); \
} else { \
spl_debug_msg(NULL, D_TRACE, D_WARNING, \
file, func, line, "Error " \
"exceeded maximum safe stack " \
"size (%lu/%lu)\n", \
_stack, THREAD_SIZE); \
SBUG(); \
} \
} \
} while (0)
#define CHECK_STACK()__CHECK_STACK(__FILE__, __func__, __LINE__)
/* ASSERTION that is safe to use within the debug system */ /* ASSERTION that is safe to use within the debug system */
#define __ASSERT(cond) \ #define __ASSERT(cond) \
do { \ do { \
CHECK_STACK(); \
\
if (unlikely(!(cond))) { \ if (unlikely(!(cond))) { \
printk(KERN_ERR "ASSERTION("#cond") failed"); \ printk(KERN_ERR "ASSERTION("#cond") failed"); \
SBUG(); \ SBUG(); \
@ -168,6 +207,8 @@ do { \
/* ASSERTION that will debug log used outside the debug sysytem */ /* ASSERTION that will debug log used outside the debug sysytem */
#define ASSERT(cond) \ #define ASSERT(cond) \
do { \ do { \
CHECK_STACK(); \
\
if (unlikely(!(cond))) { \ if (unlikely(!(cond))) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \ spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \ __FILE__, __FUNCTION__, __LINE__, \
@ -178,6 +219,8 @@ do { \
#define ASSERTF(cond, fmt, a...) \ #define ASSERTF(cond, fmt, a...) \
do { \ do { \
CHECK_STACK(); \
\
if (unlikely(!(cond))) { \ if (unlikely(!(cond))) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \ spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \ __FILE__, __FUNCTION__, __LINE__, \
@ -191,6 +234,9 @@ do { \
do { \ do { \
const TYPE __left = (TYPE)(LEFT); \ const TYPE __left = (TYPE)(LEFT); \
const TYPE __right = (TYPE)(RIGHT); \ const TYPE __right = (TYPE)(RIGHT); \
\
CHECK_STACK(); \
\
if (!(__left OP __right)) { \ if (!(__left OP __right)) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \ spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \ __FILE__, __FUNCTION__, __LINE__, \
@ -214,32 +260,6 @@ do { \
spl_debug_vmsg(cdls, subsys, mask, file, fn, \ spl_debug_vmsg(cdls, subsys, mask, file, fn, \
line, NULL, NULL, format, ##a) line, NULL, NULL, format, ##a)
#ifdef __ia64__
#define CDEBUG_STACK() (THREAD_SIZE - \
((unsigned long)__builtin_dwarf_cfa() & \
(THREAD_SIZE - 1)))
#else
#define CDEBUG_STACK() (THREAD_SIZE - \
((unsigned long)__builtin_frame_address(0) & \
(THREAD_SIZE - 1)))
# endif /* __ia64__ */
#define __CHECK_STACK(file, func, line) \
do { \
unsigned long _stack = CDEBUG_STACK(); \
\
if (_stack > (3*THREAD_SIZE/4) && _stack > spl_debug_stack) { \
spl_debug_stack = _stack; \
spl_debug_msg(NULL, D_TRACE, D_WARNING, \
file, func, line, \
"Exceeded maximum safe stack " \
"%lu/%lu\n", _stack, THREAD_SIZE); \
__ASSERT(0); \
} \
} while (0)
#define CHECK_STACK()__CHECK_STACK(__FILE__, __func__, __LINE__)
#define __CDEBUG(cdls, subsys, mask, format, a...) \ #define __CDEBUG(cdls, subsys, mask, format, a...) \
do { \ do { \
CHECK_STACK(); \ CHECK_STACK(); \


@ -13,6 +13,7 @@ extern "C" {
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <sys/debug.h>
/* /*
* Memory allocation interfaces * Memory allocation interfaces
*/ */
@ -60,7 +61,7 @@ extern int kmem_warning_flag;
#define kmem_free(ptr, size) \ #define kmem_free(ptr, size) \
({ \ ({ \
BUG_ON(!(ptr) || (size) < 0); \ ASSERT((ptr) || (size > 0)); \
atomic64_sub((size), &kmem_alloc_used); \ atomic64_sub((size), &kmem_alloc_used); \
memset(ptr, 0x5a, (size)); /* Poison */ \ memset(ptr, 0x5a, (size)); /* Poison */ \
kfree(ptr); \ kfree(ptr); \
@ -69,7 +70,7 @@ extern int kmem_warning_flag;
#define __vmem_alloc(size, flags) \ #define __vmem_alloc(size, flags) \
({ void *_ptr_; \ ({ void *_ptr_; \
\ \
BUG_ON(!(flags & KM_SLEEP)); \ ASSERT(flags & KM_SLEEP); \
\ \
_ptr_ = (void *)__vmalloc((size), ((flags) | \ _ptr_ = (void *)__vmalloc((size), ((flags) | \
__GFP_HIGHMEM), PAGE_KERNEL); \ __GFP_HIGHMEM), PAGE_KERNEL); \
@ -93,7 +94,7 @@ extern int kmem_warning_flag;
#define vmem_free(ptr, size) \ #define vmem_free(ptr, size) \
({ \ ({ \
BUG_ON(!(ptr) || (size) < 0); \ ASSERT((ptr) || (size > 0)); \
atomic64_sub((size), &vmem_alloc_used); \ atomic64_sub((size), &vmem_alloc_used); \
memset(ptr, 0x5a, (size)); /* Poison */ \ memset(ptr, 0x5a, (size)); /* Poison */ \
vfree(ptr); \ vfree(ptr); \
@ -105,7 +106,7 @@ extern int kmem_warning_flag;
#define kmem_zalloc(size, flags) kzalloc((size), (flags)) #define kmem_zalloc(size, flags) kzalloc((size), (flags))
#define kmem_free(ptr, size) \ #define kmem_free(ptr, size) \
({ \ ({ \
BUG_ON(!(ptr) || (size) < 0); \ ASSERT((ptr) || (size > 0)); \
kfree(ptr); \ kfree(ptr); \
}) })
@ -116,7 +117,7 @@ extern int kmem_warning_flag;
PAGE_KERNEL) PAGE_KERNEL)
#define vmem_free(ptr, size) \ #define vmem_free(ptr, size) \
({ \ ({ \
BUG_ON(!(ptr) || (size) < 0); \ ASSERT((ptr) || (size > 0)); \
vfree(ptr); \ vfree(ptr); \
}) })


@ -36,9 +36,9 @@ typedef struct {
static __inline__ void static __inline__ void
mutex_init(kmutex_t *mp, char *name, int type, void *ibc) mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
{ {
BUG_ON(mp == NULL); ASSERT(mp);
BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */ ASSERT(ibc == NULL); /* XXX - Spin mutexes not needed */
BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */ ASSERT(type == MUTEX_DEFAULT); /* XXX - Only default type supported */
mp->km_magic = KM_MAGIC; mp->km_magic = KM_MAGIC;
spin_lock_init(&mp->km_lock); spin_lock_init(&mp->km_lock);
@ -57,9 +57,9 @@ mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
static __inline__ void static __inline__ void
mutex_destroy(kmutex_t *mp) mutex_destroy(kmutex_t *mp)
{ {
BUG_ON(mp == NULL); ASSERT(mp);
ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_magic != KM_MAGIC);
if (mp->km_name) if (mp->km_name)
kfree(mp->km_name); kfree(mp->km_name);
@ -71,9 +71,9 @@ mutex_destroy(kmutex_t *mp)
static __inline__ void static __inline__ void
mutex_enter(kmutex_t *mp) mutex_enter(kmutex_t *mp)
{ {
BUG_ON(mp == NULL); ASSERT(mp);
ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_magic != KM_MAGIC);
if (unlikely(in_atomic() && !current->exit_state)) { if (unlikely(in_atomic() && !current->exit_state)) {
printk("May schedule while atomic: %s/0x%08x/%d\n", printk("May schedule while atomic: %s/0x%08x/%d\n",
@ -87,7 +87,7 @@ mutex_enter(kmutex_t *mp)
down(&mp->km_sem); down(&mp->km_sem);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_owner != NULL); ASSERT(mp->km_owner == NULL);
mp->km_owner = current; mp->km_owner = current;
spin_unlock(&mp->km_lock); spin_unlock(&mp->km_lock);
} }
@ -98,9 +98,9 @@ mutex_tryenter(kmutex_t *mp)
{ {
int rc; int rc;
BUG_ON(mp == NULL); ASSERT(mp);
ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_magic != KM_MAGIC);
if (unlikely(in_atomic() && !current->exit_state)) { if (unlikely(in_atomic() && !current->exit_state)) {
printk("May schedule while atomic: %s/0x%08x/%d\n", printk("May schedule while atomic: %s/0x%08x/%d\n",
@ -113,7 +113,7 @@ mutex_tryenter(kmutex_t *mp)
rc = down_trylock(&mp->km_sem); /* returns 0 if acquired */ rc = down_trylock(&mp->km_sem); /* returns 0 if acquired */
if (rc == 0) { if (rc == 0) {
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_owner != NULL); ASSERT(mp->km_owner == NULL);
mp->km_owner = current; mp->km_owner = current;
spin_unlock(&mp->km_lock); spin_unlock(&mp->km_lock);
return 1; return 1;
@ -124,10 +124,11 @@ mutex_tryenter(kmutex_t *mp)
static __inline__ void static __inline__ void
mutex_exit(kmutex_t *mp) mutex_exit(kmutex_t *mp)
{ {
BUG_ON(mp == NULL); ASSERT(mp);
ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_magic != KM_MAGIC);
BUG_ON(mp->km_owner != current); ASSERT(mp->km_owner == current);
mp->km_owner = NULL; mp->km_owner = NULL;
spin_unlock(&mp->km_lock); spin_unlock(&mp->km_lock);
up(&mp->km_sem); up(&mp->km_sem);
@ -139,9 +140,9 @@ mutex_owned(kmutex_t *mp)
{ {
int rc; int rc;
BUG_ON(mp == NULL); ASSERT(mp);
ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_magic != KM_MAGIC);
rc = (mp->km_owner == current); rc = (mp->km_owner == current);
spin_unlock(&mp->km_lock); spin_unlock(&mp->km_lock);
@ -154,9 +155,9 @@ mutex_owner(kmutex_t *mp)
{ {
kthread_t *thr; kthread_t *thr;
BUG_ON(mp == NULL); ASSERT(mp);
ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock); spin_lock(&mp->km_lock);
BUG_ON(mp->km_magic != KM_MAGIC);
thr = mp->km_owner; thr = mp->km_owner;
spin_unlock(&mp->km_lock); spin_unlock(&mp->km_lock);


@ -17,7 +17,7 @@ extern "C" {
static __inline__ int static __inline__ int
random_get_bytes(uint8_t *ptr, size_t len) random_get_bytes(uint8_t *ptr, size_t len)
{ {
BUG_ON(len < 0); ASSERT(len >= 0);
get_random_bytes((void *)ptr,(int)len); get_random_bytes((void *)ptr,(int)len);
return 0; return 0;
} }
@ -26,7 +26,7 @@ random_get_bytes(uint8_t *ptr, size_t len)
static __inline__ int static __inline__ int
random_get_pseudo_bytes(uint8_t *ptr, size_t len) random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{ {
BUG_ON(len < 0); ASSERT(len >= 0);
get_random_bytes((void *)ptr,(int)len); get_random_bytes((void *)ptr,(int)len);
return 0; return 0;
} }

View File

@ -102,8 +102,9 @@ extern int __rw_lock_held(krwlock_t *rwlp);
static __inline__ void static __inline__ void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg) rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{ {
BUG_ON(type != RW_DEFAULT); /* XXX no irq handler use */ ASSERT(type == RW_DEFAULT); /* XXX no irq handler use */
BUG_ON(arg != NULL); /* XXX no irq handler use */ ASSERT(arg == NULL); /* XXX no irq handler use */
rwlp->rw_magic = RW_MAGIC; rwlp->rw_magic = RW_MAGIC;
rwlp->rw_owner = NULL; /* no one holds the write lock yet */ rwlp->rw_owner = NULL; /* no one holds the write lock yet */
init_rwsem(&rwlp->rw_sem); init_rwsem(&rwlp->rw_sem);
@ -119,11 +120,11 @@ rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
static __inline__ void static __inline__ void
rw_destroy(krwlock_t *rwlp) rw_destroy(krwlock_t *rwlp)
{ {
BUG_ON(rwlp == NULL); ASSERT(rwlp);
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp->rw_magic == RW_MAGIC);
BUG_ON(rwlp->rw_owner != NULL); ASSERT(rwlp->rw_owner == NULL);
spin_lock(&rwlp->rw_sem.wait_lock); spin_lock(&rwlp->rw_sem.wait_lock);
BUG_ON(!list_empty(&rwlp->rw_sem.wait_list)); ASSERT(list_empty(&rwlp->rw_sem.wait_list));
spin_unlock(&rwlp->rw_sem.wait_lock); spin_unlock(&rwlp->rw_sem.wait_lock);
if (rwlp->rw_name) if (rwlp->rw_name)
@ -139,7 +140,9 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
{ {
int result; int result;
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp);
ASSERT(rwlp->rw_magic == RW_MAGIC);
switch (rw) { switch (rw) {
/* these functions return 1 if success, 0 if contention */ /* these functions return 1 if success, 0 if contention */
case RW_READER: case RW_READER:
@ -159,12 +162,12 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
if (result) { if (result) {
/* there better not be anyone else /* there better not be anyone else
* holding the write lock here */ * holding the write lock here */
BUG_ON(rwlp->rw_owner != NULL); ASSERT(rwlp->rw_owner == NULL);
rwlp->rw_owner = current; rwlp->rw_owner = current;
} }
break; break;
default: default:
BUG_ON(1); SBUG();
} }
return result; return result;
@ -173,7 +176,9 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
static __inline__ void static __inline__ void
rw_enter(krwlock_t *rwlp, krw_t rw) rw_enter(krwlock_t *rwlp, krw_t rw)
{ {
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp);
ASSERT(rwlp->rw_magic == RW_MAGIC);
switch (rw) { switch (rw) {
case RW_READER: case RW_READER:
/* Here the Solaris code would block /* Here the Solaris code would block
@ -192,18 +197,19 @@ rw_enter(krwlock_t *rwlp, krw_t rw)
/* there better not be anyone else /* there better not be anyone else
* holding the write lock here */ * holding the write lock here */
BUG_ON(rwlp->rw_owner != NULL); ASSERT(rwlp->rw_owner == NULL);
rwlp->rw_owner = current; rwlp->rw_owner = current;
break; break;
default: default:
BUG_ON(1); SBUG();
} }
} }
static __inline__ void static __inline__ void
rw_exit(krwlock_t *rwlp) rw_exit(krwlock_t *rwlp)
{ {
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp);
ASSERT(rwlp->rw_magic == RW_MAGIC);
/* rw_owner is held by current /* rw_owner is held by current
* thread iff it is a writer */ * thread iff it is a writer */
@ -218,8 +224,10 @@ rw_exit(krwlock_t *rwlp)
static __inline__ void static __inline__ void
rw_downgrade(krwlock_t *rwlp) rw_downgrade(krwlock_t *rwlp)
{ {
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp);
BUG_ON(rwlp->rw_owner != current); ASSERT(rwlp->rw_magic == RW_MAGIC);
ASSERT(rwlp->rw_owner == current);
rwlp->rw_owner = NULL; rwlp->rw_owner = NULL;
downgrade_write(&rwlp->rw_sem); downgrade_write(&rwlp->rw_sem);
} }
@ -232,7 +240,9 @@ static __inline__ int
rw_tryupgrade(krwlock_t *rwlp) rw_tryupgrade(krwlock_t *rwlp)
{ {
int result = 0; int result = 0;
BUG_ON(rwlp->rw_magic != RW_MAGIC);
ASSERT(rwlp);
ASSERT(rwlp->rw_magic == RW_MAGIC);
spin_lock(&rwlp->rw_sem.wait_lock); spin_lock(&rwlp->rw_sem.wait_lock);
@ -280,8 +290,8 @@ rw_tryupgrade(krwlock_t *rwlp)
/* Check if upgrade failed. Should not ever happen /* Check if upgrade failed. Should not ever happen
* if we got to this point */ * if we got to this point */
BUG_ON(!result); ASSERT(result);
BUG_ON(rwlp->rw_owner != NULL); ASSERT(rwlp->rw_owner == NULL);
rwlp->rw_owner = current; rwlp->rw_owner = current;
spin_unlock(&rwlp->rw_sem.wait_lock); spin_unlock(&rwlp->rw_sem.wait_lock);
return 1; return 1;
@ -290,7 +300,9 @@ rw_tryupgrade(krwlock_t *rwlp)
static __inline__ kthread_t * static __inline__ kthread_t *
rw_owner(krwlock_t *rwlp) rw_owner(krwlock_t *rwlp)
{ {
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp);
ASSERT(rwlp->rw_magic == RW_MAGIC);
return rwlp->rw_owner; return rwlp->rw_owner;
} }


@ -16,7 +16,7 @@
static __inline__ int static __inline__ int
issig(int why) issig(int why)
{ {
BUG_ON(!(why == FORREAL || why == JUSTLOOKING)); ASSERT(why == FORREAL || why == JUSTLOOKING);
return signal_pending(current); return signal_pending(current);
} }


@ -98,8 +98,8 @@ extern "C" {
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0) #define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0)
/* Missing globals */ /* Missing globals */
extern unsigned long spl_hostid; extern long spl_hostid;
extern char spl_hw_serial[11]; extern char hw_serial[11];
extern int p0; extern int p0;
/* Missing misc functions */ /* Missing misc functions */


@ -41,7 +41,7 @@ typedef enum clock_type {
#define hz \ #define hz \
({ \ ({ \
BUG_ON(HZ < 100 || HZ > MICROSEC); \ ASSERT(HZ >= 100 && HZ <= MICROSEC); \
HZ; \ HZ; \
}) })


@ -1,5 +1,11 @@
#include <sys/atomic.h> #include <sys/atomic.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_ATOMIC
/* Global atomic lock declarations */ /* Global atomic lock declarations */
spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED; spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED;
spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED; spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED;


@ -1104,10 +1104,10 @@ EXPORT_SYMBOL(spl_debug_dumpstack);
void spl_debug_bug(char *file, const char *func, const int line) void spl_debug_bug(char *file, const char *func, const int line)
{ {
spl_debug_catastrophe = 1; spl_debug_catastrophe = 1;
spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SPL BUG\n"); spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");
if (in_interrupt()) { if (in_interrupt()) {
panic("SPL BUG in interrupt.\n"); panic("SBUG in interrupt.\n");
/* not reached */ /* not reached */
} }
@ -1119,7 +1119,7 @@ void spl_debug_bug(char *file, const char *func, const int line)
spl_debug_dumplog(); spl_debug_dumplog();
if (spl_debug_panic_on_bug) if (spl_debug_panic_on_bug)
panic("SPL BUG"); panic("SBUG");
set_task_state(current, TASK_UNINTERRUPTIBLE); set_task_state(current, TASK_UNINTERRUPTIBLE);
while (1) while (1)


@ -2,6 +2,12 @@
#include <sys/cmn_err.h> #include <sys/cmn_err.h>
#include "config.h" #include "config.h"
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_GENERIC
static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" }; static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" }; static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
@ -25,7 +31,7 @@ cmn_err(int ce, const char *fmt, ...)
vsnprintf(msg, MAXMSGLEN - 1, fmt, ap); vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
va_end(ap); va_end(ap);
printk("%s", msg); CERROR("%s", msg);
} /* cmn_err() */ } /* cmn_err() */
EXPORT_SYMBOL(cmn_err); EXPORT_SYMBOL(cmn_err);
@ -39,7 +45,7 @@ vcmn_err(int ce, const char *fmt, va_list ap)
if (ce != CE_NOTE) { /* suppress noise in stress testing */ if (ce != CE_NOTE) { /* suppress noise in stress testing */
vsnprintf(msg, MAXMSGLEN - 1, fmt, ap); vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
printk("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]); CERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
} }
} /* vcmn_err() */ } /* vcmn_err() */
EXPORT_SYMBOL(vcmn_err); EXPORT_SYMBOL(vcmn_err);


@ -13,11 +13,11 @@
#define DEBUG_SUBSYSTEM S_GENERIC #define DEBUG_SUBSYSTEM S_GENERIC
unsigned long spl_hostid = 0; long spl_hostid = 0;
EXPORT_SYMBOL(spl_hostid); EXPORT_SYMBOL(spl_hostid);
char spl_hw_serial[11] = "<none>"; char hw_serial[11] = "<none>";
EXPORT_SYMBOL(spl_hw_serial); EXPORT_SYMBOL(hw_serial);
int p0 = 0; int p0 = 0;
EXPORT_SYMBOL(p0); EXPORT_SYMBOL(p0);
@ -79,7 +79,7 @@ set_hostid(void)
NULL }; NULL };
/* Doing address resolution in the kernel is tricky and just /* Doing address resolution in the kernel is tricky and just
* not a good idea in general. So to set the proper 'spl_hw_serial' * not a good idea in general. So to set the proper 'hw_serial'
* use the usermodehelper support to ask '/bin/sh' to run * use the usermodehelper support to ask '/bin/sh' to run
* '/usr/bin/hostid' and redirect the result to /proc/sys/spl/hostid * '/usr/bin/hostid' and redirect the result to /proc/sys/spl/hostid
* for us to use. It's a horific solution but it will do for now. * for us to use. It's a horific solution but it will do for now.
@ -107,7 +107,7 @@ static int __init spl_init(void)
if ((rc = set_hostid())) if ((rc = set_hostid()))
GOTO(out4, rc = -EADDRNOTAVAIL); GOTO(out4, rc = -EADDRNOTAVAIL);
CWARN("Loaded Solaris Porting Layer v%s\n", VERSION); printk("SPL: Loaded Solaris Porting Layer v%s\n", VERSION);
RETURN(rc); RETURN(rc);
out4: out4:
proc_fini(); proc_fini();
@ -127,7 +127,7 @@ static void spl_fini(void)
{ {
ENTRY; ENTRY;
CWARN("Unloaded Solaris Porting Layer v%s\n", VERSION); printk("SPL: Unloaded Solaris Porting Layer v%s\n", VERSION);
proc_fini(); proc_fini();
vn_fini(); vn_fini();
kmem_fini(); kmem_fini();


@ -1,5 +1,11 @@
#include <sys/kmem.h> #include <sys/kmem.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_KMEM
/* /*
* Memory allocation interfaces * Memory allocation interfaces
*/ */
@ -120,7 +126,8 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
/* Callback list must be in sync with linux slab caches */ /* Callback list must be in sync with linux slab caches */
kcc = kmem_cache_find_cache_cb(cache); kcc = kmem_cache_find_cache_cb(cache);
BUG_ON(!kcc); ASSERT(kcc);
constructor = kcc->kcc_constructor; constructor = kcc->kcc_constructor;
private = kcc->kcc_private; private = kcc->kcc_private;
@ -144,7 +151,8 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
/* Callback list must be in sync with linux slab caches */ /* Callback list must be in sync with linux slab caches */
kcc = kmem_cache_find_cache_cb(cache); kcc = kmem_cache_find_cache_cb(cache);
BUG_ON(!kcc); ASSERT(kcc);
destructor = kcc->kcc_destructor; destructor = kcc->kcc_destructor;
private = kcc->kcc_private; private = kcc->kcc_private;
@ -213,20 +221,21 @@ __kmem_cache_create(char *name, size_t size, size_t align,
kmem_cache_cb_t *kcc; kmem_cache_cb_t *kcc;
int shrinker_flag = 0; int shrinker_flag = 0;
char *cache_name; char *cache_name;
ENTRY;
/* FIXME: - Option currently unsupported by shim layer */ /* XXX: - Option currently unsupported by shim layer */
BUG_ON(vmp); ASSERT(!vmp);
cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL); cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (cache_name == NULL) if (cache_name == NULL)
return NULL; RETURN(NULL);
strcpy(cache_name, name); strcpy(cache_name, name);
cache = kmem_cache_create(cache_name, size, align, flags, cache = kmem_cache_create(cache_name, size, align, flags,
kmem_cache_generic_constructor, kmem_cache_generic_constructor,
kmem_cache_generic_destructor); kmem_cache_generic_destructor);
if (cache == NULL) if (cache == NULL)
return NULL; RETURN(NULL);
/* Register shared shrinker function on initial cache create */ /* Register shared shrinker function on initial cache create */
spin_lock(&kmem_cache_cb_lock); spin_lock(&kmem_cache_cb_lock);
@ -236,7 +245,7 @@ __kmem_cache_create(char *name, size_t size, size_t align,
if (kmem_cache_shrinker == NULL) { if (kmem_cache_shrinker == NULL) {
kmem_cache_destroy(cache); kmem_cache_destroy(cache);
spin_unlock(&kmem_cache_cb_lock); spin_unlock(&kmem_cache_cb_lock);
return NULL; RETURN(NULL);
} }
} }
@ -249,10 +258,10 @@ __kmem_cache_create(char *name, size_t size, size_t align,
remove_shrinker(kmem_cache_shrinker); remove_shrinker(kmem_cache_shrinker);
kmem_cache_destroy(cache); kmem_cache_destroy(cache);
return NULL; RETURN(NULL);
} }
return cache; RETURN(cache);
} }
EXPORT_SYMBOL(__kmem_cache_create); EXPORT_SYMBOL(__kmem_cache_create);
@ -265,12 +274,13 @@ __kmem_cache_destroy(kmem_cache_t *cache)
char *name; char *name;
unsigned long flags; unsigned long flags;
int rc; int rc;
ENTRY;
spin_lock_irqsave(&kmem_cache_cb_lock, flags); spin_lock_irqsave(&kmem_cache_cb_lock, flags);
kcc = kmem_cache_find_cache_cb(cache); kcc = kmem_cache_find_cache_cb(cache);
spin_unlock_irqrestore(&kmem_cache_cb_lock, flags); spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
if (kcc == NULL) if (kcc == NULL)
return -EINVAL; RETURN(-EINVAL);
name = (char *)kmem_cache_name(cache); name = (char *)kmem_cache_name(cache);
rc = kmem_cache_destroy(cache); rc = kmem_cache_destroy(cache);
@ -283,38 +293,44 @@ __kmem_cache_destroy(kmem_cache_t *cache)
remove_shrinker(kmem_cache_shrinker); remove_shrinker(kmem_cache_shrinker);
spin_unlock_irqrestore(&kmem_cache_cb_lock, flags); spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
return rc; RETURN(rc);
} }
EXPORT_SYMBOL(__kmem_cache_destroy); EXPORT_SYMBOL(__kmem_cache_destroy);
void void
__kmem_reap(void) { __kmem_reap(void)
{
ENTRY;
/* Since there's no easy hook in to linux to force all the registered /* Since there's no easy hook in to linux to force all the registered
* shrinkers to run we just run the ones registered for this shim */ * shrinkers to run we just run the ones registered for this shim */
kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL); kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
EXIT;
} }
EXPORT_SYMBOL(__kmem_reap); EXPORT_SYMBOL(__kmem_reap);
int int
kmem_init(void) kmem_init(void)
{ {
ENTRY;
#ifdef DEBUG_KMEM #ifdef DEBUG_KMEM
atomic64_set(&kmem_alloc_used, 0); atomic64_set(&kmem_alloc_used, 0);
atomic64_set(&vmem_alloc_used, 0); atomic64_set(&vmem_alloc_used, 0);
#endif #endif
return 0; RETURN(0);
} }
void void
kmem_fini(void) kmem_fini(void)
{ {
ENTRY;
#ifdef DEBUG_KMEM #ifdef DEBUG_KMEM
if (atomic64_read(&kmem_alloc_used) != 0) if (atomic64_read(&kmem_alloc_used) != 0)
printk("spl: Warning kmem leaked %ld/%ld bytes\n", CWARN("kmem leaked %ld/%ld bytes\n",
atomic_read(&kmem_alloc_used), kmem_alloc_max); atomic_read(&kmem_alloc_used), kmem_alloc_max);
if (atomic64_read(&vmem_alloc_used) != 0) if (atomic64_read(&vmem_alloc_used) != 0)
printk("spl: Warning vmem leaked %ld/%ld bytes\n", CWARN("vmem leaked %ld/%ld bytes\n",
atomic_read(&vmem_alloc_used), vmem_alloc_max); atomic_read(&vmem_alloc_used), vmem_alloc_max);
#endif #endif
EXIT;
} }


@ -1,39 +1,47 @@
#include <sys/kobj.h> #include <sys/kobj.h>
#include "config.h" #include "config.h"
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_KOBJ
struct _buf * struct _buf *
kobj_open_file(const char *name) kobj_open_file(const char *name)
{ {
struct _buf *file; struct _buf *file;
vnode_t *vp; vnode_t *vp;
int rc; int rc;
ENTRY;
if ((rc = vn_open(name, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0))) if ((rc = vn_open(name, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0)))
return ((_buf_t *)-1UL); RETURN((_buf_t *)-1UL);
file = kmalloc(sizeof(_buf_t), GFP_KERNEL); file = kmalloc(sizeof(_buf_t), GFP_KERNEL);
file->vp = vp; file->vp = vp;
return file; RETURN(file);
} /* kobj_open_file() */ } /* kobj_open_file() */
EXPORT_SYMBOL(kobj_open_file); EXPORT_SYMBOL(kobj_open_file);
void void
kobj_close_file(struct _buf *file) kobj_close_file(struct _buf *file)
{ {
ENTRY;
VOP_CLOSE(file->vp, 0, 0, 0, 0, 0); VOP_CLOSE(file->vp, 0, 0, 0, 0, 0);
VN_RELE(file->vp); VN_RELE(file->vp);
kfree(file); kfree(file);
EXIT;
return;
} /* kobj_close_file() */ } /* kobj_close_file() */
EXPORT_SYMBOL(kobj_close_file); EXPORT_SYMBOL(kobj_close_file);
int int
kobj_read_file(struct _buf *file, char *buf, ssize_t size, offset_t off) kobj_read_file(struct _buf *file, char *buf, ssize_t size, offset_t off)
{ {
return vn_rdwr(UIO_READ, file->vp, buf, size, off, ENTRY;
UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL); RETURN(vn_rdwr(UIO_READ, file->vp, buf, size, off,
UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL));
} /* kobj_read_file() */ } /* kobj_read_file() */
EXPORT_SYMBOL(kobj_read_file); EXPORT_SYMBOL(kobj_read_file);
@ -42,13 +50,14 @@ kobj_get_filesize(struct _buf *file, uint64_t *size)
{ {
vattr_t vap; vattr_t vap;
int rc; int rc;
ENTRY;
rc = VOP_GETATTR(file->vp, &vap, 0, 0, NULL); rc = VOP_GETATTR(file->vp, &vap, 0, 0, NULL);
if (rc) if (rc)
return rc; RETURN(rc);
*size = vap.va_size; *size = vap.va_size;
return rc; RETURN(rc);
} /* kobj_get_filesize() */ } /* kobj_get_filesize() */
EXPORT_SYMBOL(kobj_get_filesize); EXPORT_SYMBOL(kobj_get_filesize);


@ -2,6 +2,12 @@
#include <sys/sunddi.h> #include <sys/sunddi.h>
#include "config.h" #include "config.h"
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_MODULE
static spinlock_t dev_info_lock = SPIN_LOCK_UNLOCKED; static spinlock_t dev_info_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dev_info_list); static LIST_HEAD(dev_info_list);
@ -50,29 +56,30 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
struct cb_ops *cb_ops; struct cb_ops *cb_ops;
struct file_operations *fops; struct file_operations *fops;
int rc; int rc;
ENTRY;
BUG_ON(spec_type != S_IFCHR); ASSERT(spec_type == S_IFCHR);
BUG_ON(minor_num >= di->di_minors); ASSERT(minor_num < di->di_minors);
BUG_ON(strcmp(node_type, DDI_PSEUDO)); ASSERT(!strcmp(node_type, DDI_PSEUDO));
BUG_ON(flag != 0); ASSERT(flag == 0);
fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL); fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
if (fops == NULL) if (fops == NULL)
return DDI_FAILURE; RETURN(DDI_FAILURE);
cdev = cdev_alloc(); cdev = cdev_alloc();
if (cdev == NULL) { if (cdev == NULL) {
kfree(fops); kfree(fops);
return DDI_FAILURE; RETURN(DDI_FAILURE);
} }
cdev->ops = fops; cdev->ops = fops;
mutex_enter(&di->di_lock); mutex_enter(&di->di_lock);
dev_ops = di->di_ops; dev_ops = di->di_ops;
BUG_ON(dev_ops == NULL); ASSERT(dev_ops);
cb_ops = di->di_ops->devo_cb_ops; cb_ops = di->di_ops->devo_cb_ops;
BUG_ON(cb_ops == NULL); ASSERT(cb_ops);
/* Setup the fops to cb_ops mapping */ /* Setup the fops to cb_ops mapping */
fops->owner = mod; fops->owner = mod;
@ -93,42 +100,42 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
fops->write = mod_generic_write; fops->write = mod_generic_write;
#endif #endif
/* XXX: Currently unsupported operations */ /* XXX: Currently unsupported operations */
BUG_ON(cb_ops->cb_open != NULL); ASSERT(cb_ops->cb_open == NULL);
BUG_ON(cb_ops->cb_close != NULL); ASSERT(cb_ops->cb_close == NULL);
BUG_ON(cb_ops->cb_read != NULL); ASSERT(cb_ops->cb_read == NULL);
BUG_ON(cb_ops->cb_write != NULL); ASSERT(cb_ops->cb_write == NULL);
BUG_ON(cb_ops->cb_strategy != NULL); ASSERT(cb_ops->cb_strategy == NULL);
BUG_ON(cb_ops->cb_print != NULL); ASSERT(cb_ops->cb_print == NULL);
BUG_ON(cb_ops->cb_dump != NULL); ASSERT(cb_ops->cb_dump == NULL);
BUG_ON(cb_ops->cb_devmap != NULL); ASSERT(cb_ops->cb_devmap == NULL);
BUG_ON(cb_ops->cb_mmap != NULL); ASSERT(cb_ops->cb_mmap == NULL);
BUG_ON(cb_ops->cb_segmap != NULL); ASSERT(cb_ops->cb_segmap == NULL);
BUG_ON(cb_ops->cb_chpoll != NULL); ASSERT(cb_ops->cb_chpoll == NULL);
BUG_ON(cb_ops->cb_prop_op != NULL); ASSERT(cb_ops->cb_prop_op == NULL);
BUG_ON(cb_ops->cb_str != NULL); ASSERT(cb_ops->cb_str == NULL);
BUG_ON(cb_ops->cb_aread != NULL); ASSERT(cb_ops->cb_aread == NULL);
BUG_ON(cb_ops->cb_awrite != NULL); ASSERT(cb_ops->cb_awrite == NULL);
di->di_minor = minor_num; di->di_minor = minor_num;
di->di_dev = MKDEV(di->di_major, di->di_minor); di->di_dev = MKDEV(di->di_major, di->di_minor);
rc = cdev_add(cdev, di->di_dev, 1); rc = cdev_add(cdev, di->di_dev, 1);
if (rc) { if (rc) {
printk("spl: Error adding cdev, %d\n", rc); CERROR("Error adding cdev, %d\n", rc);
kfree(fops); kfree(fops);
cdev_del(cdev); cdev_del(cdev);
mutex_exit(&di->di_lock); mutex_exit(&di->di_lock);
return DDI_FAILURE; RETURN(DDI_FAILURE);
} }
di->di_class = class_create(THIS_MODULE, name); di->di_class = class_create(THIS_MODULE, name);
if (IS_ERR(di->di_class)) { if (IS_ERR(di->di_class)) {
rc = PTR_ERR(di->di_class); rc = PTR_ERR(di->di_class);
printk("spl: Error creating %s class, %d\n", name, rc); CERROR("Error creating %s class, %d\n", name, rc);
kfree(fops); kfree(fops);
cdev_del(di->di_cdev); cdev_del(di->di_cdev);
mutex_exit(&di->di_lock); mutex_exit(&di->di_lock);
return DDI_FAILURE; RETURN(DDI_FAILURE);
} }
/* Do not append a 0 to devices with minor nums of 0 */ /* Do not append a 0 to devices with minor nums of 0 */
@ -148,7 +155,7 @@ __ddi_create_minor_node(dev_info_t *di, char *name, int spec_type,
mutex_exit(&di->di_lock); mutex_exit(&di->di_lock);
return DDI_SUCCESS; RETURN(DDI_SUCCESS);
} }
EXPORT_SYMBOL(__ddi_create_minor_node); EXPORT_SYMBOL(__ddi_create_minor_node);
@ -176,9 +183,11 @@ __ddi_remove_minor_node_locked(dev_info_t *di, char *name)
void void
__ddi_remove_minor_node(dev_info_t *di, char *name) __ddi_remove_minor_node(dev_info_t *di, char *name)
{ {
ENTRY;
mutex_enter(&di->di_lock); mutex_enter(&di->di_lock);
__ddi_remove_minor_node_locked(di, name); __ddi_remove_minor_node_locked(di, name);
mutex_exit(&di->di_lock); mutex_exit(&di->di_lock);
EXIT;
} }
EXPORT_SYMBOL(ddi_remove_minor_node); EXPORT_SYMBOL(ddi_remove_minor_node);
@ -245,11 +254,12 @@ __mod_install(struct modlinkage *modlp)
struct modldrv *drv = modlp->ml_modldrv; struct modldrv *drv = modlp->ml_modldrv;
struct dev_info *di; struct dev_info *di;
int rc; int rc;
ENTRY;
di = dev_info_alloc(modlp->ml_major, modlp->ml_minors, di = dev_info_alloc(modlp->ml_major, modlp->ml_minors,
drv->drv_dev_ops); drv->drv_dev_ops);
if (di == NULL) if (di == NULL)
return ENOMEM; RETURN(ENOMEM);
/* XXX: Really we need to be calling devo_probe if it's available /* XXX: Really we need to be calling devo_probe if it's available
* and then calling devo_attach for each device discovered. However * and then calling devo_attach for each device discovered. However
@ -258,12 +268,12 @@ __mod_install(struct modlinkage *modlp)
rc = drv->drv_dev_ops->devo_attach(di, DDI_ATTACH); rc = drv->drv_dev_ops->devo_attach(di, DDI_ATTACH);
if (rc != DDI_SUCCESS) { if (rc != DDI_SUCCESS) {
dev_info_free(di); dev_info_free(di);
return rc; RETURN(rc);
} }
drv->drv_dev_info = di; drv->drv_dev_info = di;
return DDI_SUCCESS; RETURN(DDI_SUCCESS);
} }
EXPORT_SYMBOL(__mod_install); EXPORT_SYMBOL(__mod_install);
@ -273,15 +283,16 @@ __mod_remove(struct modlinkage *modlp)
struct modldrv *drv = modlp->ml_modldrv; struct modldrv *drv = modlp->ml_modldrv;
struct dev_info *di = drv->drv_dev_info; struct dev_info *di = drv->drv_dev_info;
int rc; int rc;
ENTRY;
rc = drv->drv_dev_ops->devo_detach(di, DDI_DETACH); rc = drv->drv_dev_ops->devo_detach(di, DDI_DETACH);
if (rc != DDI_SUCCESS) if (rc != DDI_SUCCESS)
return rc; RETURN(rc);
dev_info_free(di); dev_info_free(di);
drv->drv_dev_info = NULL; drv->drv_dev_info = NULL;
return DDI_SUCCESS; RETURN(DDI_SUCCESS);
} }
EXPORT_SYMBOL(__mod_remove); EXPORT_SYMBOL(__mod_remove);
@ -289,24 +300,28 @@ int
ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip) ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
{ {
ldi_ident_t li; ldi_ident_t li;
ENTRY;
BUG_ON(modlp == NULL || lip == NULL); ASSERT(modlp);
ASSERT(lip);
li = kmalloc(sizeof(struct ldi_ident), GFP_KERNEL); li = kmalloc(sizeof(struct ldi_ident), GFP_KERNEL);
if (li == NULL) if (li == NULL)
return ENOMEM; RETURN(ENOMEM);
li->li_dev = MKDEV(modlp->ml_major, 0); li->li_dev = MKDEV(modlp->ml_major, 0);
*lip = li; *lip = li;
return 0; RETURN(0);
} }
EXPORT_SYMBOL(ldi_ident_from_mod); EXPORT_SYMBOL(ldi_ident_from_mod);
void void
ldi_ident_release(ldi_ident_t lip) ldi_ident_release(ldi_ident_t lip)
{ {
BUG_ON(lip == NULL); ENTRY;
ASSERT(lip);
kfree(lip); kfree(lip);
EXIT;
} }
EXPORT_SYMBOL(ldi_ident_release); EXPORT_SYMBOL(ldi_ident_release);


@ -332,7 +332,7 @@ proc_dohostid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos) void __user *buffer, size_t *lenp, loff_t *ppos)
{ {
int len, rc = 0; int len, rc = 0;
unsigned long val; int32_t val;
char *end, str[32]; char *end, str[32];
ENTRY; ENTRY;
@ -344,12 +344,12 @@ proc_dohostid(struct ctl_table *table, int write, struct file *filp,
if (rc < 0) if (rc < 0)
RETURN(rc); RETURN(rc);
val = simple_strtoul(str, &end, 16); val = simple_strtol(str, &end, 16);
if (str == end) if (str == end)
RETURN(-EINVAL); RETURN(-EINVAL);
spl_hostid = val; spl_hostid = (long)val;
sprintf(spl_hw_serial, "%lu", ((long)val >= 0) ? val : -val); sprintf(hw_serial, "%u", (val >= 0) ? val : -val);
*ppos += *lenp; *ppos += *lenp;
} else { } else {
len = snprintf(str, sizeof(str), "%lx", spl_hostid); len = snprintf(str, sizeof(str), "%lx", spl_hostid);
@ -530,8 +530,8 @@ static struct ctl_table spl_table[] = {
{ {
.ctl_name = CTL_HW_SERIAL, .ctl_name = CTL_HW_SERIAL,
.procname = "hw_serial", .procname = "hw_serial",
.data = spl_hw_serial, .data = hw_serial,
.maxlen = sizeof(spl_hw_serial), .maxlen = sizeof(hw_serial),
.mode = 0444, .mode = 0444,
.proc_handler = &proc_dostring, .proc_handler = &proc_dostring,
}, },


@ -1,28 +1,26 @@
#include <sys/rwlock.h> #include <sys/rwlock.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_RWLOCK
int int
__rw_read_held(krwlock_t *rwlp) __rw_read_held(krwlock_t *rwlp)
{ {
BUG_ON(rwlp->rw_magic != RW_MAGIC); ENTRY;
ASSERT(rwlp->rw_magic == RW_MAGIC);
if (__rw_lock_held(rwlp) && rwlp->rw_owner == NULL) { RETURN(__rw_lock_held(rwlp) && rwlp->rw_owner == NULL);
return 1;
}
return 0;
} }
EXPORT_SYMBOL(__rw_read_held); EXPORT_SYMBOL(__rw_read_held);
int int
__rw_write_held(krwlock_t *rwlp) __rw_write_held(krwlock_t *rwlp)
{ {
BUG_ON(rwlp->rw_magic != RW_MAGIC); ENTRY;
ASSERT(rwlp->rw_magic == RW_MAGIC);
if (rwlp->rw_owner == current) { RETURN(rwlp->rw_owner == current);
return 1;
}
return 0;
} }
EXPORT_SYMBOL(__rw_write_held); EXPORT_SYMBOL(__rw_write_held);
@ -30,8 +28,9 @@ int
__rw_lock_held(krwlock_t *rwlp) __rw_lock_held(krwlock_t *rwlp)
{ {
int rc = 0; int rc = 0;
ENTRY;
BUG_ON(rwlp->rw_magic != RW_MAGIC); ASSERT(rwlp->rw_magic == RW_MAGIC);
spin_lock_irq(&(rwlp->rw_sem.wait_lock)); spin_lock_irq(&(rwlp->rw_sem.wait_lock));
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@ -44,6 +43,6 @@ __rw_lock_held(krwlock_t *rwlp)
spin_unlock_irq(&(rwlp->rw_sem.wait_lock)); spin_unlock_irq(&(rwlp->rw_sem.wait_lock));
return rc; RETURN(rc);
} }
EXPORT_SYMBOL(__rw_lock_held); EXPORT_SYMBOL(__rw_lock_held);


@ -1,5 +1,11 @@
#include <sys/taskq.h> #include <sys/taskq.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_TASKQ
/* /*
* Task queue interface * Task queue interface
* *
@ -21,8 +27,8 @@ taskq_work_handler(void *priv)
{ {
taskq_work_wrapper_t *tww = priv; taskq_work_wrapper_t *tww = priv;
BUG_ON(tww == NULL); ASSERT(tww);
BUG_ON(tww->tww_func == NULL); ASSERT(tww->tww_func);
/* Call the real function and free the wrapper */ /* Call the real function and free the wrapper */
tww->tww_func(tww->tww_priv); tww->tww_func(tww->tww_priv);
@ -36,14 +42,15 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags)
struct workqueue_struct *wq = tq; struct workqueue_struct *wq = tq;
taskq_work_wrapper_t *tww; taskq_work_wrapper_t *tww;
int rc; int rc;
ENTRY;
BUG_ON(tq == NULL); ASSERT(tq);
BUG_ON(func == NULL); ASSERT(func);
/* Use GFP_ATOMIC since this may be called in interrupt context */ /* Use GFP_ATOMIC since this may be called in interrupt context */
tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_ATOMIC); tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_ATOMIC);
if (!tww) if (!tww)
return (taskqid_t)0; RETURN((taskqid_t)0);
INIT_WORK(&(tww->tww_work), taskq_work_handler, tww); INIT_WORK(&(tww->tww_work), taskq_work_handler, tww);
tww->tww_func = func; tww->tww_func = func;
@ -52,10 +59,10 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags)
rc = queue_work(wq, &(tww->tww_work)); rc = queue_work(wq, &(tww->tww_work));
if (!rc) { if (!rc) {
kfree(tww); kfree(tww);
return (taskqid_t)0; RETURN((taskqid_t)0);
} }
return (taskqid_t)wq; RETURN((taskqid_t)wq);
} }
EXPORT_SYMBOL(__taskq_dispatch); EXPORT_SYMBOL(__taskq_dispatch);
@ -73,21 +80,25 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
int minalloc, int maxalloc, uint_t flags) int minalloc, int maxalloc, uint_t flags)
{ {
/* NOTE: Linux workqueue names are limited to 10 chars */ /* NOTE: Linux workqueue names are limited to 10 chars */
ENTRY;
return create_singlethread_workqueue(name); RETURN(create_singlethread_workqueue(name));
} }
EXPORT_SYMBOL(__taskq_create); EXPORT_SYMBOL(__taskq_create);
void void
__taskq_destroy(taskq_t *tq) __taskq_destroy(taskq_t *tq)
{ {
ENTRY;
destroy_workqueue(tq); destroy_workqueue(tq);
EXIT;
} }
EXPORT_SYMBOL(__taskq_destroy); EXPORT_SYMBOL(__taskq_destroy);
void void
__taskq_wait(taskq_t *tq) __taskq_wait(taskq_t *tq)
{ {
ENTRY;
flush_workqueue(tq); flush_workqueue(tq);
EXIT;
} }
EXPORT_SYMBOL(__taskq_wait); EXPORT_SYMBOL(__taskq_wait);


@ -1,6 +1,12 @@
#include <sys/thread.h> #include <sys/thread.h>
#include <sys/kmem.h> #include <sys/kmem.h>
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_THREAD
/* /*
* Thread interfaces * Thread interfaces
*/ */
@ -20,7 +26,7 @@ thread_generic_wrapper(void *arg)
void (*func)(void *); void (*func)(void *);
void *args; void *args;
BUG_ON(tp->tp_magic != TP_MAGIC); ASSERT(tp->tp_magic == TP_MAGIC);
func = tp->tp_func; func = tp->tp_func;
args = tp->tp_args; args = tp->tp_args;
set_current_state(tp->tp_state); set_current_state(tp->tp_state);
@ -36,8 +42,10 @@ thread_generic_wrapper(void *arg)
void void
__thread_exit(void) __thread_exit(void)
{ {
ENTRY;
EXIT;
do_exit(0); do_exit(0);
return; /* Unreachable */
} }
EXPORT_SYMBOL(__thread_exit); EXPORT_SYMBOL(__thread_exit);
@ -52,15 +60,15 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
thread_priv_t *tp; thread_priv_t *tp;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
struct task_struct *tsk; struct task_struct *tsk;
ENTRY;
/* Option pp is simply ignored */ /* Option pp is simply ignored */
/* Variable stack size unsupported */ /* Variable stack size unsupported */
BUG_ON(stk != NULL); ASSERT(stk == NULL);
BUG_ON(stk != 0);
tp = kmem_alloc(sizeof(thread_priv_t), KM_SLEEP); tp = kmem_alloc(sizeof(thread_priv_t), KM_SLEEP);
if (tp == NULL) if (tp == NULL)
return NULL; RETURN(NULL);
tp->tp_magic = TP_MAGIC; tp->tp_magic = TP_MAGIC;
tp->tp_func = func; tp->tp_func = func;
@ -71,11 +79,11 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
tsk = kthread_create(thread_generic_wrapper, (void *)tp, "%s", name); tsk = kthread_create(thread_generic_wrapper, (void *)tp, "%s", name);
if (IS_ERR(tsk)) { if (IS_ERR(tsk)) {
printk("spl: Failed to create thread: %ld\n", PTR_ERR(tsk)); CERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
return NULL; RETURN(NULL);
} }
wake_up_process(tsk); wake_up_process(tsk);
return (kthread_t *)tsk; RETURN((kthread_t *)tsk);
} }
EXPORT_SYMBOL(__thread_create); EXPORT_SYMBOL(__thread_create);


@ -2,6 +2,12 @@
#include <sys/time.h> #include <sys/time.h>
#include "config.h" #include "config.h"
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_TIME
void void
__gethrestime(timestruc_t *ts) __gethrestime(timestruc_t *ts)
{ {
@ -13,7 +19,7 @@ int
__clock_gettime(clock_type_t type, timespec_t *tp) __clock_gettime(clock_type_t type, timespec_t *tp)
{ {
/* Only support CLOCK_REALTIME+__CLOCK_REALTIME0 for now */ /* Only support CLOCK_REALTIME+__CLOCK_REALTIME0 for now */
BUG_ON(!((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0))); ASSERT((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0));
getnstimeofday(tp); getnstimeofday(tp);
return 0; return 0;


@ -2,6 +2,13 @@
#include <sys/vnode.h> #include <sys/vnode.h>
#include "config.h" #include "config.h"
#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif
#define DEBUG_SUBSYSTEM S_VNODE
void *rootdir = NULL; void *rootdir = NULL;
EXPORT_SYMBOL(rootdir); EXPORT_SYMBOL(rootdir);
@ -45,6 +52,7 @@ vnode_t *
vn_alloc(int flag) vn_alloc(int flag)
{ {
vnode_t *vp; vnode_t *vp;
ENTRY;
vp = kmem_cache_alloc(vn_cache, flag); vp = kmem_cache_alloc(vn_cache, flag);
if (vp != NULL) { if (vp != NULL) {
@ -52,14 +60,16 @@ vn_alloc(int flag)
vp->v_type = 0; vp->v_type = 0;
} }
return vp; RETURN(vp);
} /* vn_alloc() */ } /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc); EXPORT_SYMBOL(vn_alloc);
void void
vn_free(vnode_t *vp) vn_free(vnode_t *vp)
{ {
ENTRY;
kmem_cache_free(vn_cache, vp); kmem_cache_free(vn_cache, vp);
EXIT;
} /* vn_free() */ } /* vn_free() */
EXPORT_SYMBOL(vn_free); EXPORT_SYMBOL(vn_free);
@ -71,10 +81,11 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
struct kstat stat; struct kstat stat;
int rc, saved_umask; int rc, saved_umask;
vnode_t *vp; vnode_t *vp;
ENTRY;
BUG_ON(!(flags & (FWRITE | FREAD))); ASSERT(flags & (FWRITE | FREAD));
BUG_ON(seg != UIO_SYSSPACE); ASSERT(seg == UIO_SYSSPACE);
BUG_ON(!vpp); ASSERT(vpp);
*vpp = NULL; *vpp = NULL;
if (!(flags & FCREAT) && (flags & FWRITE)) if (!(flags & FCREAT) && (flags & FWRITE))
@ -96,18 +107,18 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
(void)xchg(&current->fs->umask, saved_umask); (void)xchg(&current->fs->umask, saved_umask);
if (IS_ERR(fp)) if (IS_ERR(fp))
return -PTR_ERR(fp); RETURN(-PTR_ERR(fp));
rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat); rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
if (rc) { if (rc) {
filp_close(fp, 0); filp_close(fp, 0);
return -rc; RETURN(-rc);
} }
vp = vn_alloc(KM_SLEEP); vp = vn_alloc(KM_SLEEP);
if (!vp) { if (!vp) {
filp_close(fp, 0); filp_close(fp, 0);
return ENOMEM; RETURN(ENOMEM);
} }
mutex_enter(&vp->v_lock); mutex_enter(&vp->v_lock);
@ -116,7 +127,7 @@ vn_open(const char *path, uio_seg_t seg, int flags, int mode,
*vpp = vp; *vpp = vp;
mutex_exit(&vp->v_lock); mutex_exit(&vp->v_lock);
return 0; RETURN(0);
} /* vn_open() */ } /* vn_open() */
EXPORT_SYMBOL(vn_open); EXPORT_SYMBOL(vn_open);
@ -126,19 +137,20 @@ vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
{ {
char *realpath; char *realpath;
int rc; int rc;
ENTRY;
BUG_ON(vp != rootdir); ASSERT(vp == rootdir);
realpath = kmalloc(strlen(path) + 2, GFP_KERNEL); realpath = kmalloc(strlen(path) + 2, GFP_KERNEL);
if (!realpath) if (!realpath)
return ENOMEM; RETURN(ENOMEM);
sprintf(realpath, "/%s", path); sprintf(realpath, "/%s", path);
rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2); rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
kfree(realpath); kfree(realpath);
return rc; RETURN(rc);
} /* vn_openat() */ } /* vn_openat() */
EXPORT_SYMBOL(vn_openat); EXPORT_SYMBOL(vn_openat);
@ -150,13 +162,14 @@ vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
mm_segment_t saved_fs; mm_segment_t saved_fs;
struct file *fp; struct file *fp;
int rc; int rc;
ENTRY;
BUG_ON(!(uio == UIO_WRITE || uio == UIO_READ)); ASSERT(uio == UIO_WRITE || uio == UIO_READ);
BUG_ON(!vp); ASSERT(vp);
BUG_ON(!vp->v_file); ASSERT(vp->v_file);
BUG_ON(seg != UIO_SYSSPACE); ASSERT(seg == UIO_SYSSPACE);
BUG_ON(x1 != 0); ASSERT(x1 == 0);
BUG_ON(x2 != RLIM64_INFINITY); ASSERT(x2 == RLIM64_INFINITY);
offset = off; offset = off;
fp = vp->v_file; fp = vp->v_file;
@ -175,16 +188,16 @@ vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
set_fs(saved_fs); set_fs(saved_fs);
if (rc < 0) if (rc < 0)
return -rc; RETURN(-rc);
if (residp) { if (residp) {
*residp = len - rc; *residp = len - rc;
} else { } else {
if (rc != len) if (rc != len)
return EIO; RETURN(EIO);
} }
return 0; RETURN(0);
} /* vn_rdwr() */ } /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr); EXPORT_SYMBOL(vn_rdwr);
@ -192,14 +205,15 @@ int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4) vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{ {
int rc; int rc;
ENTRY;
BUG_ON(!vp); ASSERT(vp);
BUG_ON(!vp->v_file); ASSERT(vp->v_file);
rc = filp_close(vp->v_file, 0); rc = filp_close(vp->v_file, 0);
vn_free(vp); vn_free(vp);
return -rc; RETURN(-rc);
} /* vn_close() */ } /* vn_close() */
EXPORT_SYMBOL(vn_close); EXPORT_SYMBOL(vn_close);
@ -216,17 +230,18 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
struct nameidata nd; struct nameidata nd;
struct inode *inode = NULL; struct inode *inode = NULL;
int rc = 0; int rc = 0;
ENTRY;
BUG_ON(seg != UIO_SYSSPACE); ASSERT(seg == UIO_SYSSPACE);
BUG_ON(flags != RMFILE); ASSERT(flags == RMFILE);
rc = path_lookup(path, LOOKUP_PARENT, &nd); rc = path_lookup(path, LOOKUP_PARENT, &nd);
if (rc) if (rc)
goto exit; GOTO(exit, rc);
rc = -EISDIR; rc = -EISDIR;
if (nd.last_type != LAST_NORM) if (nd.last_type != LAST_NORM)
goto exit1; GOTO(exit1, rc);
mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd); dentry = lookup_hash(&nd);
@ -234,7 +249,8 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
if (!IS_ERR(dentry)) { if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct rc value */ /* Why not before? Because we want correct rc value */
if (nd.last.name[nd.last.len]) if (nd.last.name[nd.last.len])
goto slashes; GOTO(slashes, rc);
inode = dentry->d_inode; inode = dentry->d_inode;
if (inode) if (inode)
atomic_inc(&inode->i_count); atomic_inc(&inode->i_count);
@ -248,12 +264,12 @@ exit2:
exit1: exit1:
path_release(&nd); path_release(&nd);
exit: exit:
return -rc; RETURN(-rc);
slashes: slashes:
rc = !dentry->d_inode ? -ENOENT : rc = !dentry->d_inode ? -ENOENT :
S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR; S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
goto exit2; GOTO(exit2, rc);
} /* vn_remove() */ } /* vn_remove() */
EXPORT_SYMBOL(vn_remove); EXPORT_SYMBOL(vn_remove);
@ -266,27 +282,28 @@ vn_rename(const char *oldname, const char *newname, int x1)
struct dentry * trap; struct dentry * trap;
struct nameidata oldnd, newnd; struct nameidata oldnd, newnd;
int rc = 0; int rc = 0;
ENTRY;
rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd); rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
if (rc) if (rc)
goto exit; GOTO(exit, rc);
rc = path_lookup(newname, LOOKUP_PARENT, &newnd); rc = path_lookup(newname, LOOKUP_PARENT, &newnd);
if (rc) if (rc)
goto exit1; GOTO(exit1, rc);
rc = -EXDEV; rc = -EXDEV;
if (oldnd.mnt != newnd.mnt) if (oldnd.mnt != newnd.mnt)
goto exit2; GOTO(exit2, rc);
old_dir = oldnd.dentry; old_dir = oldnd.dentry;
rc = -EBUSY; rc = -EBUSY;
if (oldnd.last_type != LAST_NORM) if (oldnd.last_type != LAST_NORM)
goto exit2; GOTO(exit2, rc);
new_dir = newnd.dentry; new_dir = newnd.dentry;
if (newnd.last_type != LAST_NORM) if (newnd.last_type != LAST_NORM)
goto exit2; GOTO(exit2, rc);
trap = lock_rename(new_dir, old_dir); trap = lock_rename(new_dir, old_dir);
@ -294,36 +311,36 @@ vn_rename(const char *oldname, const char *newname, int x1)
rc = PTR_ERR(old_dentry); rc = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry)) if (IS_ERR(old_dentry))
goto exit3; GOTO(exit3, rc);
/* source must exist */ /* source must exist */
rc = -ENOENT; rc = -ENOENT;
if (!old_dentry->d_inode) if (!old_dentry->d_inode)
goto exit4; GOTO(exit4, rc);
/* unless the source is a directory trailing slashes give -ENOTDIR */ /* unless the source is a directory trailing slashes give -ENOTDIR */
if (!S_ISDIR(old_dentry->d_inode->i_mode)) { if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
rc = -ENOTDIR; rc = -ENOTDIR;
if (oldnd.last.name[oldnd.last.len]) if (oldnd.last.name[oldnd.last.len])
goto exit4; GOTO(exit4, rc);
if (newnd.last.name[newnd.last.len]) if (newnd.last.name[newnd.last.len])
goto exit4; GOTO(exit4, rc);
} }
/* source should not be ancestor of target */ /* source should not be ancestor of target */
rc = -EINVAL; rc = -EINVAL;
if (old_dentry == trap) if (old_dentry == trap)
goto exit4; GOTO(exit4, rc);
new_dentry = lookup_hash(&newnd); new_dentry = lookup_hash(&newnd);
rc = PTR_ERR(new_dentry); rc = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry)) if (IS_ERR(new_dentry))
goto exit4; GOTO(exit4, rc);
/* target should not be an ancestor of source */ /* target should not be an ancestor of source */
rc = -ENOTEMPTY; rc = -ENOTEMPTY;
if (new_dentry == trap) if (new_dentry == trap)
goto exit5; GOTO(exit5, rc);
rc = vfs_rename(old_dir->d_inode, old_dentry, rc = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry); new_dir->d_inode, new_dentry);
@ -338,7 +355,7 @@ exit2:
exit1: exit1:
path_release(&oldnd); path_release(&oldnd);
exit: exit:
return -rc; RETURN(-rc);
} }
EXPORT_SYMBOL(vn_rename); EXPORT_SYMBOL(vn_rename);
@ -348,16 +365,17 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
struct file *fp; struct file *fp;
struct kstat stat; struct kstat stat;
int rc; int rc;
ENTRY;
BUG_ON(!vp); ASSERT(vp);
BUG_ON(!vp->v_file); ASSERT(vp->v_file);
BUG_ON(!vap); ASSERT(vap);
fp = vp->v_file; fp = vp->v_file;
rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat); rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
if (rc) if (rc)
return -rc; RETURN(-rc);
vap->va_type = vn_get_sol_type(stat.mode); vap->va_type = vn_get_sol_type(stat.mode);
vap->va_mode = stat.mode; vap->va_mode = stat.mode;
@ -377,21 +395,22 @@ vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
vap->va_rdev = stat.rdev; vap->va_rdev = stat.rdev;
vap->va_blocks = stat.blocks; vap->va_blocks = stat.blocks;
return 0; RETURN(0);
} }
EXPORT_SYMBOL(vn_getattr); EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4) int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{ {
int datasync = 0; int datasync = 0;
ENTRY;
BUG_ON(!vp); ASSERT(vp);
BUG_ON(!vp->v_file); ASSERT(vp->v_file);
if (flags & FDSYNC) if (flags & FDSYNC)
datasync = 1; datasync = 1;
return -file_fsync(vp->v_file, vp->v_file->f_dentry, datasync); RETURN(-file_fsync(vp->v_file, vp->v_file->f_dentry, datasync));
} /* vn_fsync() */ } /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync); EXPORT_SYMBOL(vn_fsync);
@ -401,11 +420,11 @@ file_find(int fd)
{ {
file_t *fp; file_t *fp;
BUG_ON(!spin_is_locked(&vn_file_lock)); ASSERT(spin_is_locked(&vn_file_lock));
list_for_each_entry(fp, &vn_file_list, f_list) { list_for_each_entry(fp, &vn_file_list, f_list) {
if (fd == fp->f_fd) { if (fd == fp->f_fd) {
BUG_ON(atomic_read(&fp->f_ref) == 0); ASSERT(atomic_read(&fp->f_ref) != 0);
return fp; return fp;
} }
} }
@ -420,6 +439,8 @@ vn_getf(int fd)
struct file *lfp; struct file *lfp;
file_t *fp; file_t *fp;
vnode_t *vp; vnode_t *vp;
int rc = 0;
ENTRY;
/* Already open just take an extra reference */ /* Already open just take an extra reference */
spin_lock(&vn_file_lock); spin_lock(&vn_file_lock);
@ -428,7 +449,7 @@ vn_getf(int fd)
if (fp) { if (fp) {
atomic_inc(&fp->f_ref); atomic_inc(&fp->f_ref);
spin_unlock(&vn_file_lock); spin_unlock(&vn_file_lock);
return fp; RETURN(fp);
} }
spin_unlock(&vn_file_lock); spin_unlock(&vn_file_lock);
@ -436,7 +457,7 @@ vn_getf(int fd)
/* File was not yet opened create the object and setup */ /* File was not yet opened create the object and setup */
fp = kmem_cache_alloc(vn_file_cache, 0); fp = kmem_cache_alloc(vn_file_cache, 0);
if (fp == NULL) if (fp == NULL)
goto out; GOTO(out, rc);
mutex_enter(&fp->f_lock); mutex_enter(&fp->f_lock);
@ -446,14 +467,14 @@ vn_getf(int fd)
lfp = fget(fd); lfp = fget(fd);
if (lfp == NULL) if (lfp == NULL)
goto out_mutex; GOTO(out_mutex, rc);
vp = vn_alloc(KM_SLEEP); vp = vn_alloc(KM_SLEEP);
if (vp == NULL) if (vp == NULL)
goto out_fget; GOTO(out_fget, rc);
if (vfs_getattr(lfp->f_vfsmnt, lfp->f_dentry, &stat)) if (vfs_getattr(lfp->f_vfsmnt, lfp->f_dentry, &stat))
goto out_vnode; GOTO(out_vnode, rc);
mutex_enter(&vp->v_lock); mutex_enter(&vp->v_lock);
vp->v_type = vn_get_sol_type(stat.mode); vp->v_type = vn_get_sol_type(stat.mode);
@ -469,7 +490,7 @@ vn_getf(int fd)
spin_unlock(&vn_file_lock); spin_unlock(&vn_file_lock);
mutex_exit(&fp->f_lock); mutex_exit(&fp->f_lock);
return fp; RETURN(fp);
out_vnode: out_vnode:
vn_free(vp); vn_free(vp);
@ -479,14 +500,14 @@ out_mutex:
mutex_exit(&fp->f_lock); mutex_exit(&fp->f_lock);
kmem_cache_free(vn_file_cache, fp); kmem_cache_free(vn_file_cache, fp);
out: out:
return NULL; RETURN(NULL);
} /* getf() */ } /* getf() */
EXPORT_SYMBOL(getf); EXPORT_SYMBOL(getf);
static void releasef_locked(file_t *fp) static void releasef_locked(file_t *fp)
{ {
BUG_ON(fp->f_file == NULL); ASSERT(fp->f_file);
BUG_ON(fp->f_vnode == NULL); ASSERT(fp->f_vnode);
/* Unlinked from list, no refs, safe to free outside mutex */ /* Unlinked from list, no refs, safe to free outside mutex */
fput(fp->f_file); fput(fp->f_file);
@ -499,6 +520,7 @@ void
vn_releasef(int fd) vn_releasef(int fd)
{ {
file_t *fp; file_t *fp;
ENTRY;
spin_lock(&vn_file_lock); spin_lock(&vn_file_lock);
fp = file_find(fd); fp = file_find(fd);
@ -506,6 +528,7 @@ vn_releasef(int fd)
atomic_dec(&fp->f_ref); atomic_dec(&fp->f_ref);
if (atomic_read(&fp->f_ref) > 0) { if (atomic_read(&fp->f_ref) > 0) {
spin_unlock(&vn_file_lock); spin_unlock(&vn_file_lock);
EXIT;
return; return;
} }
@ -514,6 +537,7 @@ vn_releasef(int fd)
} }
spin_unlock(&vn_file_lock); spin_unlock(&vn_file_lock);
EXIT;
return; return;
} /* releasef() */ } /* releasef() */
EXPORT_SYMBOL(releasef); EXPORT_SYMBOL(releasef);
@ -559,6 +583,7 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
int int
vn_init(void) vn_init(void)
{ {
ENTRY;
vn_cache = kmem_cache_create("spl_vn_cache", sizeof(struct vnode), 64, vn_cache = kmem_cache_create("spl_vn_cache", sizeof(struct vnode), 64,
vn_cache_constructor, vn_cache_constructor,
vn_cache_destructor, vn_cache_destructor,
@ -569,7 +594,7 @@ vn_init(void)
vn_file_cache_constructor, vn_file_cache_constructor,
vn_file_cache_destructor, vn_file_cache_destructor,
NULL, NULL, NULL, 0); NULL, NULL, NULL, 0);
return 0; RETURN(0);
} /* vn_init() */ } /* vn_init() */
void void
@ -577,6 +602,7 @@ vn_fini(void)
{ {
file_t *fp, *next_fp; file_t *fp, *next_fp;
int rc, leaked = 0; int rc, leaked = 0;
ENTRY;
spin_lock(&vn_file_lock); spin_lock(&vn_file_lock);
@ -588,17 +614,18 @@ vn_fini(void)
rc = kmem_cache_destroy(vn_file_cache); rc = kmem_cache_destroy(vn_file_cache);
if (rc) if (rc)
printk("spl: Warning leaked vn_file_cache objects, %d\n", rc); CWARN("Warning leaked vn_file_cache objects, %d\n", rc);
vn_file_cache = NULL; vn_file_cache = NULL;
spin_unlock(&vn_file_lock); spin_unlock(&vn_file_lock);
if (leaked > 0) if (leaked > 0)
printk("spl: Warning %d files leaked\n", leaked); CWARN("Warning %d files leaked\n", leaked);
rc = kmem_cache_destroy(vn_cache); rc = kmem_cache_destroy(vn_cache);
if (rc) if (rc)
printk("spl: Warning leaked vn_cache objects, %d\n", rc); CWARN("Warning leaked vn_cache objects, %d\n", rc);
EXIT;
return; return;
} /* vn_fini() */ } /* vn_fini() */