Optimize spl_rwsem_is_locked()

The spl_rwsem_is_locked() compatibility function has been observed
to be a hot spot.  The root cause is that we must check the rwsem
activity under the rwsem->wait_lock to avoid a race.  When the lock
is busy, significant contention can occur on that spinlock.
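
For reference, the costly pattern being removed looks like the
following annotated restatement of the old compatibility macro (the
exact text appears in the removed lines of the diff below):

    /*
     * Old approach: every status check unconditionally spins on the
     * semaphore's internal wait_lock, so concurrent callers serialize
     * on the same spinlock used by the lock/unlock paths.
     */
    #define spl_rwsem_is_locked(rwsem)                              \
    ({                                                              \
            unsigned long _flags_;                                  \
            int _rc_;                                               \
            spl_rwsem_lock_irqsave(&rwsem->wait_lock, _flags_);     \
            _rc_ = rwsem_is_locked(rwsem);                          \
            spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, _flags_); \
            _rc_;                                                   \
    })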

The upstream kernel fix for this race was built on the insight that
the contention can be avoided by using spin_trylock_irqsave().  When
the trylock fails, the lock is contended and it is reasonable to
simply report the semaphore as locked.
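
In outline, the trylock-based check looks like this (an annotated
restatement of the replacement code in the diff below; the fallback
value of 1 is what makes a failed trylock read as "locked"):

    /*
     * New approach: only peek at the semaphore state if the wait_lock
     * can be taken without spinning.  A failed trylock means another
     * CPU is already manipulating the semaphore, so reporting it as
     * locked is the safe (and cheap) answer.
     */
    static inline int
    spl_rwsem_is_locked(struct rw_semaphore *rwsem)
    {
            unsigned long flags;
            int rc = 1;                     /* Default: report locked. */

            if (spl_rwsem_trylock_irqsave(&rwsem->wait_lock, flags)) {
                    rc = rwsem_is_locked(rwsem);
                    spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags);
            }

            return (rc);
    }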

This change updates the SPL's implementation to match the upstream
kernel.  Since the equivalent kernel code has been in use for years
now, this is a low-risk change.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Author: Brian Behlendorf
Date: 2012-07-13 12:49:40 -07:00
parent d801db1487
commit d503b971f4
1 changed file with 25 additions and 42 deletions

@@ -27,57 +27,40 @@
 #include <linux/rwsem.h>
 
-#ifdef RWSEM_SPINLOCK_IS_RAW
-#define spl_rwsem_lock_irqsave(lock, flags)             \
-({                                                      \
-        raw_spin_lock_irqsave(lock, flags);             \
-})
-#define spl_rwsem_unlock_irqrestore(lock, flags)        \
-({                                                      \
-        raw_spin_unlock_irqrestore(lock, flags);        \
-})
+#if defined(RWSEM_SPINLOCK_IS_RAW)
+#define spl_rwsem_lock_irqsave(lk, fl)       raw_spin_lock_irqsave(lk, fl)
+#define spl_rwsem_unlock_irqrestore(lk, fl)  raw_spin_unlock_irqrestore(lk, fl)
+#define spl_rwsem_trylock_irqsave(lk, fl)    raw_spin_trylock_irqsave(lk, fl)
 #else
-#define spl_rwsem_lock_irqsave(lock, flags)             \
-({                                                      \
-        spin_lock_irqsave(lock, flags);                 \
-})
-#define spl_rwsem_unlock_irqrestore(lock, flags)        \
-({                                                      \
-        spin_unlock_irqrestore(lock, flags);            \
-})
+#define spl_rwsem_lock_irqsave(lk, fl)       spin_lock_irqsave(lk, fl)
+#define spl_rwsem_unlock_irqrestore(lk, fl)  spin_unlock_irqrestore(lk, fl)
+#define spl_rwsem_trylock_irqsave(lk, fl)    spin_trylock_irqsave(lk, fl)
 #endif /* RWSEM_SPINLOCK_IS_RAW */
 
+#ifdef RWSEM_IS_LOCKED_TAKES_WAIT_LOCK
 /*
- * A race condition in rwsem_is_locked() was fixed in Linux 2.6.33 and the fix
- * was backported to RHEL5 as of kernel 2.6.18-190.el5.  Details can be found
- * here:
- *
- * https://bugzilla.redhat.com/show_bug.cgi?id=526092
+ * Prior to Linux 2.6.33 there existed a race condition in rwsem_is_locked().
+ * The semaphore's activity was checked outside of the wait_lock which
+ * could result in some readers getting the incorrect activity value.
  *
- * The race condition was fixed in the kernel by acquiring the semaphore's
- * wait_lock inside rwsem_is_locked().  The SPL worked around the race
- * condition by acquiring the wait_lock before calling that function, but
- * with the fix in place we must not do that.
+ * When a kernel without this fix is detected the SPL takes responsibility
+ * for acquiring the wait_lock to avoid this race.
  */
-#if defined(RWSEM_IS_LOCKED_TAKES_WAIT_LOCK)
-
-#define spl_rwsem_is_locked(rwsem)                      \
-({                                                      \
-        rwsem_is_locked(rwsem);                         \
-})
-
+#define spl_rwsem_is_locked(rwsem)           rwsem_is_locked(rwsem)
 #else
-
-#define spl_rwsem_is_locked(rwsem)                      \
-({                                                      \
-        unsigned long _flags_;                          \
-        int _rc_;                                       \
-        spl_rwsem_lock_irqsave(&rwsem->wait_lock, _flags_); \
-        _rc_ = rwsem_is_locked(rwsem);                  \
-        spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, _flags_); \
-        _rc_;                                           \
-})
-
+static inline int
+spl_rwsem_is_locked(struct rw_semaphore *rwsem)
+{
+        unsigned long flags;
+        int rc = 1;
+
+        if (spl_rwsem_trylock_irqsave(&rwsem->wait_lock, flags)) {
+                rc = rwsem_is_locked(rwsem);
+                spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags);
+        }
+
+        return (rc);
+}
 #endif /* RWSEM_IS_LOCKED_TAKES_WAIT_LOCK */
 
 #endif /* _SPL_RWSEM_COMPAT_H */