Correctly implement the atomic_cas_ptr() function. Ideally all of these
atomic operations will eventually be rewritten with the correct
arch-specific assembly, but not today.


git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@65 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-04-03 21:48:57 +00:00
parent 0a6fd143fd
commit 996faa6869
2 changed files with 10 additions and 14 deletions
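As a rough illustration of the direction the commit message hints at (not part of this change), atomic_cas_ptr() could later be expressed directly with the kernel's generic cmpxchg() primitive instead of a global spinlock or the atomic_cas_64() detour. This is only a sketch, assuming cmpxchg() handles pointer-sized operands on the target arch:

	/* Hypothetical sketch, not part of this commit: atomic_cas_ptr()
	 * built on the kernel's cmpxchg() macro, assuming pointer-sized
	 * operands are supported on the target arch.
	 */
	static __inline__ void *
	atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
	{
		return (void *)cmpxchg((unsigned long *)target,
		                       (unsigned long)cmp, (unsigned long)newval);
	}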


@@ -16,7 +16,6 @@ extern "C" {
  */
 extern spinlock_t atomic64_lock;
 extern spinlock_t atomic32_lock;
-extern spinlock_t atomic_lock;
 
 static __inline__ uint32_t
 atomic_add_32(volatile uint32_t *target, int32_t delta)
@@ -108,19 +107,18 @@ atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
 	return rc;
 }
 
+#if defined(__x86_64__)
+/* XXX: Implement atomic_cas_ptr() in terms of uint64'ts.  This
+ * is of course only safe and correct for 64 bit arches... but
+ * for now I'm OK with that.
+ */
 static __inline__ void *
 atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
 {
-	void *rc;
-
-	spin_lock(&atomic_lock);
-	rc = (void *)target;
-	if (target == cmp)
-		target = newval;
-	spin_unlock(&atomic_lock);
-
-	return rc;
+	return (void *)atomic_cas_64((volatile uint64_t *)target,
+	                             (uint64_t)cmp, (uint64_t)newval);
 }
+#endif
 
 #ifdef __cplusplus
 }


@@ -3,8 +3,6 @@
 /* Global atomic lock declarations */
 spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED;
 spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t atomic_lock = SPIN_LOCK_UNLOCKED;
 
 EXPORT_SYMBOL(atomic64_lock);
 EXPORT_SYMBOL(atomic32_lock);
-EXPORT_SYMBOL(atomic_lock);