First pass at getting userspace 64-bit atomic ops working: pull in the Solaris i386 and amd64 atomic.S implementations along with the ia32 asm_linkage.h header they need, and strip the kernel-only paths out of the libspl atomic.h.

Brian Behlendorf 2008-12-19 15:11:43 -08:00
parent 545f76d526
commit febd743891
5 changed files with 1650 additions and 189 deletions
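
Both new atomic.S files implement the *_nv ("return the new value") operations with the same compare-and-swap retry loop. As a rough C sketch of that pattern (illustrative only, not part of the commit; it leans on GCC's __sync_val_compare_and_swap builtin):

/* Hypothetical C equivalent of the cmpxchg-based *_nv loops. */
#include <stdint.h>

static uint64_t
add_64_nv_sketch(volatile uint64_t *target, int64_t delta)
{
	uint64_t oldval, newval;

	do {
		oldval = *target;		/* observe current value */
		newval = oldval + delta;	/* compute desired value */
		/*
		 * Store newval only if *target still equals oldval;
		 * the builtin returns the value it actually found.
		 */
	} while (__sync_val_compare_and_swap(target, oldval, newval) != oldval);

	return (newval);			/* the "new" value */
}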

View File

@@ -16,5 +16,6 @@ libspl_la_SOURCES = \
${top_srcdir}/lib/libspl/strnlen.c \
${top_srcdir}/lib/libspl/u8_textprep.c \
${top_srcdir}/lib/libspl/zone.c \
+${top_srcdir}/lib/libspl/asm/atomic.S \
${top_srcdir}/lib/libspl/include/sys/list.h \
${top_srcdir}/lib/libspl/include/sys/list_impl.h

View File

@@ -0,0 +1,595 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
.ident "%Z%%M% %I% %E% SMI"
.file "%M%"
#define _ASM
#include <ia32/sys/asm_linkage.h>
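/*
 * Calling convention (System V AMD64 ABI): the target address is
 * passed in %rdi, the second argument (when present) in %rsi, and
 * return values come back in %rax or a subregister of it.
 */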
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
lock
incb (%rdi)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
lock
incw (%rdi)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
lock
incl (%rdi)
ret
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_ulong)
lock
incq (%rdi)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_64)
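/*
 * The *_nv ("new value") variants below use a cmpxchg retry loop:
 * the current value is loaded into %rax (or the appropriate
 * subregister), the desired value is built in %rcx, and lock
 * cmpxchg stores it only if the memory operand still equals the
 * accumulator.  On failure cmpxchg reloads the accumulator with the
 * value it found, so branching back to 1b retries with fresh data.
 */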
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movb (%rdi), %al
1:
leaq 1(%rax), %rcx
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movw (%rdi), %ax
1:
leaq 1(%rax), %rcx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
movl (%rdi), %eax
1:
leaq 1(%rax), %rcx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
ENTRY(atomic_inc_64_nv)
ALTENTRY(atomic_inc_ulong_nv)
movq (%rdi), %rax
1:
leaq 1(%rax), %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_64_nv)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
lock
decb (%rdi)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
lock
decw (%rdi)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
lock
decl (%rdi)
ret
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_ulong)
lock
decq (%rdi)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movb (%rdi), %al
1:
leaq -1(%rax), %rcx
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movw (%rdi), %ax
1:
leaq -1(%rax), %rcx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
movl (%rdi), %eax
1:
leaq -1(%rax), %rcx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
ENTRY(atomic_dec_64_nv)
ALTENTRY(atomic_dec_ulong_nv)
movq (%rdi), %rax
1:
leaq -1(%rax), %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_64_nv)
ENTRY(atomic_add_8)
ALTENTRY(atomic_add_char)
lock
addb %sil, (%rdi)
ret
SET_SIZE(atomic_add_char)
SET_SIZE(atomic_add_8)
ENTRY(atomic_add_16)
ALTENTRY(atomic_add_short)
lock
addw %si, (%rdi)
ret
SET_SIZE(atomic_add_short)
SET_SIZE(atomic_add_16)
ENTRY(atomic_add_32)
ALTENTRY(atomic_add_int)
lock
addl %esi, (%rdi)
ret
SET_SIZE(atomic_add_int)
SET_SIZE(atomic_add_32)
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_ptr)
ALTENTRY(atomic_add_long)
lock
addq %rsi, (%rdi)
ret
SET_SIZE(atomic_add_long)
SET_SIZE(atomic_add_ptr)
SET_SIZE(atomic_add_64)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
lock
orb %sil, (%rdi)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
lock
orw %si, (%rdi)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
lock
orl %esi, (%rdi)
ret
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_ulong)
lock
orq %rsi, (%rdi)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
lock
andb %sil, (%rdi)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
lock
andw %si, (%rdi)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
lock
andl %esi, (%rdi)
ret
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_ulong)
lock
andq %rsi, (%rdi)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_64)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movb (%rdi), %al
1:
movb %sil, %cl
addb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movw (%rdi), %ax
1:
movw %si, %cx
addw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
addl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_add_64_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
addq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_64_nv)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movb (%rdi), %al
1:
movb %sil, %cl
andb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movw (%rdi), %ax
1:
movw %si, %cx
andw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
andl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
ENTRY(atomic_and_64_nv)
ALTENTRY(atomic_and_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
andq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_64_nv)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movb (%rdi), %al
1:
movb %sil, %cl
orb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_or_uchar_nv)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movw (%rdi), %ax
1:
movw %si, %cx
orw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
orl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
ENTRY(atomic_or_64_nv)
ALTENTRY(atomic_or_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
orq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_64_nv)
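/*
 * atomic_cas_*(target, cmp, newval) returns the value found in
 * memory: cmp is moved into the accumulator because cmpxchg
 * implicitly compares against it, and the accumulator holds the old
 * value on return whether or not the exchange took place.
 */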
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movzbl %sil, %eax
lock
cmpxchgb %dl, (%rdi)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movzwl %si, %eax
lock
cmpxchgw %dx, (%rdi)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
movl %esi, %eax
lock
cmpxchgl %edx, (%rdi)
ret
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
movq %rsi, %rax
lock
cmpxchgq %rdx, (%rdi)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movzbl %sil, %eax
lock
xchgb %al, (%rdi)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movzwl %si, %eax
lock
xchgw %ax, (%rdi)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
movl %esi, %eax
lock
xchgl %eax, (%rdi)
ret
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
ALTENTRY(atomic_swap_ulong)
ALTENTRY(atomic_swap_ptr)
movq %rsi, %rax
lock
xchgq %rax, (%rdi)
ret
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_64)
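/*
 * atomic_set_long_excl and atomic_clear_long_excl return 0 on
 * success and -1 if the bit was already in the requested state;
 * bts/btr copy the bit's previous value into the carry flag, which
 * the jnc/jc tests below examine.
 */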
ENTRY(atomic_set_long_excl)
xorl %eax, %eax
lock
btsq %rsi, (%rdi)
jnc 1f
decl %eax
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
xorl %eax, %eax
lock
btrq %rsi, (%rdi)
jc 1f
decl %eax
1:
ret
SET_SIZE(atomic_clear_long_excl)
/*
* NOTE: membar_enter and membar_exit are identical routines.
* We define them separately, instead of using ALTENTRY
* definitions to alias them together, so that DTrace and
* debuggers will see a unique address for each, allowing
* more accurate tracing.
*/
ENTRY(membar_enter)
mfence
ret
SET_SIZE(membar_enter)
ENTRY(membar_exit)
mfence
ret
SET_SIZE(membar_exit)
ENTRY(membar_producer)
sfence
ret
SET_SIZE(membar_producer)
ENTRY(membar_consumer)
lfence
ret
SET_SIZE(membar_consumer)
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif

View File

@@ -0,0 +1,730 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
.ident "%Z%%M% %I% %E% SMI"
.file "%M%"
#define _ASM
#include <ia32/sys/asm_linkage.h>
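/*
 * Calling convention (i386 cdecl): arguments live on the stack, the
 * target address at 4(%esp) and the second argument (when present)
 * at 8(%esp).  64-bit arguments occupy two 32-bit slots and 64-bit
 * results are returned in the %edx:%eax pair.
 */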
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
movl 4(%esp), %eax
lock
incb (%eax)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
movl 4(%esp), %eax
lock
incw (%eax)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
ALTENTRY(atomic_inc_ulong)
movl 4(%esp), %eax
lock
incl (%eax)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
leal 1(%eax), %ecx
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
leal 1(%eax), %ecx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
ALTENTRY(atomic_inc_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
leal 1(%eax), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
/*
* NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
* separated, you need to also edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_inc_64_nv.
*/
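/*
 * i386 has no 64-bit inc, so the increment is emulated: the current
 * value is held in %edx:%eax, the incremented value is built in
 * %ecx:%ebx with add/adc to propagate the carry, and cmpxchg8b
 * stores %ecx:%ebx only if the 8-byte operand still matches
 * %edx:%eax, retrying otherwise.
 */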
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
incl %ebx
addl %eax, %ebx
adcl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_inc_64_nv)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
movl 4(%esp), %eax
lock
decb (%eax)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
movl 4(%esp), %eax
lock
decw (%eax)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
ALTENTRY(atomic_dec_ulong)
movl 4(%esp), %eax
lock
decl (%eax)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
leal -1(%eax), %ecx
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
leal -1(%eax), %ecx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
ALTENTRY(atomic_dec_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
leal -1(%eax), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
/*
* NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_dec_64_nv.
*/
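/*
 * Same cmpxchg8b loop for decrement: the xor/not sequence leaves
 * 0xffffffffffffffff (-1) in %ecx:%ebx, which add/adc then add to
 * the value in %edx:%eax.
 */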
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
not %ecx
not %ebx
addl %eax, %ebx
adcl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_dec_64_nv)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_add_8)
ALTENTRY(atomic_add_char)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
addb %cl, (%eax)
ret
SET_SIZE(atomic_add_char)
SET_SIZE(atomic_add_8)
ENTRY(atomic_add_16)
ALTENTRY(atomic_add_short)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
addw %cx, (%eax)
ret
SET_SIZE(atomic_add_short)
SET_SIZE(atomic_add_16)
ENTRY(atomic_add_32)
ALTENTRY(atomic_add_int)
ALTENTRY(atomic_add_ptr)
ALTENTRY(atomic_add_long)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
addl %ecx, (%eax)
ret
SET_SIZE(atomic_add_long)
SET_SIZE(atomic_add_ptr)
SET_SIZE(atomic_add_int)
SET_SIZE(atomic_add_32)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
orb %cl, (%eax)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
orw %cx, (%eax)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
ALTENTRY(atomic_or_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
orl %ecx, (%eax)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
andb %cl, (%eax)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
andw %cx, (%eax)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
ALTENTRY(atomic_and_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
andl %ecx, (%eax)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
addb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
addw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
addl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
/*
* NOTE: If atomic_add_64 and atomic_add_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_64_nv.
*/
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
addl %eax, %ebx
adcl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_add_64_nv)
SET_SIZE(atomic_add_64)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
orb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_or_uchar_nv)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
orw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
ALTENTRY(atomic_or_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
orl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
/*
* NOTE: If atomic_or_64 and atomic_or_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_64_nv.
*/
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
orl %eax, %ebx
orl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_or_64_nv)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
andb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
andw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
ALTENTRY(atomic_and_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
andl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
/*
* NOTE: If atomic_and_64 and atomic_and_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_and_64_nv.
*/
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
andl %eax, %ebx
andl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_and_64_nv)
SET_SIZE(atomic_and_64)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
movb 12(%esp), %cl
lock
cmpxchgb %cl, (%edx)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
movw 12(%esp), %cx
lock
cmpxchgw %cx, (%edx)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
lock
cmpxchgl %ecx, (%edx)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
pushl %ebx
pushl %esi
movl 12(%esp), %esi
movl 16(%esp), %eax
movl 20(%esp), %edx
movl 24(%esp), %ebx
movl 28(%esp), %ecx
lock
cmpxchg8b (%esi)
popl %esi
popl %ebx
ret
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
lock
xchgb %al, (%edx)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
lock
xchgw %ax, (%edx)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
ALTENTRY(atomic_swap_ptr)
ALTENTRY(atomic_swap_ulong)
movl 4(%esp), %edx
movl 8(%esp), %eax
lock
xchgl %eax, (%edx)
ret
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
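/*
 * There is no 64-bit xchg on i386 either, so the swap loops on
 * cmpxchg8b until it succeeds; %edx:%eax tracks the last value seen
 * and is returned as the old value once the exchange lands.
 */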
ENTRY(atomic_swap_64)
pushl %esi
pushl %ebx
movl 12(%esp), %esi
movl 16(%esp), %ebx
movl 20(%esp), %ecx
movl (%esi), %eax
movl 4(%esi), %edx
1:
lock
cmpxchg8b (%esi)
jne 1b
popl %ebx
popl %esi
ret
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
movl 4(%esp), %edx
movl 8(%esp), %ecx
xorl %eax, %eax
lock
btsl %ecx, (%edx)
jnc 1f
decl %eax
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
movl 4(%esp), %edx
movl 8(%esp), %ecx
xorl %eax, %eax
lock
btrl %ecx, (%edx)
jc 1f
decl %eax
1:
ret
SET_SIZE(atomic_clear_long_excl)
/*
* NOTE: membar_enter, membar_exit, membar_producer, and
* membar_consumer are all identical routines. We define them
* separately, instead of using ALTENTRY definitions to alias them
* together, so that DTrace and debuggers will see a unique address
* for them, allowing more accurate tracing.
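*
* A locked read-modify-write of the word at the top of the stack is
* used as the fence: like mfence it is fully serializing, but unlike
* the SSE fence instructions it works on every i386-class CPU.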
*/
ENTRY(membar_enter)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_enter)
ENTRY(membar_exit)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_exit)
ENTRY(membar_producer)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_producer)
ENTRY(membar_consumer)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_consumer)
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif

View File

@@ -24,10 +24,8 @@
* Use is subject to license terms.
*/
-#ifndef _LIBSPL_ATOMIC_H
-#define _LIBSPL_ATOMIC_H
+#ifndef _SYS_ATOMIC_H
+#define _SYS_ATOMIC_H
#include <sys/types.h>
#include <sys/inttypes.h>
@@ -36,12 +34,7 @@
extern "C" {
#endif
-#if defined(_KERNEL) && defined(__GNUC__) && defined(_ASM_INLINES) && \
-(defined(__i386) || defined(__amd64))
-#include <asm/atomic.h>
-#endif
-#if defined(_KERNEL) || defined(__STDC__)
+#if defined(__STDC__)
/*
* Increment target.
*/
@@ -52,7 +45,7 @@ extern void atomic_inc_ushort(volatile ushort_t *);
extern void atomic_inc_32(volatile uint32_t *);
extern void atomic_inc_uint(volatile uint_t *);
extern void atomic_inc_ulong(volatile ulong_t *);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern void atomic_inc_64(volatile uint64_t *);
#endif
@@ -66,7 +59,7 @@ extern void atomic_dec_ushort(volatile ushort_t *);
extern void atomic_dec_32(volatile uint32_t *);
extern void atomic_dec_uint(volatile uint_t *);
extern void atomic_dec_ulong(volatile ulong_t *);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern void atomic_dec_64(volatile uint64_t *);
#endif
@@ -81,7 +74,7 @@ extern void atomic_add_32(volatile uint32_t *, int32_t);
extern void atomic_add_int(volatile uint_t *, int);
extern void atomic_add_ptr(volatile void *, ssize_t);
extern void atomic_add_long(volatile ulong_t *, long);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern void atomic_add_64(volatile uint64_t *, int64_t);
#endif
@@ -95,7 +88,7 @@ extern void atomic_or_ushort(volatile ushort_t *, ushort_t);
extern void atomic_or_32(volatile uint32_t *, uint32_t);
extern void atomic_or_uint(volatile uint_t *, uint_t);
extern void atomic_or_ulong(volatile ulong_t *, ulong_t);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern void atomic_or_64(volatile uint64_t *, uint64_t);
#endif
@@ -109,7 +102,7 @@ extern void atomic_and_ushort(volatile ushort_t *, ushort_t);
extern void atomic_and_32(volatile uint32_t *, uint32_t);
extern void atomic_and_uint(volatile uint_t *, uint_t);
extern void atomic_and_ulong(volatile ulong_t *, ulong_t);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern void atomic_and_64(volatile uint64_t *, uint64_t);
#endif
@@ -131,7 +124,7 @@ extern ushort_t atomic_inc_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_inc_32_nv(volatile uint32_t *);
extern uint_t atomic_inc_uint_nv(volatile uint_t *);
extern ulong_t atomic_inc_ulong_nv(volatile ulong_t *);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_inc_64_nv(volatile uint64_t *);
#endif
@@ -145,7 +138,7 @@ extern ushort_t atomic_dec_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_dec_32_nv(volatile uint32_t *);
extern uint_t atomic_dec_uint_nv(volatile uint_t *);
extern ulong_t atomic_dec_ulong_nv(volatile ulong_t *);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_dec_64_nv(volatile uint64_t *);
#endif
@@ -160,7 +153,7 @@ extern uint32_t atomic_add_32_nv(volatile uint32_t *, int32_t);
extern uint_t atomic_add_int_nv(volatile uint_t *, int);
extern void *atomic_add_ptr_nv(volatile void *, ssize_t);
extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
#endif
@@ -174,7 +167,7 @@ extern ushort_t atomic_or_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_or_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_or_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_or_ulong_nv(volatile ulong_t *, ulong_t);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_or_64_nv(volatile uint64_t *, uint64_t);
#endif
@@ -188,7 +181,7 @@ extern ushort_t atomic_and_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_and_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_and_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_and_ulong_nv(volatile ulong_t *, ulong_t);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_and_64_nv(volatile uint64_t *, uint64_t);
#endif
@@ -203,7 +196,7 @@ extern uint32_t atomic_cas_32(volatile uint32_t *, uint32_t, uint32_t);
extern uint_t atomic_cas_uint(volatile uint_t *, uint_t, uint_t);
extern void *atomic_cas_ptr(volatile void *, void *, void *);
extern ulong_t atomic_cas_ulong(volatile ulong_t *, ulong_t, ulong_t);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_cas_64(volatile uint64_t *, uint64_t, uint64_t);
#endif
@@ -218,7 +211,7 @@ extern uint32_t atomic_swap_32(volatile uint32_t *, uint32_t);
extern uint_t atomic_swap_uint(volatile uint_t *, uint_t);
extern void *atomic_swap_ptr(volatile void *, void *);
extern ulong_t atomic_swap_ulong(volatile ulong_t *, ulong_t);
-#if defined(_KERNEL) || defined(_INT64_TYPE)
+#if defined(_INT64_TYPE)
extern uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
#endif
@@ -264,175 +257,10 @@ extern void membar_producer(void);
* after the available flag has been seen, i.e. it imposes load ordering.
*/
extern void membar_consumer(void);
-#endif
-#if !defined(_KERNEL) && !defined(__STDC__)
-extern void atomic_inc_8();
-extern void atomic_inc_uchar();
-extern void atomic_inc_16();
-extern void atomic_inc_ushort();
-extern void atomic_inc_32();
-extern void atomic_inc_uint();
-extern void atomic_inc_ulong();
-#if defined(_INT64_TYPE)
-extern void atomic_inc_64();
-#endif /* defined(_INT64_TYPE) */
-extern void atomic_dec_8();
-extern void atomic_dec_uchar();
-extern void atomic_dec_16();
-extern void atomic_dec_ushort();
-extern void atomic_dec_32();
-extern void atomic_dec_uint();
-extern void atomic_dec_ulong();
-#if defined(_INT64_TYPE)
-extern void atomic_dec_64();
-#endif /* defined(_INT64_TYPE) */
-extern void atomic_add_8();
-extern void atomic_add_char();
-extern void atomic_add_16();
-extern void atomic_add_short();
-extern void atomic_add_32();
-extern void atomic_add_int();
-extern void atomic_add_ptr();
-extern void atomic_add_long();
-#if defined(_INT64_TYPE)
-extern void atomic_add_64();
-#endif /* defined(_INT64_TYPE) */
-extern void atomic_or_8();
-extern void atomic_or_uchar();
-extern void atomic_or_16();
-extern void atomic_or_ushort();
-extern void atomic_or_32();
-extern void atomic_or_uint();
-extern void atomic_or_ulong();
-#if defined(_INT64_TYPE)
-extern void atomic_or_64();
-#endif /* defined(_INT64_TYPE) */
-extern void atomic_and_8();
-extern void atomic_and_uchar();
-extern void atomic_and_16();
-extern void atomic_and_ushort();
-extern void atomic_and_32();
-extern void atomic_and_uint();
-extern void atomic_and_ulong();
-#if defined(_INT64_TYPE)
-extern void atomic_and_64();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_inc_8_nv();
-extern uchar_t atomic_inc_uchar_nv();
-extern uint16_t atomic_inc_16_nv();
-extern ushort_t atomic_inc_ushort_nv();
-extern uint32_t atomic_inc_32_nv();
-extern uint_t atomic_inc_uint_nv();
-extern ulong_t atomic_inc_ulong_nv();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_inc_64_nv();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_dec_8_nv();
-extern uchar_t atomic_dec_uchar_nv();
-extern uint16_t atomic_dec_16_nv();
-extern ushort_t atomic_dec_ushort_nv();
-extern uint32_t atomic_dec_32_nv();
-extern uint_t atomic_dec_uint_nv();
-extern ulong_t atomic_dec_ulong_nv();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_dec_64_nv();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_add_8_nv();
-extern uchar_t atomic_add_char_nv();
-extern uint16_t atomic_add_16_nv();
-extern ushort_t atomic_add_short_nv();
-extern uint32_t atomic_add_32_nv();
-extern uint_t atomic_add_int_nv();
-extern void *atomic_add_ptr_nv();
-extern ulong_t atomic_add_long_nv();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_add_64_nv();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_or_8_nv();
-extern uchar_t atomic_or_uchar_nv();
-extern uint16_t atomic_or_16_nv();
-extern ushort_t atomic_or_ushort_nv();
-extern uint32_t atomic_or_32_nv();
-extern uint_t atomic_or_uint_nv();
-extern ulong_t atomic_or_ulong_nv();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_or_64_nv();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_and_8_nv();
-extern uchar_t atomic_and_uchar_nv();
-extern uint16_t atomic_and_16_nv();
-extern ushort_t atomic_and_ushort_nv();
-extern uint32_t atomic_and_32_nv();
-extern uint_t atomic_and_uint_nv();
-extern ulong_t atomic_and_ulong_nv();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_and_64_nv();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_cas_8();
-extern uchar_t atomic_cas_uchar();
-extern uint16_t atomic_cas_16();
-extern ushort_t atomic_cas_ushort();
-extern uint32_t atomic_cas_32();
-extern uint_t atomic_cas_uint();
-extern void *atomic_cas_ptr();
-extern ulong_t atomic_cas_ulong();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_cas_64();
-#endif /* defined(_INT64_TYPE) */
-extern uint8_t atomic_swap_8();
-extern uchar_t atomic_swap_uchar();
-extern uint16_t atomic_swap_16();
-extern ushort_t atomic_swap_ushort();
-extern uint32_t atomic_swap_32();
-extern uint_t atomic_swap_uint();
-extern void *atomic_swap_ptr();
-extern ulong_t atomic_swap_ulong();
-#if defined(_INT64_TYPE)
-extern uint64_t atomic_swap_64();
-#endif /* defined(_INT64_TYPE) */
-extern int atomic_set_long_excl();
-extern int atomic_clear_long_excl();
-extern void membar_enter();
-extern void membar_exit();
-extern void membar_producer();
-extern void membar_consumer();
-#endif
-#if defined(_KERNEL)
-#if defined(_LP64) || defined(_ILP32)
-#define atomic_add_ip atomic_add_long
-#define atomic_add_ip_nv atomic_add_long_nv
-#define casip atomic_cas_ulong
-#endif
-#if defined(__sparc)
-extern uint8_t ldstub(uint8_t *);
-#endif
-/*
-* Legacy kernel interfaces; they will go away (eventually).
-*/
-extern uint8_t cas8(uint8_t *, uint8_t, uint8_t);
-extern uint32_t cas32(uint32_t *, uint32_t, uint32_t);
-extern uint64_t cas64(uint64_t *, uint64_t, uint64_t);
-extern ulong_t caslong(ulong_t *, ulong_t, ulong_t);
-extern void *casptr(void *, void *, void *);
-extern void atomic_and_long(ulong_t *, ulong_t);
-extern void atomic_or_long(ulong_t *, ulong_t);
-#if defined(__sparc)
-extern uint32_t swapl(uint32_t *, uint32_t);
-#endif
-#endif /* _KERNEL */
+#endif /* __STDC__ */
#ifdef __cplusplus
}
#endif
-#endif /* _LIBSPL_ATOMIC_H */
+#endif /* _SYS_ATOMIC_H */

View File

@@ -0,0 +1,307 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _IA32_SYS_ASM_LINKAGE_H
#define _IA32_SYS_ASM_LINKAGE_H
#include <sys/stack.h>
#include <sys/trap.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASM /* The remainder of this file is only for assembly files */
/*
* make annoying differences in assembler syntax go away
*/
/*
* D16 and A16 are used to insert instruction prefixes; the
* macros help the assembler code be slightly more portable.
*/
#if !defined(__GNUC_AS__)
/*
* /usr/ccs/bin/as prefixes are parsed as separate instructions
*/
#define D16 data16;
#define A16 addr16;
/*
* (There are some weird constructs in constant expressions)
*/
#define _CONST(const) [const]
#define _BITNOT(const) -1!_CONST(const)
#define _MUL(a, b) _CONST(a \* b)
#else
/*
* Why not use the 'data16' and 'addr16' prefixes .. well, the
* assembler doesn't quite believe in real mode, and thus argues with
* us about what we're trying to do.
*/
#define D16 .byte 0x66;
#define A16 .byte 0x67;
#define _CONST(const) (const)
#define _BITNOT(const) ~_CONST(const)
#define _MUL(a, b) _CONST(a * b)
#endif
/*
* C pointers are different sizes between i386 and amd64.
* These constants can be used to compute offsets into pointer arrays.
*/
#if defined(__amd64)
#define CLONGSHIFT 3
#define CLONGSIZE 8
#define CLONGMASK 7
#elif defined(__i386)
#define CLONGSHIFT 2
#define CLONGSIZE 4
#define CLONGMASK 3
#endif
/*
* Since we know we're either ILP32 or LP64 ..
*/
#define CPTRSHIFT CLONGSHIFT
#define CPTRSIZE CLONGSIZE
#define CPTRMASK CLONGMASK
#if CPTRSIZE != (1 << CPTRSHIFT) || CLONGSIZE != (1 << CLONGSHIFT)
#error "inconsistent shift constants"
#endif
#if CPTRMASK != (CPTRSIZE - 1) || CLONGMASK != (CLONGSIZE - 1)
#error "inconsistent mask constants"
#endif
#define ASM_ENTRY_ALIGN 16
/*
* SSE register alignment and save areas
*/
#define XMM_SIZE 16
#define XMM_ALIGN 16
#if defined(__amd64)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp; \
movq %rsp, sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp
#elif defined(__i386)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp; \
movl %esp, sreg; \
addl $XMM_ALIGN, sreg; \
andl $_BITNOT(XMM_ALIGN-1), sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp;
#endif /* __i386 */
/*
* Profiling causes MCOUNT and RTMCOUNT to be defined according to
* the profiling type in use.
*/
#ifdef GPROF
#define MCOUNT(x) \
pushl %ebp; \
movl %esp, %ebp; \
call _mcount; \
popl %ebp
#endif /* GPROF */
#ifdef PROF
#define MCOUNT(x) \
/* CSTYLED */ \
.lcomm .L_/**/x/**/1, 4, 4; \
pushl %ebp; \
movl %esp, %ebp; \
/* CSTYLED */ \
movl $.L_/**/x/**/1, %edx; \
call _mcount; \
popl %ebp
#endif /* PROF */
/*
* if we are not profiling, MCOUNT should be defined to nothing
*/
#if !defined(PROF) && !defined(GPROF)
#define MCOUNT(x)
#endif /* !defined(PROF) && !defined(GPROF) */
#define RTMCOUNT(x) MCOUNT(x)
/*
* Macro to define weak symbol aliases. These are similar to the ANSI-C
* #pragma weak name = _name
* except that a compiler can determine the type on its own, while the
* assembler must be told. Hence, the second parameter must be the
* type of the symbol (e.g. function).
*/
#define ANSI_PRAGMA_WEAK(sym, stype) \
.weak sym; \
.type sym, @stype; \
/* CSTYLED */ \
sym = _/**/sym
/*
* Like ANSI_PRAGMA_WEAK(), but for unrelated names, as in:
* #pragma weak sym1 = sym2
*/
#define ANSI_PRAGMA_WEAK2(sym1, sym2, stype) \
.weak sym1; \
.type sym1, @stype; \
sym1 = sym2
/*
* ENTRY provides the standard procedure entry code and an easy way to
* insert the calls to mcount for profiling. ENTRY_NP is identical, but
* never calls mcount.
*/
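/*
 * Typical usage (hypothetical routine name):
 *
 *	ENTRY(my_routine)
 *	...body...
 *	ret
 *	SET_SIZE(my_routine)
 */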
#define ENTRY(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define RTENTRY(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: RTMCOUNT(x)
/*
* ENTRY2 is identical to ENTRY but provides two labels for the entry point.
*/
#define ENTRY2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y:
/*
* ALTENTRY provides for additional entry points.
*/
#define ALTENTRY(x) \
.globl x; \
.type x, @function; \
x:
/*
* DGDEF and DGDEF2 provide global data declarations.
*
* DGDEF provides a word-aligned word of storage.
*
* DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This
* implies this macro is best used for byte arrays.
*
* DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
*/
#define DGDEF2(name, sz) \
.data; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF3(name, sz, algn) \
.data; \
.align algn; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF(name) DGDEF3(name, 4, 4)
/*
* SET_SIZE trails a function and sets its size in the ELF symbol table.
*/
#define SET_SIZE(x) \
.size x, [.-x]
/*
* NWORD provides native word value.
*/
#if defined(__amd64)
/*CSTYLED*/
#define NWORD quad
#elif defined(__i386)
#define NWORD long
#endif /* __i386 */
#endif /* _ASM */
#ifdef __cplusplus
}
#endif
#endif /* _IA32_SYS_ASM_LINKAGE_H */