X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Finternal%2Fatomic.h;h=f938879b07798380254791406a4de252db0833db;hb=1ef37aa00ea830dfda76e04e3d941cafa74d8b76;hp=e74e45352574859ed06fbfea614750068846c623;hpb=0bcbb53dc492aae22039445bfdb609bd8d615992;p=musl

diff --git a/src/internal/atomic.h b/src/internal/atomic.h
index e74e4535..f938879b 100644
--- a/src/internal/atomic.h
+++ b/src/internal/atomic.h
@@ -1,110 +1,318 @@
-#ifndef _INTERNAL_ATOMIC_H
-#define _INTERNAL_ATOMIC_H
+#ifndef _ATOMIC_H
+#define _ATOMIC_H
 
 #include <stdint.h>
 
-static inline int a_ctz_64(uint64_t x)
+#include "atomic_arch.h"
+
+#ifdef a_ll
+
+#ifndef a_pre_llsc
+#define a_pre_llsc()
+#endif
+
+#ifndef a_post_llsc
+#define a_post_llsc()
+#endif
+
+#ifndef a_cas
+#define a_cas a_cas
+static inline int a_cas(volatile int *p, int t, int s)
 {
-	int r;
-	__asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
-		: "=r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
-	return r;
+	int old;
+	a_pre_llsc();
+	do old = a_ll(p);
+	while (old==t && !a_sc(p, s));
+	a_post_llsc();
+	return old;
 }
+#endif
 
-
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#ifndef a_swap
+#define a_swap a_swap
+static inline int a_swap(volatile int *p, int v)
 {
-	__asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
-		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
+	int old;
+	a_pre_llsc();
+	do old = a_ll(p);
+	while (!a_sc(p, v));
+	a_post_llsc();
+	return old;
 }
+#endif
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#ifndef a_fetch_add
+#define a_fetch_add a_fetch_add
+static inline int a_fetch_add(volatile int *p, int v)
 {
-	__asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
-		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
+	int old;
+	a_pre_llsc();
+	do old = a_ll(p);
+	while (!a_sc(p, (unsigned)old + v));
+	a_post_llsc();
+	return old;
 }
+#endif
 
-static inline void a_store_l(volatile void *p, long x)
+#ifndef a_fetch_and
+#define a_fetch_and a_fetch_and
+static inline int a_fetch_and(volatile int *p, int v)
 {
-	__asm__( "movl %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
+	int old;
+	a_pre_llsc();
+	do old = a_ll(p);
+	while (!a_sc(p, old & v));
+	a_post_llsc();
+	return old;
 }
+#endif
 
-static inline void a_or_l(volatile void *p, long v)
+#ifndef a_fetch_or
+#define a_fetch_or a_fetch_or
+static inline int a_fetch_or(volatile int *p, int v)
 {
-	__asm__( "lock ; orl %1, %0"
-		: "=m"(*(long *)p) : "r"(v) : "memory" );
+	int old;
+	a_pre_llsc();
+	do old = a_ll(p);
+	while (!a_sc(p, old | v));
+	a_post_llsc();
+	return old;
 }
+#endif
+
+#endif
 
+#ifdef a_ll_p
+
+#ifndef a_cas_p
+#define a_cas_p a_cas_p
 static inline void *a_cas_p(volatile void *p, void *t, void *s)
 {
-	__asm__( "lock ; cmpxchg %3, %1"
-		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-	return t;
+	void *old;
+	a_pre_llsc();
+	do old = a_ll_p(p);
+	while (old==t && !a_sc_p(p, s));
+	a_post_llsc();
+	return old;
+}
+#endif
+
+#endif
+
+#ifndef a_cas
+#error missing definition of a_cas
+#endif
+
+#ifndef a_swap
+#define a_swap a_swap
+static inline int a_swap(volatile int *p, int v)
+{
+	int old;
+	do old = *p;
+	while (a_cas(p, old, v) != old);
+	return old;
+}
+#endif
+
+#ifndef a_fetch_add
+#define a_fetch_add a_fetch_add
+static inline int a_fetch_add(volatile int *p, int v)
+{
+	int old;
+	do old = *p;
+	while (a_cas(p, old, (unsigned)old+v) != old);
+	return old;
+}
+#endif
+
+#ifndef a_fetch_and
+#define a_fetch_and a_fetch_and
+static inline int a_fetch_and(volatile int *p, int v)
+{
+	int old;
+	do old = *p;
+	while (a_cas(p, old, old&v) != old);
+	return old;
+}
+#endif
+
+#ifndef a_fetch_or
+#define a_fetch_or a_fetch_or
+static inline int a_fetch_or(volatile int *p, int v)
+{
+	int old;
+	do old = *p;
+	while (a_cas(p, old, old|v) != old);
+	return old;
+}
+#endif
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#ifndef a_and
+#define a_and a_and
+static inline void a_and(volatile int *p, int v)
 {
-	__asm__( "lock ; cmpxchg %3, %1"
-		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-	return t;
+	a_fetch_and(p, v);
 }
+#endif
 
-static inline void *a_swap_p(void *volatile *x, void *v)
+#ifndef a_or
+#define a_or a_or
+static inline void a_or(volatile int *p, int v)
 {
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
-	return v;
+	a_fetch_or(p, v);
 }
-static inline long a_swap_l(volatile void *x, long v)
+#endif
+
+#ifndef a_inc
+#define a_inc a_inc
+static inline void a_inc(volatile int *p)
 {
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
-	return v;
+	a_fetch_add(p, 1);
 }
+#endif
 
-static inline void a_or(volatile void *p, int v)
+#ifndef a_dec
+#define a_dec a_dec
+static inline void a_dec(volatile int *p)
 {
-	__asm__( "lock ; orl %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+	a_fetch_add(p, -1);
 }
+#endif
 
-static inline void a_and(volatile void *p, int v)
+#ifndef a_store
+#define a_store a_store
+static inline void a_store(volatile int *p, int v)
+{
+#ifdef a_barrier
+	a_barrier();
+	*p = v;
+	a_barrier();
+#else
+	a_swap(p, v);
+#endif
+}
+#endif
+
+#ifndef a_barrier
+#define a_barrier a_barrier
+static inline void a_barrier()
+{
+	volatile int tmp = 0;
+	a_cas(&tmp, 0, 0);
+}
+#endif
+
+#ifndef a_spin
+#define a_spin a_barrier
+#endif
+
+#ifndef a_and_64
+#define a_and_64 a_and_64
+static inline void a_and_64(volatile uint64_t *p, uint64_t v)
 {
-	__asm__( "lock ; andl %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+	union { uint64_t v; uint32_t r[2]; } u = { v };
+	if (u.r[0]+1) a_and((int *)p, u.r[0]);
+	if (u.r[1]+1) a_and((int *)p+1, u.r[1]);
 }
+#endif
 
-static inline int a_swap(volatile int *x, int v)
+#ifndef a_or_64
+#define a_or_64 a_or_64
+static inline void a_or_64(volatile uint64_t *p, uint64_t v)
 {
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
-	return v;
+	union { uint64_t v; uint32_t r[2]; } u = { v };
+	if (u.r[0]) a_or((int *)p, u.r[0]);
+	if (u.r[1]) a_or((int *)p+1, u.r[1]);
 }
+#endif
 
-#define a_xchg a_swap
+#ifndef a_cas_p
+typedef char a_cas_p_undefined_but_pointer_not_32bit[-sizeof(char) == 0xffffffff ? 1 : -1];
+#define a_cas_p a_cas_p
+static inline void *a_cas_p(volatile void *p, void *t, void *s)
+{
+	return (void *)a_cas((volatile int *)p, (int)t, (int)s);
+}
+#endif
 
-static inline int a_fetch_add(volatile int *x, int v)
+#ifndef a_or_l
+#define a_or_l a_or_l
+static inline void a_or_l(volatile void *p, long v)
 {
-	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
-	return v;
+	if (sizeof(long) == sizeof(int)) a_or(p, v);
+	else a_or_64(p, v);
 }
+#endif
 
-static inline void a_inc(volatile int *x)
+#ifndef a_crash
+#define a_crash a_crash
+static inline void a_crash()
 {
-	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
+	*(volatile char *)0=0;
 }
+#endif
 
-static inline void a_dec(volatile int *x)
+#ifndef a_ctz_32
+#define a_ctz_32 a_ctz_32
+static inline int a_ctz_32(uint32_t x)
 {
-	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
+#ifdef a_clz_32
+	return 31-a_clz_32(x&-x);
+#else
+	static const char debruijn32[32] = {
+		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
+		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
+	};
+	return debruijn32[(x&-x)*0x076be629 >> 27];
+#endif
 }
+#endif
 
-static inline void a_store(volatile int *p, int x)
+#ifndef a_ctz_64
+#define a_ctz_64 a_ctz_64
+static inline int a_ctz_64(uint64_t x)
 {
-	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
+	static const char debruijn64[64] = {
+		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
+		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
+		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
+		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
+	};
+	if (sizeof(long) < 8) {
+		uint32_t y = x;
+		if (!y) {
+			y = x>>32;
+			return 32 + a_ctz_32(y);
+		}
+		return a_ctz_32(y);
+	}
+	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
 }
+#endif
 
-static inline void a_spin()
+static inline int a_ctz_l(unsigned long x)
 {
-	__asm__ __volatile__( "pause" : : : "memory" );
+	return (sizeof(long) < 8) ? a_ctz_32(x) : a_ctz_64(x);
 }
 
+#ifndef a_clz_64
+#define a_clz_64 a_clz_64
+static inline int a_clz_64(uint64_t x)
+{
+#ifdef a_clz_32
+	if (x>>32)
+		return a_clz_32(x>>32);
+	return a_clz_32(x) + 32;
+#else
+	uint32_t y;
+	int r;
+	if (x>>32) y=x>>32, r=0; else y=x, r=32;
+	if (y>>16) y>>=16; else r |= 16;
+	if (y>>8) y>>=8; else r |= 8;
+	if (y>>4) y>>=4; else r |= 4;
+	if (y>>2) y>>=2; else r |= 2;
+	return r | !(y>>1);
+#endif
+}
+#endif
 
 #endif
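
Notes (not part of the patch):

The generic a_cas above assumes only that atomic_arch.h supplies a_ll/a_sc
with load-linked/store-conditional semantics: a_sc(p,v) stores v and reports
success only if *p has not been written since the matching a_ll. A minimal
single-threaded sketch of that contract, emulating a_ll/a_sc with the
GCC/Clang __sync builtin (the emulation and the llsc_val bookkeeping are
illustrative assumptions, not musl code):

#include <stdio.h>

static int llsc_val; /* value observed by the last a_ll */

static int a_ll(volatile int *p)
{
	return llsc_val = *p;
}

static int a_sc(volatile int *p, int v)
{
	/* succeeds iff *p is unchanged since a_ll (modulo ABA,
	   which real hardware LL/SC rules out) */
	return __sync_bool_compare_and_swap(p, llsc_val, v);
}

/* the generic CAS loop from the header, minus the barrier hooks */
static int a_cas(volatile int *p, int t, int s)
{
	int old;
	do old = a_ll(p);
	while (old==t && !a_sc(p, s));
	return old;
}

int main(void)
{
	volatile int x = 5;
	printf("%d\n", a_cas(&x, 5, 7)); /* 5: swap happened */
	printf("%d\n", x);               /* 7 */
	printf("%d\n", a_cas(&x, 5, 9)); /* 7: mismatch, x untouched */
	return 0;
}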
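a_and_64/a_or_64 split the 64-bit operand into two 32-bit halves in memory
order: the union overlays v exactly as it is laid out, so u.r[0] is always
the half that (int *)p points at, on either endianness. Halves that would be
no-ops are skipped: all-ones for AND (u.r[i]+1 == 0), zero for OR. A
non-atomic sketch of the same decomposition, under hypothetical names, to
check the arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static void and_64_split(uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	if (u.r[0]+1) *(uint32_t *)p &= u.r[0];      /* low-addressed half */
	if (u.r[1]+1) *((uint32_t *)p+1) &= u.r[1];  /* high-addressed half */
}

int main(void)
{
	uint64_t x = 0xffeeddccbbaa9988ull, m = 0xffffffff0f0f0f0full;
	uint64_t want = x & m;
	and_64_split(&x, m); /* the all-ones half is skipped */
	assert(x == want);
	printf("%016llx\n", (unsigned long long)x);
	return 0;
}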
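The table-based a_ctz_32 is a de Bruijn sequence trick: x&-x isolates the
lowest set bit, multiplying by 0x076be629 slides a distinct 5-bit pattern
into the top bits for each bit position, and the table maps that pattern
back to the bit index. A quick self-check, with the table and constant
copied verbatim from the header:

#include <stdint.h>
#include <assert.h>

static int ctz32(uint32_t x)
{
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	return debruijn32[(x&-x)*0x076be629 >> 27];
}

int main(void)
{
	int i;
	for (i = 0; i < 32; i++)
		assert(ctz32((uint32_t)1 << i) == i);
	assert(ctz32(0xffff0000) == 16); /* only the lowest set bit counts */
	return 0;
}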