X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=arch%2Fsh%2Fatomic.h;h=f2e6dacbee9856103a6767f9409ccaf77de4fdde;hb=cd7159e7be73451befedce2e9dfd9f6a4c3b21f0;hp=93ab54fe11abdf01eeacb6330500ee996294e2b1;hpb=90e51e45f57eb0e1564b3610b9bbd768215e5d6d;p=musl diff --git a/arch/sh/atomic.h b/arch/sh/atomic.h index 93ab54fe..f2e6dacb 100644 --- a/arch/sh/atomic.h +++ b/arch/sh/atomic.h @@ -22,6 +22,88 @@ static inline int a_ctz_64(uint64_t x) return a_ctz_l(y); } +#define LLSC_CLOBBERS "r0", "t", "memory" +#define LLSC_START(mem) "synco\n" \ + "0: movli.l @" mem ", r0\n" +#define LLSC_END(mem) \ + "1: movco.l r0, @" mem "\n" \ + " bf 0b\n" \ + " synco\n" + +static inline int __sh_cas_llsc(volatile int *p, int t, int s) +{ + int old; + __asm__ __volatile__( + LLSC_START("%1") + " mov r0, %0\n" + " cmp/eq %0, %2\n" + " bf 1f\n" + " mov %3, r0\n" + LLSC_END("%1") + : "=&r"(old) : "r"(p), "r"(t), "r"(s) : LLSC_CLOBBERS); + return old; +} + +static inline int __sh_swap_llsc(volatile int *x, int v) +{ + int old; + __asm__ __volatile__( + LLSC_START("%1") + " mov r0, %0\n" + " mov %2, r0\n" + LLSC_END("%1") + : "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS); + return old; +} + +static inline int __sh_fetch_add_llsc(volatile int *x, int v) +{ + int old; + __asm__ __volatile__( + LLSC_START("%1") + " mov r0, %0\n" + " add %2, r0\n" + LLSC_END("%1") + : "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS); + return old; +} + +static inline void __sh_store_llsc(volatile int *p, int x) +{ + __asm__ __volatile__( + " synco\n" + " mov.l %1, @%0\n" + " synco\n" + : : "r"(p), "r"(x) : "memory"); +} + +static inline void __sh_and_llsc(volatile int *x, int v) +{ + __asm__ __volatile__( + LLSC_START("%0") + " and %1, r0\n" + LLSC_END("%0") + : : "r"(x), "r"(v) : LLSC_CLOBBERS); +} + +static inline void __sh_or_llsc(volatile int *x, int v) +{ + __asm__ __volatile__( + LLSC_START("%0") + " or %1, r0\n" + LLSC_END("%0") + : : "r"(x), "r"(v) : LLSC_CLOBBERS); +} + +#ifdef __SH4A__ +#define a_cas(p,t,s) __sh_cas_llsc(p,t,s) +#define a_swap(x,v) __sh_swap_llsc(x,v) +#define a_fetch_add(x,v) __sh_fetch_add_llsc(x, v) +#define a_store(x,v) __sh_store_llsc(x, v) +#define a_and(x,v) __sh_and_llsc(x, v) +#define a_or(x,v) __sh_or_llsc(x, v) +#else + int __sh_cas(volatile int *, int, int); int __sh_swap(volatile int *, int); int __sh_fetch_add(volatile int *, int); @@ -35,6 +117,7 @@ void __sh_or(volatile int *, int); #define a_store(x,v) __sh_store(x, v) #define a_and(x,v) __sh_and(x, v) #define a_or(x,v) __sh_or(x, v) +#endif static inline void *a_cas_p(volatile void *p, void *t, void *s) { @@ -51,8 +134,11 @@ static inline void a_dec(volatile int *x) a_fetch_add(x, -1); } -static inline void a_spin() +#define a_spin a_barrier + +static inline void a_barrier() { + a_cas(&(int){0}, 0, 0); } static inline void a_crash()
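
Illustrative usage sketch (not part of the patch): the a_* primitives this header defines are consumed by callers roughly as below. This is a minimal test-and-set spinlock assuming only the a_cas, a_spin and a_store interfaces shown above; the lock variable and the lock_acquire/lock_release names are hypothetical.

#include "atomic.h"

static volatile int lock;

static void lock_acquire(void)
{
	/* a_cas returns the old value of the location; 0 means the lock
	 * was free and we atomically took it */
	while (a_cas(&lock, 0, 1))
		a_spin();
}

static void lock_release(void)
{
	/* on the SH4A LLSC path added here, a_store brackets the plain
	 * mov.l with synco, so it acts as a full-barrier store */
	a_store(&lock, 0);
}

With this patch a_spin is an alias for a_barrier, which is itself implemented as a dummy a_cas on a stack temporary, so each iteration of the wait loop above also acts as a full memory barrier.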