/* mask(): enter a critical section for the IMASK atomic model by writing
 * the SH status register (SR); returns the previous SR value so the
 * caller can restore it later with unmask().
 * NOTE(review): this chunk is elided — the asm template lines and the
 * declaration/return of `sr` are missing between the fragments below.
 * Presumably the asm saves SR into %0 and writes SR back with the
 * interrupt-mask bits set (hence the "r0" scratch clobber) — confirm
 * against the full file. */
7 static inline unsigned mask()
10 __asm__ __volatile__ ( "\n"
15 : "=&r"(sr) : : "memory", "r0" );
/* unmask(): leave the IMASK critical section by restoring the SR value
 * previously saved by mask().  "ldc %0,sr" loads the operand back into
 * the status register; the "memory" clobber keeps surrounding memory
 * accesses from being reordered across the restore. */
19 static inline void unmask(unsigned sr)
21 __asm__ __volatile__ ( "ldc %0,sr" : : "r"(sr) : "memory" );
24 /* gusa is a hack in the kernel which lets you create a sequence of instructions
25 * which will be restarted if the process is preempted in the middle of the
26 * sequence. It will do for implementing atomics on non-smp systems. ABI is:
27 * r0 = address of first instruction after the atomic sequence
28 * r1 = original stack pointer
29 * r15 = -1 * length of atomic sequence in bytes
/* Registers any gusa sequence may overwrite: r0 (mova target / scratch)
 * and r1 (holds the original stack pointer per the ABI described above),
 * plus a compiler memory barrier. */
31 #define GUSA_CLOBBERS "r0", "r1", "memory"
/* Open a gusa sequence: set r15 to minus the sequence length (per the
 * ABI above) and load the current value of *mem into `old` at label 0.
 * `nop` is "" or "\tnop\n" to keep the mova target 4-byte aligned (see
 * GUSA_START_ODD/EVEN below).  NOTE(review): the macro's continuation
 * lines between original lines 32 and 37 (the mova / r1 setup) are
 * elided in this chunk. */
32 #define GUSA_START(mem,old,nop) \
37 " mov #(0f-1f), r15\n" \
38 "0: mov.l @" mem ", " old "\n"
39 /* the target of mova must be 4 byte aligned, so we may need a nop */
40 #define GUSA_START_ODD(mem,old) GUSA_START(mem,old,"")
41 #define GUSA_START_EVEN(mem,old) GUSA_START(mem,old,"\tnop\n")
/* Close a gusa sequence: the mov.l of `new` into *mem is the final,
 * committing store.  NOTE(review): the macro's last continuation line
 * (original line 44, presumably label 1: restoring r15 from r1) is
 * elided in this chunk — confirm against the full file. */
42 #define GUSA_END(mem,new) \
43 " mov.l " new ", @" mem "\n" \
/* Atomic compare-and-swap: if *p == t, store s into *p; returns the old
 * value of *p.  Dispatches on the runtime-selected atomic model:
 * hardware LLSC, interrupt masking (IMASK), or — the default — a kernel
 * gusa restartable sequence.  The extra "t" clobber says the asm alters
 * the T (test/condition) flag.  NOTE(review): this chunk is elided — the
 * IMASK branch body and the compare/branch asm lines between
 * GUSA_START_EVEN and the operand list are not visible here. */
46 int __sh_cas(volatile int *p, int t, int s)
48 if (__sh_atomic_model == SH_A_LLSC) return __sh_cas_llsc(p, t, s);
50 if (__sh_atomic_model == SH_A_IMASK) {
60 GUSA_START_EVEN("%1", "%0")
64 : "=&r"(old) : "r"(p), "r"(t), "r"(s) : GUSA_CLOBBERS, "t");
/* Atomic exchange: store v into *x and return the previous value.  Same
 * three-way model dispatch as __sh_cas (LLSC helper / IMASK / gusa).
 * NOTE(review): this chunk is elided — the IMASK branch body and most of
 * the gusa asm template are not visible here. */
68 int __sh_swap(volatile int *x, int v)
70 if (__sh_atomic_model == SH_A_LLSC) return __sh_swap_llsc(x, v);
72 if (__sh_atomic_model == SH_A_IMASK) {
82 GUSA_START_EVEN("%1", "%0")
84 : "=&r"(old) : "r"(x), "r"(v) : GUSA_CLOBBERS);
/* Atomic fetch-and-add: *x += v, returning the pre-add value.  The gusa
 * path uses a second scratch output (`dummy`) alongside `old` —
 * presumably to hold the computed sum while `old` keeps the original
 * value; confirm against the elided asm.  NOTE(review): this chunk is
 * elided — the IMASK branch body and the add/store asm lines are not
 * visible here. */
88 int __sh_fetch_add(volatile int *x, int v)
90 if (__sh_atomic_model == SH_A_LLSC) return __sh_fetch_add_llsc(x, v);
92 if (__sh_atomic_model == SH_A_IMASK) {
101 __asm__ __volatile__(
102 GUSA_START_EVEN("%2", "%0")
106 : "=&r"(old), "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
/* Atomic store: delegates to the LLSC helper when that model is
 * selected; otherwise performs the store via inline asm with a "memory"
 * clobber.  NOTE(review): the asm template line (original line 114) is
 * elided here, so the exact store/barrier instruction sequence is not
 * visible — confirm against the full file. */
110 void __sh_store(volatile int *p, int x)
112 if (__sh_atomic_model == SH_A_LLSC) return __sh_store_llsc(p, x);
113 __asm__ __volatile__(
115 : : "r"(p), "r"(x) : "memory");
/* Atomic bitwise AND: *x &= v; no value is returned, so the gusa path's
 * only output is a scratch register (`dummy`).  Uses GUSA_START_ODD
 * (no padding nop) — presumably because this sequence's length parity
 * already aligns the mova target; see the alignment comment at the
 * macro definitions.  NOTE(review): this chunk is elided — the rest of
 * the IMASK branch after mask() and the and/store asm lines are not
 * visible here. */
118 void __sh_and(volatile int *x, int v)
120 if (__sh_atomic_model == SH_A_LLSC) return __sh_and_llsc(x, v);
122 if (__sh_atomic_model == SH_A_IMASK) {
123 unsigned sr = mask();
131 __asm__ __volatile__(
132 GUSA_START_ODD("%1", "%0")
135 : "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
/* Atomic bitwise OR: *x |= v; structurally parallel to __sh_and (LLSC
 * helper / IMASK with mask()/unmask() / gusa sequence with a scratch
 * output only, GUSA_START_ODD variant).  NOTE(review): this chunk is
 * elided — the rest of the IMASK branch after mask() and the or/store
 * asm lines are not visible here. */
138 void __sh_or(volatile int *x, int v)
140 if (__sh_atomic_model == SH_A_LLSC) return __sh_or_llsc(x, v);
142 if (__sh_atomic_model == SH_A_IMASK) {
143 unsigned sr = mask();
151 __asm__ __volatile__(
152 GUSA_START_ODD("%1", "%0")
155 : "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);