diff --git a/arch/arm/syscall_arch.h b/arch/arm/syscall_arch.h
index 845f2e4d..53fb155c 100644
--- a/arch/arm/syscall_arch.h
+++ b/arch/arm/syscall_arch.h
@@ -3,61 +3,105 @@ ((union { long long ll; long l[2]; }){ .ll = x }).l[1]
 #define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
 
-long (__syscall)(long, ...);
+#ifdef __thumb__
+/* Avoid use of r7 in asm constraints when producing thumb code,
+ * since it's reserved as frame pointer and might not be supported. */
+#define __ASM____R7__
+#define __asm_syscall(...) do { \
+	__asm__ __volatile__ ( "mov %1,r7 ; mov r7,%2 ; svc 0 ; mov r7,%1" \
+	: "=r"(r0), "=&r"((int){0}) : __VA_ARGS__ : "memory"); \
+	return r0; \
+	} while (0)
+
+#else
+
+#define __ASM____R7__ __asm__("r7")
 
 #define __asm_syscall(...) do { \
 	__asm__ __volatile__ ( "svc 0" \
 	: "=r"(r0) : __VA_ARGS__ : "memory"); \
 	return r0; \
 	} while (0)
+#endif
+
+/* For thumb2, we can allow 8-bit immediate syscall numbers, saving a
+ * register in the above dance around r7. Does not work for thumb1 where
+ * only movs, not mov, supports immediates, and we can't use movs because
+ * it doesn't support high regs. */
+#ifdef __thumb2__
+#define R7_OPERAND "rI"(r7)
+#else
+#define R7_OPERAND "r"(r7)
+#endif
 
 static inline long __syscall0(long n)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0");
-	__asm_syscall("r"(r7));
+	__asm_syscall(R7_OPERAND);
 }
 
 static inline long __syscall1(long n, long a)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
-	__asm_syscall("r"(r7), "0"(r0));
+	__asm_syscall(R7_OPERAND, "0"(r0));
 }
 
 static inline long __syscall2(long n, long a, long b)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1));
 }
 
 static inline long __syscall3(long n, long a, long b, long c)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
 	register long r2 __asm__("r2") = c;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1), "r"(r2));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2));
 }
 
 static inline long __syscall4(long n, long a, long b, long c, long d)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
 	register long r2 __asm__("r2") = c;
 	register long r3 __asm__("r3") = d;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1), "r"(r2), "r"(r3));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3));
 }
 
 static inline long __syscall5(long n, long a, long b, long c, long d, long e)
 {
-	return (__syscall)(n, a, b, c, d, e);
+	register long r7 __ASM____R7__ = n;
+	register long r0 __asm__("r0") = a;
+	register long r1 __asm__("r1") = b;
+	register long r2 __asm__("r2") = c;
+	register long r3 __asm__("r3") = d;
+	register long r4 __asm__("r4") = e;
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
 }
 
 static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
 {
-	return (__syscall)(n, a, b, c, d, e, f);
+	register long r7 __ASM____R7__ = n;
+	register long r0 __asm__("r0") = a;
+	register long r1 __asm__("r1") = b;
+	register long r2 __asm__("r2") = c;
+	register long r3 __asm__("r3") = d;
+	register long r4 __asm__("r4") = e;
+	register long r5 __asm__("r5") = f;
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
 }
+
+#define VDSO_USEFUL
+#define VDSO_CGT_SYM "__vdso_clock_gettime"
+#define VDSO_CGT_VER "LINUX_2.6"
+
+#define SYSCALL_FADVISE_6_ARG
+
+#define SYSCALL_IPC_BROKEN_MODE
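
Usage sketch (not part of the patch): the wrappers in this header bind the syscall number to r7 and the arguments to r0-r5, then trap with "svc 0", and the kernel's return value comes back in r0. Below is a minimal standalone equivalent of the non-thumb __syscall3 path, assuming an ARM-mode Linux EABI target; the names my_syscall3 and main are hypothetical, and the literal 4 is assumed to be __NR_write on arm-EABI.

/* Sketch only: mirrors the non-thumb __syscall3 pattern above.
 * my_syscall3 and main are hypothetical; 4 = write on arm-EABI. */
static long my_syscall3(long n, long a, long b, long c)
{
	register long r7 __asm__("r7") = n;  /* syscall number in r7 */
	register long r0 __asm__("r0") = a;  /* arguments in r0-r2 */
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	__asm__ __volatile__ ("svc 0"
		: "=r"(r0)
		: "r"(r7), "0"(r0), "r"(r1), "r"(r2)
		: "memory");
	return r0;  /* result, or a negated errno value on failure */
}

int main(void)
{
	const char msg[] = "hello via svc\n";
	/* write(1, msg, len) via the raw trap */
	return my_syscall3(4, 1, (long)msg, sizeof msg - 1) < 0;
}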