1 /* 2 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) 3 * 4 * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball. 5 * 6 */ 7 #ifndef _BITS_SYSCALLS_H 8 #define _BITS_SYSCALLS_H 9 #ifndef _SYSCALL_H 10 #error "Never use <bits/syscalls.h> directly; include <sys/syscall.h> instead." 11 #endif 12 13 #ifndef __ASSEMBLER__ 14 15 #include <errno.h> 16 17 /* 18 * Fine tuned code for errno handling in syscall wrappers. 19 * 20 * 1. __syscall_error(raw_syscall_ret_val) is used to set the errno (vs. 21 * the typical __set_errno). This helps elide the generated code for 22 * GOT fetch for __errno_location pointer etc, in each wrapper. 23 * 24 * 2. The call to above is also disguised in inline asm. This elides 25 * unconditional save/restore of a few callee regs which gcc almost 26 * always generates if the call is exposed 27 * 28 * 3. The function can't be hidden because wrappers from librt et all also 29 * call it. However hidden is not really needed to bypass PLT for 30 * intra-libc calls as the branch insn w/o @plt is sufficient. 31 */ 32 33 #ifdef IS_IN_rtld 34 /* ldso doesn't have real errno */ 35 #define ERRNO_ERRANDS(_sys_result) 36 #else /* !IS_IN_rtld */ 37 extern long __syscall_error (int); 38 #ifndef IS_IN_libc 39 /* Inter-libc callers use PLT */ 40 #define CALL_ERRNO_SETTER "bl __syscall_error@plt \n\t" 41 #else 42 /* intra-libc callers, despite PIC can bypass PLT */ 43 #define CALL_ERRNO_SETTER "bl __syscall_error \n\t" 44 #endif 45 46 #define ERRNO_ERRANDS(_sys_result) \ 47 __asm__ volatile ( \ 48 "st.a blink, [sp, -4] \n\t" \ 49 CALL_ERRNO_SETTER \ 50 "ld.ab blink, [sp, 4] \n\t" \ 51 :"+r" (_sys_result) \ 52 : \ 53 :"r1","r2","r3","r4","r5","r6", \ 54 "r7","r8","r9","r10","r11","r12" \ 55 ); 56 57 #endif /* IS_IN_rtld */ 58 59 /* -1 to -1023 as valid error values will suffice for some time */ 60 #define INTERNAL_SYSCALL_ERROR_P(val, err) \ 61 ((unsigned int) (val) > (unsigned int) -1024) 62 63 /* 64 * Standard sycall wrapper 65 * -Gets syscall name (conv to __NR_xxx) 66 * -sets errno, return success/error-codes 67 */ 68 #define INLINE_SYSCALL(name, nr_args, args...) \ 69 ({ \ 70 register int __res __asm__("r0"); \ 71 __res = INTERNAL_SYSCALL_NCS(__NR_##name, , nr_args, args); \ 72 if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P ((__res), ), 0)) \ 73 { \ 74 ERRNO_ERRANDS(__res); \ 75 } \ 76 __res; \ 77 }) 78 79 /* variant of INLINE_SYSCALL, gets syscall number 80 */ 81 #define INLINE_SYSCALL_NCS(num, nr_args, args...) \ 82 ({ \ 83 register int __res __asm__("r0"); \ 84 __res = INTERNAL_SYSCALL_NCS(num, , nr_args, args); \ 85 if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P ((__res), ), 0)) \ 86 { \ 87 ERRNO_ERRANDS(__res); \ 88 } \ 89 __res; \ 90 }) 91 92 /*------------------------------------------------------------------------- 93 * Mechanics of Trap - specific to ARC700 94 * 95 * Note the memory clobber is not strictly needed for intended semantics of 96 * the inline asm. However some of the cases, such as old-style 6 arg mmap 97 * gcc was generating code for inline syscall ahead of buffer packing needed 98 * for syscall itself. 99 *-------------------------------------------------------------------------*/ 100 101 #ifdef __A7__ 102 #define ARC_TRAP_INSN "trap0 \n\t" 103 #else 104 #define ARC_TRAP_INSN "trap_s 0 \n\t" 105 #endif 106 107 #define INTERNAL_SYSCALL_NCS(nm, err, nr_args, args...) \ 108 ({ \ 109 /* Per ABI, r0 is 1st arg and return reg */ \ 110 register int __ret __asm__("r0"); \ 111 register int _sys_num __asm__("r8"); \ 112 \ 113 LOAD_ARGS_##nr_args (nm, args) \ 114 \ 115 __asm__ volatile ( \ 116 ARC_TRAP_INSN \ 117 : "+r" (__ret) \ 118 : "r"(_sys_num) ASM_ARGS_##nr_args \ 119 : "memory"); \ 120 \ 121 __ret; \ 122 }) 123 124 /* Macros for setting up inline __asm__ input regs */ 125 #define ASM_ARGS_0 126 #define ASM_ARGS_1 ASM_ARGS_0, "r" (__ret) 127 #define ASM_ARGS_2 ASM_ARGS_1, "r" (_arg2) 128 #define ASM_ARGS_3 ASM_ARGS_2, "r" (_arg3) 129 #define ASM_ARGS_4 ASM_ARGS_3, "r" (_arg4) 130 #define ASM_ARGS_5 ASM_ARGS_4, "r" (_arg5) 131 #define ASM_ARGS_6 ASM_ARGS_5, "r" (_arg6) 132 #define ASM_ARGS_7 ASM_ARGS_6, "r" (_arg7) 133 134 /* Macros for converting sys-call wrapper args into sys call args */ 135 #define LOAD_ARGS_0(nm, arg) \ 136 _sys_num = (int) (nm); \ 137 138 #define LOAD_ARGS_1(nm, arg1) \ 139 __ret = (int) (arg1); \ 140 LOAD_ARGS_0 (nm, arg1) 141 142 /* 143 * Note that the use of _tmpX might look superflous, however it is needed 144 * to ensure that register variables are not clobbered if arg happens to be 145 * a function call itself. e.g. sched_setaffinity() calling getpid() for arg2 146 * 147 * Also this specific order of recursive calling is important to segregate 148 * the tmp args evaluation (function call case described above) and assigment 149 * of register variables 150 */ 151 #define LOAD_ARGS_2(nm, arg1, arg2) \ 152 int _tmp2 = (int) (arg2); \ 153 LOAD_ARGS_1 (nm, arg1) \ 154 register int _arg2 __asm__ ("r1") = _tmp2; 155 156 #define LOAD_ARGS_3(nm, arg1, arg2, arg3) \ 157 int _tmp3 = (int) (arg3); \ 158 LOAD_ARGS_2 (nm, arg1, arg2) \ 159 register int _arg3 __asm__ ("r2") = _tmp3; 160 161 #define LOAD_ARGS_4(nm, arg1, arg2, arg3, arg4) \ 162 int _tmp4 = (int) (arg4); \ 163 LOAD_ARGS_3 (nm, arg1, arg2, arg3) \ 164 register int _arg4 __asm__ ("r3") = _tmp4; 165 166 #define LOAD_ARGS_5(nm, arg1, arg2, arg3, arg4, arg5) \ 167 int _tmp5 = (int) (arg5); \ 168 LOAD_ARGS_4 (nm, arg1, arg2, arg3, arg4) \ 169 register int _arg5 __asm__ ("r4") = _tmp5; 170 171 #define LOAD_ARGS_6(nm, arg1, arg2, arg3, arg4, arg5, arg6) \ 172 int _tmp6 = (int) (arg6); \ 173 LOAD_ARGS_5 (nm, arg1, arg2, arg3, arg4, arg5) \ 174 register int _arg6 __asm__ ("r5") = _tmp6; 175 176 #define LOAD_ARGS_7(nm, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\ 177 int _tmp7 = (int) (arg7); \ 178 LOAD_ARGS_6 (nm, arg1, arg2, arg3, arg4, arg5, arg6) \ 179 register int _arg7 __asm__ ("r6") = _tmp7; 180 181 #else 182 183 #ifdef __A7__ 184 #define ARC_TRAP_INSN trap0 185 #else 186 #define ARC_TRAP_INSN trap_s 0 187 #endif 188 189 #endif /* __ASSEMBLER__ */ 190 191 #endif /* _BITS_SYSCALLS_H */ 192