/* Copyright (C) 1992,1997-2003,2004,2005,2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _BITS_SYSCALLS_H
#define _BITS_SYSCALLS_H

#ifndef _SYSCALL_H
# error "Never use <bits/syscalls.h> directly; include <sys/syscall.h> instead."
#endif

#ifndef __ASSEMBLER__

# include <errno.h>

# ifdef SHARED
#  define INLINE_VSYSCALL(name, nr, args...) \
  ({									      \
    __label__ out;							      \
    __label__ iserr;							      \
    INTERNAL_SYSCALL_DECL (sc_err);					      \
    long int sc_ret;							      \
									      \
    if (__vdso_##name != NULL)						      \
      {									      \
	sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args);   \
	if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
	  goto out;							      \
	if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS)		      \
	  goto iserr;							      \
      }									      \
									      \
    sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args);		      \
    if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err))			      \
      {									      \
      iserr:								      \
        __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));		      \
        sc_ret = -1L;							      \
      }									      \
  out:									      \
    sc_ret;								      \
  })
# else
#  define INLINE_VSYSCALL(name, nr, args...) \
  INLINE_SYSCALL (name, nr, ##args)
# endif
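
/* Usage sketch (illustrative only, not part of this header): a libc wrapper
   built on INLINE_VSYSCALL is expected to look roughly like the function
   below, assuming a __vdso_clock_gettime pointer resolved at startup (as
   the HAVE_CLOCK_GETTIME_VSYSCALL define further down suggests).  The macro
   tries the vDSO entry first, falls back to the real system call on ENOSYS,
   and on failure sets errno and evaluates to -1:

     int
     __clock_gettime (clockid_t clock_id, struct timespec *tp)
     {
       return INLINE_VSYSCALL (clock_gettime, 2, clock_id, tp);
     }
 */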

# ifdef SHARED
#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
  ({									      \
    __label__ out;							      \
    long int v_ret;							      \
									      \
    if (__vdso_##name != NULL)						      \
      {									      \
	v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
	if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err)			      \
	    || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS)		      \
	  goto out;							      \
      }									      \
    v_ret = INTERNAL_SYSCALL (name, err, nr, ##args);			      \
  out:									      \
    v_ret;								      \
  })
# else
#  define INTERNAL_VSYSCALL(name, err, nr, args...) \
  INTERNAL_SYSCALL (name, err, nr, ##args)
# endif

# define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...)	      \
  ({									      \
    long int sc_ret = ENOSYS;						      \
									      \
    if (__vdso_##name != NULL)						      \
      sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args);	      \
    else								      \
      err = 1 << 28;							      \
    sc_ret;								      \
  })

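/* Usage sketch (illustrative only): INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK is
   meant for symbols that exist only in the vDSO.  The hypothetical caller
   below assumes a __vdso_get_tbfreq pointer that is not declared in this
   header; it checks the error flag exactly as with the other INTERNAL_*
   macros.  When the vDSO entry is missing, err carries the CR0.SO bit
   (1 << 28) and sc_ret holds ENOSYS:

     INTERNAL_SYSCALL_DECL (err);
     long int freq = INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (get_tbfreq, err, 0);
     if (INTERNAL_SYSCALL_ERROR_P (freq, err))
       freq = 0;	/* vDSO entry unavailable or it failed.  */
 */
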
/* List of system calls which are supported as vsyscalls.  */
# define HAVE_CLOCK_GETRES_VSYSCALL	1
# define HAVE_CLOCK_GETTIME_VSYSCALL	1

/* Define a macro which expands inline into the wrapper code for a vDSO
   call.  This is for internal calls that do not need to handle errors in
   the usual way; it never touches errno.  The vDSO routine is reached with
   an ordinary indirect call through CTR, so it clobbers the same registers
   as a function call (including LR and CTR); like a system call, it signals
   an error return status by setting CR0.SO, which the macro captures with
   mfcr into err.  */
# define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
  ({									      \
    register void *r0  __asm__ ("r0");					      \
    register long int r3  __asm__ ("r3");				      \
    register long int r4  __asm__ ("r4");				      \
    register long int r5  __asm__ ("r5");				      \
    register long int r6  __asm__ ("r6");				      \
    register long int r7  __asm__ ("r7");				      \
    register long int r8  __asm__ ("r8");				      \
    register long int r9  __asm__ ("r9");				      \
    register long int r10 __asm__ ("r10");				      \
    register long int r11 __asm__ ("r11");				      \
    register long int r12 __asm__ ("r12");				      \
    LOAD_ARGS_##nr (funcptr, args);					      \
    __asm__ __volatile__						      \
      ("mtctr %0\n\t"							      \
       "bctrl\n\t"							      \
       "mfcr %0"							      \
       : "=&r" (r0),							      \
	 "=&r" (r3), "=&r" (r4), "=&r" (r5),  "=&r" (r6),  "=&r" (r7),	      \
	 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12)	      \
       : ASM_INPUT_##nr							      \
       : "cr0", "ctr", "lr", "memory");					      \
    err = (long int) r0;						      \
    (int) r3;								      \
  })
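
/* Usage sketch (illustrative only): INTERNAL_VSYSCALL_NCS is the
   "non-constant syscall" form the macros above expand to; it receives the
   vDSO entry as an explicit function pointer and reports errors through the
   separate err variable.  Assuming __vdso_clock_gettime has already been
   resolved:

     INTERNAL_SYSCALL_DECL (err);
     struct timespec ts;
     long int ret = INTERNAL_VSYSCALL_NCS (__vdso_clock_gettime, err, 2,
					   CLOCK_MONOTONIC, &ts);
 */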

/* Define a macro which expands inline into the wrapper code for a system
   call.  This is for internal calls that do not need to handle errors in
   the usual way; it never touches errno.  On powerpc a system call clobbers
   basically the same registers as a function call would, except that LR is
   preserved (the assembly wrappers rely on this for their "sc; bnslr+"
   sequence) and in CR only CR0.SO is clobbered, to signal an error return
   status.  */

# undef INTERNAL_SYSCALL_DECL
# define INTERNAL_SYSCALL_DECL(err) long int err __attribute__((unused))

# define INTERNAL_SYSCALL_NCS(name, err, nr, args...)			\
(__extension__ \
  ({									\
    register long int r0  __asm__ ("r0");				\
    register long int r3  __asm__ ("r3");				\
    register long int r4  __asm__ ("r4");				\
    register long int r5  __asm__ ("r5");				\
    register long int r6  __asm__ ("r6");				\
    register long int r7  __asm__ ("r7");				\
    register long int r8  __asm__ ("r8");				\
    register long int r9  __asm__ ("r9");				\
    register long int r10 __asm__ ("r10");				\
    register long int r11 __asm__ ("r11");				\
    register long int r12 __asm__ ("r12");				\
    LOAD_ARGS_##nr(name, args);						\
    __asm__ __volatile__						\
      ("sc   \n\t"							\
       "mfcr %0"							\
       : "=&r" (r0),							\
	 "=&r" (r3), "=&r" (r4), "=&r" (r5),  "=&r" (r6),  "=&r" (r7),	\
	 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12)	\
       : ASM_INPUT_##nr							\
       : "cr0", "ctr", "memory");					\
    err = r0;								\
    (int) r3;								\
  }) \
)
# define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((void) (val), unlikely ((err) & (1 << 28)))

# define INTERNAL_SYSCALL_ERRNO(val, err)     (val)
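
/* Usage sketch (illustrative only): INTERNAL_SYSCALL_NCS takes the raw
   syscall number (fd below is a hypothetical descriptor, standing in for a
   real argument).  Failure is reported through the separate err variable,
   never through errno, and INTERNAL_SYSCALL_ERRNO then yields the positive
   error code from r3:

     INTERNAL_SYSCALL_DECL (err);
     long int ret = INTERNAL_SYSCALL_NCS (__NR_close, err, 1, fd);
     if (INTERNAL_SYSCALL_ERROR_P (ret, err))
       return INTERNAL_SYSCALL_ERRNO (ret, err);
 */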

extern void __illegally_sized_syscall_arg1(void);
extern void __illegally_sized_syscall_arg2(void);
extern void __illegally_sized_syscall_arg3(void);
extern void __illegally_sized_syscall_arg4(void);
extern void __illegally_sized_syscall_arg5(void);
extern void __illegally_sized_syscall_arg6(void);

# define LOAD_ARGS_0(name, dummy) \
	r0 = name
# define LOAD_ARGS_1(name, __arg1) \
	long int arg1 = (long int) (__arg1); \
	LOAD_ARGS_0(name, 0); \
	if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
	  __illegally_sized_syscall_arg1 (); \
	r3 = arg1
# define LOAD_ARGS_2(name, __arg1, __arg2) \
	long int arg2 = (long int) (__arg2); \
	LOAD_ARGS_1(name, __arg1); \
	if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
	  __illegally_sized_syscall_arg2 (); \
	r4 = arg2
# define LOAD_ARGS_3(name, __arg1, __arg2, __arg3) \
	long int arg3 = (long int) (__arg3); \
	LOAD_ARGS_2(name, __arg1, __arg2); \
	if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
	  __illegally_sized_syscall_arg3 (); \
	r5 = arg3
# define LOAD_ARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
	long int arg4 = (long int) (__arg4); \
	LOAD_ARGS_3(name, __arg1, __arg2, __arg3); \
	if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
	  __illegally_sized_syscall_arg4 (); \
	r6 = arg4
# define LOAD_ARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
	long int arg5 = (long int) (__arg5); \
	LOAD_ARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
	if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
	  __illegally_sized_syscall_arg5 (); \
	r7 = arg5
# define LOAD_ARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
	long int arg6 = (long int) (__arg6); \
	LOAD_ARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
	if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
	  __illegally_sized_syscall_arg6 (); \
	r8 = arg6

# define ASM_INPUT_0 "0" (r0)
# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
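
/* Expansion sketch (illustrative only): for a two-argument call such as
   INTERNAL_SYSCALL_NCS (__NR_clock_gettime, err, 2, clk, tp), LOAD_ARGS_2
   binds the syscall number to r0 and the arguments to r3 and r4, each
   narrowed through a long int temporary; a non-pointer argument wider than
   32 bits leaves a call to one of the undefined __illegally_sized_* dummies
   in the code and therefore fails at link time.  ASM_INPUT_2 then ties r0,
   r3 and r4 to the asm as inputs matched to outputs "0".."2", so the
   compiler keeps the values in those registers across the call.  */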

#endif /* __ASSEMBLER__ */
#endif /* _BITS_SYSCALLS_H */