/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * MIPS specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_MIPS_H
#define _NOLIBC_ARCH_MIPS_H

#include "compiler.h"
#include "crt.h"

#if !defined(_ABIO32) && !defined(_ABIN32) && !defined(_ABI64)
#error Unsupported MIPS ABI
#endif

/* Syscalls for MIPS ABI O32:
 *   - WARNING! there's always a delay slot!
 *   - WARNING again, the syntax is different: registers take a '$' and
 *     numbers do not.
 *   - registers are 32-bit
 *   - stack is 8-byte aligned
 *   - syscall number is passed in v0 (starts at 0xfa0).
 *   - arguments are in a0, a1, a2, a3, then the stack. The caller needs to
 *     leave some room on the stack for the callee to save a0..a3 if needed.
 *   - Many registers are clobbered; in fact only a0..a2 and s0..s8 are
 *     preserved. See: https://www.linux-mips.org/wiki/Syscall as well as
 *     scall32-o32.S in the kernel sources.
 *   - the system call is performed by calling "syscall"
 *   - the syscall return value comes in v0, and register a3 needs to be
 *     checked to know if an error occurred, in which case errno is in v0.
 *   - the arguments are cast to long and assigned into the target registers,
 *     which are then simply passed as registers to the asm code, so that we
 *     don't have to deal with register constraint issues.
 *
 * Syscalls for MIPS ABI N32, same as ABI O32 with the following differences:
 *   - arguments are in a0, a1, a2, a3, t0, t1, t2, t3.
 *     t0..t3 are also known as a4..a7.
 *   - stack is 16-byte aligned
 */

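/*
 * Illustrative sketch only (nolibc's real wrappers live in sys.h): with the
 * v0/a3 convention above, the my_syscall*() macros below return -errno on
 * failure, so a libc-style wrapper would look like this, where SET_ERRNO
 * stands for whatever errno plumbing the environment provides:
 *
 *	static ssize_t my_write(int fd, const void *buf, size_t count)
 *	{
 *		ssize_t ret = my_syscall3(__NR_write, fd, buf, count);
 *
 *		if (ret < 0) {
 *			SET_ERRNO(-ret);
 *			ret = -1;
 *		}
 *		return ret;
 *	}
 */
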
#if defined(_ABIO32)

#define _NOLIBC_SYSCALL_CLOBBERLIST \
	"memory", "cc", "at", "v1", "hi", "lo", \
	"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9"
#define _NOLIBC_SYSCALL_STACK_RESERVE   "addiu $sp, $sp, -32\n"
#define _NOLIBC_SYSCALL_STACK_UNRESERVE "addiu $sp, $sp, 32\n"
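/*
 * Note on the 32-byte reservation (our reading of the O32 rules, not quoted
 * from the ABI text): 16 bytes form the a0..a3 argument save area that the
 * callee may use, and offsets 16 and 20 receive stack arguments 5 and 6 in
 * my_syscall5()/my_syscall6() below; the total stays a multiple of 8 to
 * keep the stack aligned.
 */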

#else /* _ABIN32 || _ABI64 */

/* binutils, GCC and clang disagree about register aliases, use numbers instead. */
#define _NOLIBC_SYSCALL_CLOBBERLIST \
	"memory", "cc", "at", "v1", \
	"10", "11", "12", "13", "14", "15", "24", "25"

#define _NOLIBC_SYSCALL_STACK_RESERVE
#define _NOLIBC_SYSCALL_STACK_UNRESERVE

#endif /* _ABIO32 */

#define my_syscall0(num) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg4 __asm__ ("a3"); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "r"(_num) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#define my_syscall1(num, arg1) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("a0") = (long)(arg1); \
	register long _arg4 __asm__ ("a3"); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("a0") = (long)(arg1); \
	register long _arg2 __asm__ ("a1") = (long)(arg2); \
	register long _arg4 __asm__ ("a3"); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("a0") = (long)(arg1); \
	register long _arg2 __asm__ ("a1") = (long)(arg2); \
	register long _arg3 __asm__ ("a2") = (long)(arg3); \
	register long _arg4 __asm__ ("a3"); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("a0") = (long)(arg1); \
	register long _arg2 __asm__ ("a1") = (long)(arg2); \
	register long _arg3 __asm__ ("a2") = (long)(arg3); \
	register long _arg4 __asm__ ("a3") = (long)(arg4); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#if defined(_ABIO32)

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("a0") = (long)(arg1); \
	register long _arg2 __asm__ ("a1") = (long)(arg2); \
	register long _arg3 __asm__ ("a2") = (long)(arg3); \
	register long _arg4 __asm__ ("a3") = (long)(arg4); \
	register long _arg5 = (long)(arg5); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"sw %7, 16($sp)\n" /* pass argument 5 on the stack */ \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("a0") = (long)(arg1); \
	register long _arg2 __asm__ ("a1") = (long)(arg2); \
	register long _arg3 __asm__ ("a2") = (long)(arg3); \
	register long _arg4 __asm__ ("a3") = (long)(arg4); \
	register long _arg5 = (long)(arg5); \
	register long _arg6 = (long)(arg6); \
	\
	__asm__ volatile ( \
		_NOLIBC_SYSCALL_STACK_RESERVE \
		"sw %7, 16($sp)\n" /* pass argument 5 on the stack */ \
		"sw %8, 20($sp)\n" /* pass argument 6 on the stack */ \
		"syscall\n" \
		_NOLIBC_SYSCALL_STACK_UNRESERVE \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#else /* _ABIN32 || _ABI64 */

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("$4") = (long)(arg1); \
	register long _arg2 __asm__ ("$5") = (long)(arg2); \
	register long _arg3 __asm__ ("$6") = (long)(arg3); \
	register long _arg4 __asm__ ("$7") = (long)(arg4); \
	register long _arg5 __asm__ ("$8") = (long)(arg5); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	register long _num  __asm__ ("v0") = (num); \
	register long _arg1 __asm__ ("$4") = (long)(arg1); \
	register long _arg2 __asm__ ("$5") = (long)(arg2); \
	register long _arg3 __asm__ ("$6") = (long)(arg3); \
	register long _arg4 __asm__ ("$7") = (long)(arg4); \
	register long _arg5 __asm__ ("$8") = (long)(arg5); \
	register long _arg6 __asm__ ("$9") = (long)(arg6); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=r"(_num), "=r"(_arg4) \
		: "0"(_num), \
		  "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_arg4 ? -_num : _num; \
})

#endif /* _ABIO32 */

/* startup code, note that it's called __start on MIPS */
void __start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
{
	__asm__ volatile (
		"move  $a0, $sp\n"       /* save stack pointer to $a0, as arg1 of _start_c */
#if defined(_ABIO32)
		"addiu $sp, $sp, -16\n"  /* the callee expects to save a0..a3 there */
#endif /* _ABIO32 */
		"lui  $t9, %hi(_start_c)\n" /* ABI requires current function address in $t9 */
		"ori  $t9, %lo(_start_c)\n" /* add the low 16 bits of the address */
#if defined(_ABI64)
		"lui  $t0, %highest(_start_c)\n" /* bits 48..63 of the address */
		"ori  $t0, %higher(_start_c)\n"  /* bits 32..47 */
		"dsll $t0, 0x20\n"               /* move them to the upper half */
		"or   $t9, $t0\n"                /* finish the 64-bit address in $t9 */
#endif /* _ABI64 */
		"jalr $t9\n"             /* transfer to c runtime */
	);
	__nolibc_entrypoint_epilogue();
}

#endif /* _NOLIBC_ARCH_MIPS_H */