/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * x86_64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_X86_64_H
#define _NOLIBC_ARCH_X86_64_H

/* The struct returned by the stat() syscall, equivalent to stat64(). The
 * syscall returns 116 bytes and stops in the middle of __unused.
 */
struct sys_stat_struct {
	unsigned long st_dev;
	unsigned long st_ino;
	unsigned long st_nlink;
	unsigned int  st_mode;
	unsigned int  st_uid;

	unsigned int  st_gid;
	unsigned int  __pad0;
	unsigned long st_rdev;
	long          st_size;
	long          st_blksize;

	long          st_blocks;
	unsigned long st_atime;
	unsigned long st_atime_nsec;
	unsigned long st_mtime;

	unsigned long st_mtime_nsec;
	unsigned long st_ctime;
	unsigned long st_ctime_nsec;
	long          __unused[3];
};
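
/* Illustrative sketch (added, not part of the original header): this struct is
 * what the raw stat syscall fills. With the my_syscall2() helper defined
 * further below, a minimal wrapper could look like the following; __NR_stat
 * comes from <asm/unistd.h> and the name sys_stat_example is hypothetical:
 *
 *   long sys_stat_example(const char *path, struct sys_stat_struct *buf)
 *   {
 *           return my_syscall2(__NR_stat, path, buf);
 *   }
 */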

/* Syscalls for x86_64 :
 *   - registers are 64-bit
 *   - syscall number is passed in rax
 *   - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
 *   - the system call is performed by calling the syscall instruction
 *   - syscall return comes in rax
 *   - rcx and r11 are clobbered, others are preserved.
 *   - the arguments are cast to long and assigned into the target registers
 *     which are then simply passed as registers to the asm code, so that we
 *     don't run into register constraint issues.
 *   - the syscall number is always specified last so that the other registers
 *     can be forced before it (gcc refuses a %-register at the last position).
 *   - see also x86-64 ABI section A.2 AMD64 Linux Kernel Conventions, A.2.1
 *     Calling Conventions.
 *   - an illustrative wrapper built on these macros follows my_syscall6() below.
 *
 * Link x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/home
 */

#define my_syscall0(num) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall1(num, arg1) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	register long _arg4 __asm__ ("r10") = (long)(arg4); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	register long _arg4 __asm__ ("r10") = (long)(arg4); \
	register long _arg5 __asm__ ("r8")  = (long)(arg5); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	register long _arg4 __asm__ ("r10") = (long)(arg4); \
	register long _arg5 __asm__ ("r8")  = (long)(arg5); \
	register long _arg6 __asm__ ("r9")  = (long)(arg6); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6), "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})
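
/* Illustrative example (added sketch, not part of the original header): a
 * minimal write() wrapper built on the macros above, following the pattern
 * nolibc's generic sys_*() helpers use. __NR_write comes from <asm/unistd.h>;
 * the #ifdef guard keeps this harmless if that header is not included yet,
 * and the name sys_write_example is hypothetical.
 */
#ifdef __NR_write
static __attribute__((unused))
long sys_write_example(int fd, const void *buf, unsigned long count)
{
	/* rax=__NR_write, rdi=fd, rsi=buf, rdx=count; result or -errno in rax */
	return my_syscall3(__NR_write, fd, buf, count);
}
#endif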

char **environ __attribute__((weak));
const unsigned long *_auxv __attribute__((weak));

/* startup code */
/*
 * x86-64 System V ABI mandates:
 * 1) %rsp must be 16-byte aligned right before the function call.
 * 2) The deepest stack frame's frame pointer (%rbp) should be zero.
 */
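/*
 * For reference (added sketch, not from the original file), the initial
 * process stack that _start() walks below is laid out by the kernel as:
 *
 *   [rsp]      argc
 *   [rsp + 8]  argv[0] ... argv[argc - 1]
 *              NULL
 *              envp[0] ... envp[n - 1]
 *              NULL
 *              auxv: AT_* (id, value) pairs, terminated by an AT_NULL entry
 */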
void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
{
	__asm__ volatile (
		"pop %rdi\n"                // argc   (first arg, %rdi)
		"mov %rsp, %rsi\n"          // argv[] (second arg, %rsi)
		"lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx)
		"mov %rdx, environ\n"       // save environ
		"xor %ebp, %ebp\n"          // zero the stack frame
		"mov %rdx, %rax\n"          // search for auxv (follows NULL after last env)
		"0:\n"
		"add $8, %rax\n"            // search for auxv using rax, it follows the
		"cmp -8(%rax), %rbp\n"      // ... NULL after last env (rbp is zero here)
		"jnz 0b\n"
		"mov %rax, _auxv\n"         // save it into _auxv
		"and $-16, %rsp\n"          // x86 ABI : rsp must be 16-byte aligned before call
		"call main\n"               // main() returns the status code, we'll exit with it.
		"mov %eax, %edi\n"          // retrieve exit code (32 bit)
		"mov $60, %eax\n"           // NR_exit == 60
		"syscall\n"                 // really exit
		"hlt\n"                     // ensure it does not return
	);
	__builtin_unreachable();
}
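
/*
 * Usage note (added, not from the original file): with this startup code a
 * freestanding program only needs to define main(); _start() hands it argc,
 * argv and envp and exits with its return value. One possible build command,
 * shown purely as an illustration:
 *
 *   $ gcc -Os -nostdlib -static -include nolibc.h -o hello hello.c -lgcc
 */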

#endif // _NOLIBC_ARCH_X86_64_H