1#ifndef LLVM_TOOLS_LLVM_BOLT_SYS_X86_64
2#define LLVM_TOOLS_LLVM_BOLT_SYS_X86_64
3
// Save all general-purpose registers while keeping 16B stack alignment.
// The 15 pushes store 120 bytes; the extra 8-byte adjustment brings the
// frame to 128 bytes so RSP stays 16-byte aligned for any call made while
// the registers are saved.
#define SAVE_ALL \
  "push %%rax\n" \
  "push %%rbx\n" \
  "push %%rcx\n" \
  "push %%rdx\n" \
  "push %%rdi\n" \
  "push %%rsi\n" \
  "push %%rbp\n" \
  "push %%r8\n" \
  "push %%r9\n" \
  "push %%r10\n" \
  "push %%r11\n" \
  "push %%r12\n" \
  "push %%r13\n" \
  "push %%r14\n" \
  "push %%r15\n" \
  "sub $8, %%rsp\n"
// Mirrors SAVE_ALL: drop the 8-byte alignment padding, then restore the
// registers by popping in the reverse order of SAVE_ALL.
#define RESTORE_ALL \
  "add $8, %%rsp\n" \
  "pop %%r15\n" \
  "pop %%r14\n" \
  "pop %%r13\n" \
  "pop %%r12\n" \
  "pop %%r11\n" \
  "pop %%r10\n" \
  "pop %%r9\n" \
  "pop %%r8\n" \
  "pop %%rbp\n" \
  "pop %%rsi\n" \
  "pop %%rdi\n" \
  "pop %%rdx\n" \
  "pop %%rcx\n" \
  "pop %%rbx\n" \
  "pop %%rax\n"
40
41namespace {
42
// Get the difference between the runtime address of the .text section and
// its static address in the section header table (i.e. the load bias). It
// can be subtracted from an arbitrary pc value recorded at runtime to get
// the corresponding static address, which in turn can be used to search for
// an indirect call description. Needed because indirect call descriptions
// are read-only non-relocatable data.
uint64_t getTextBaseAddress() {
  uint64_t DynAddr;
  uint64_t StaticAddr;
  // leaq is rip-relative, so it yields the runtime address of __hot_end;
  // movabsq embeds the absolute link-time address of the same symbol.
  __asm__ volatile("leaq __hot_end(%%rip), %0\n\t"
                   "movabsq $__hot_end, %1\n\t"
                   : "=r"(DynAddr), "=r"(StaticAddr));
  return DynAddr - StaticAddr;
}
56
// Expand a macro argument (e.g. a syscall number) and turn it into a string
// literal, so it can be spliced into inline-asm text.
#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)
59
// read(2) syscall wrapper: read up to \p count bytes from \p fd into \p buf.
// Returns the raw kernel result: the number of bytes read, or a negative
// errno value (as an unsigned integer) on failure.
// NOTE(review): \p buf is declared const even though the kernel writes into
// it — presumably kept const to mirror __write; consider void *. TODO confirm.
uint64_t __read(uint64_t fd, const void *buf, uint64_t count) {
  uint64_t ret;
#if defined(__APPLE__)
#define READ_SYSCALL 0x2000003
#else
#define READ_SYSCALL 0
#endif
  __asm__ __volatile__("movq $" STRINGIFY(READ_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd), "S"(buf), "d"(count)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
74
75uint64_t __write(uint64_t fd, const void *buf, uint64_t count) {
76 uint64_t ret;
77#if defined(__APPLE__)
78#define WRITE_SYSCALL 0x2000004
79#else
80#define WRITE_SYSCALL 1
81#endif
82 __asm__ __volatile__("movq $" STRINGIFY(WRITE_SYSCALL) ", %%rax\n"
83 "syscall\n"
84 : "=a"(ret)
85 : "D"(fd), "S"(buf), "d"(count)
86 : "cc", "rcx", "r11", "memory");
87 return ret;
88}
89
// mmap(2) syscall wrapper. Arguments 4-6 (flags, fd, offset) must be pinned
// to r10/r8/r9 per the x86-64 syscall calling convention, hence the explicit
// register variables. Returns the mapped address, or a negative errno value
// reinterpreted as a pointer on failure.
void *__mmap(uint64_t addr, uint64_t size, uint64_t prot, uint64_t flags,
             uint64_t fd, uint64_t offset) {
#if defined(__APPLE__)
#define MMAP_SYSCALL 0x20000c5
#else
#define MMAP_SYSCALL 9
#endif
  void *ret;
  register uint64_t r8 asm("r8") = fd;
  register uint64_t r9 asm("r9") = offset;
  register uint64_t r10 asm("r10") = flags;
  __asm__ __volatile__("movq $" STRINGIFY(MMAP_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(addr), "S"(size), "d"(prot), "r"(r10), "r"(r8),
                         "r"(r9)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
109
110uint64_t __munmap(void *addr, uint64_t size) {
111#if defined(__APPLE__)
112#define MUNMAP_SYSCALL 0x2000049
113#else
114#define MUNMAP_SYSCALL 11
115#endif
116 uint64_t ret;
117 __asm__ __volatile__("movq $" STRINGIFY(MUNMAP_SYSCALL) ", %%rax\n"
118 "syscall\n"
119 : "=a"(ret)
120 : "D"(addr), "S"(size)
121 : "cc", "rcx", "r11", "memory");
122 return ret;
123}
124
// rt_sigprocmask(2) syscall wrapper: change the caller's signal mask
// according to \p how using \p set, saving the previous mask into \p oldset
// (when non-null). Returns 0 on success or a negative errno value.
uint64_t __sigprocmask(int how, const void *set, void *oldset) {
#if defined(__APPLE__)
#define SIGPROCMASK_SYSCALL 0x2000030
#else
#define SIGPROCMASK_SYSCALL 14
#endif
  uint64_t ret;
  // 4th syscall argument (pinned to r10) is sigsetsize: 8 bytes, matching a
  // 64-bit signal set.
  register long r10 asm("r10") = sizeof(uint64_t);
  __asm__ __volatile__("movq $" STRINGIFY(SIGPROCMASK_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(how), "S"(set), "d"(oldset), "r"(r10)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
140
141uint64_t __getpid() {
142 uint64_t ret;
143#if defined(__APPLE__)
144#define GETPID_SYSCALL 20
145#else
146#define GETPID_SYSCALL 39
147#endif
148 __asm__ __volatile__("movq $" STRINGIFY(GETPID_SYSCALL) ", %%rax\n"
149 "syscall\n"
150 : "=a"(ret)
151 :
152 : "cc", "rcx", "r11", "memory");
153 return ret;
154}
155
// Process-exit syscall wrapper. On Linux this is exit_group (231), which
// terminates all threads in the process with status \p code. The call does
// not return in practice; the return statement exists only to satisfy the
// function signature.
uint64_t __exit(uint64_t code) {
#if defined(__APPLE__)
#define EXIT_SYSCALL 0x2000001
#else
#define EXIT_SYSCALL 231
#endif
  uint64_t ret;
  __asm__ __volatile__("movq $" STRINGIFY(EXIT_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(code)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
170
171#if !defined(__APPLE__)
// We use a stack-allocated buffer for string manipulation in many pieces of
// this code, including the code that prints each line of the fdata file. This
// buffer needs to accommodate large function names, but shouldn't be
// arbitrarily large (dynamically allocated) for simplicity of our memory
// space usage.
176
177// Declare some syscall wrappers we use throughout this code to avoid linking
178// against system libc.
// open(2) syscall wrapper: open \p pathname with the given \p flags and
// \p mode. Returns the new file descriptor, or a negative errno value.
uint64_t __open(const char *pathname, uint64_t flags, uint64_t mode) {
  uint64_t FD;
  __asm__ __volatile__("movq $2, %%rax\n"
                       "syscall"
                       : "=a"(FD)
                       : "D"(pathname), "S"(flags), "d"(mode)
                       : "cc", "rcx", "r11", "memory");
  return FD;
}
188
// getdents64(2) syscall wrapper: read directory entries from \p fd into the
// \p count byte buffer at \p dirp. Returns the number of bytes read, 0 at
// end of directory, or a negative errno value.
// NOTE(review): dirent64 is declared elsewhere; assumed layout-compatible
// with the kernel's linux_dirent64 — confirm at the declaration site.
long __getdents64(unsigned int fd, dirent64 *dirp, size_t count) {
  long ret;
  __asm__ __volatile__("movq $217, %%rax\n"
                       "syscall"
                       : "=a"(ret)
                       : "D"(fd), "S"(dirp), "d"(count)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
198
// readlink(2) syscall wrapper: place the target of symlink \p pathname into
// \p buf (up to \p bufsize bytes, not NUL-terminated). Returns the number of
// bytes placed, or a negative errno value.
uint64_t __readlink(const char *pathname, char *buf, size_t bufsize) {
  uint64_t LinkLen;
  __asm__ __volatile__("movq $89, %%rax\n"
                       "syscall"
                       : "=a"(LinkLen)
                       : "D"(pathname), "S"(buf), "d"(bufsize)
                       : "cc", "rcx", "r11", "memory");
  return LinkLen;
}
208
// lseek(2) syscall wrapper: reposition the file offset of \p fd to \p pos
// relative to \p whence. Returns the resulting offset, or a negative errno
// value.
uint64_t __lseek(uint64_t fd, uint64_t pos, uint64_t whence) {
  uint64_t NewOffset;
  __asm__ __volatile__("movq $8, %%rax\n"
                       "syscall\n"
                       : "=a"(NewOffset)
                       : "D"(fd), "S"(pos), "d"(whence)
                       : "cc", "rcx", "r11", "memory");
  return NewOffset;
}
218
// ftruncate(2) syscall wrapper: truncate the file open on \p fd to
// \p length bytes. Returns 0 on success, or a negative errno value.
int __ftruncate(uint64_t fd, uint64_t length) {
  int Status;
  __asm__ __volatile__("movq $77, %%rax\n"
                       "syscall\n"
                       : "=a"(Status)
                       : "D"(fd), "S"(length)
                       : "cc", "rcx", "r11", "memory");
  return Status;
}
228
// close(2) syscall wrapper. Returns 0 on success, or a negative errno value.
int __close(uint64_t fd) {
  // Use an int result to match the return type, consistent with the other
  // int-returning wrappers here (__ftruncate, __madvise, ...). The original
  // declared this uint64_t and implicitly truncated it on return.
  int ret;
  __asm__ __volatile__("movq $3, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
238
// madvise(2) syscall wrapper: give the kernel advice \p advice about the
// address range [\p addr, \p addr + \p length). Returns 0 on success, or a
// negative errno value.
int __madvise(void *addr, size_t length, int advice) {
  int Status;
  __asm__ __volatile__("movq $28, %%rax\n"
                       "syscall\n"
                       : "=a"(Status)
                       : "D"(addr), "S"(length), "d"(advice)
                       : "cc", "rcx", "r11", "memory");
  return Status;
}
248
// uname(2) syscall wrapper: fill \p Buf with system identification strings.
// Returns 0 on success, or a negative errno value.
// NOTE(review): UtsNameTy is declared elsewhere; assumed layout-compatible
// with the kernel's struct utsname — confirm at the declaration site.
int __uname(struct UtsNameTy *Buf) {
  int Ret;
  __asm__ __volatile__("movq $63, %%rax\n"
                       "syscall\n"
                       : "=a"(Ret)
                       : "D"(Buf)
                       : "cc", "rcx", "r11", "memory");
  return Ret;
}
258
// nanosleep(2) syscall wrapper: sleep for the interval in \p req. If
// interrupted by a signal, the kernel writes the remaining time to \p rem
// (when non-null) and the call returns a negative errno (-EINTR).
uint64_t __nanosleep(const timespec *req, timespec *rem) {
  uint64_t ret;
  __asm__ __volatile__("movq $35, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(req), "S"(rem)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
268
// fork(2) syscall wrapper. Returns 0 in the child, the child's pid in the
// parent, or a negative errno value on failure.
int64_t __fork() {
  // Use a signed result so negative errno values flow into the int64_t
  // return type directly; the original used uint64_t and relied on the
  // conversion at the return.
  int64_t ret;
  __asm__ __volatile__("movq $57, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       :
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
278
// mprotect(2) syscall wrapper: set protection \p prot on the address range
// [\p addr, \p addr + \p len). Returns 0 on success, or a negative errno
// value.
int __mprotect(void *addr, size_t len, int prot) {
  int Status;
  __asm__ __volatile__("movq $10, %%rax\n"
                       "syscall\n"
                       : "=a"(Status)
                       : "D"(addr), "S"(len), "d"(prot)
                       : "cc", "rcx", "r11", "memory");
  return Status;
}
288
// getppid(2) syscall wrapper: return the process ID of the caller's parent.
uint64_t __getppid() {
  uint64_t ParentPid;
  __asm__ __volatile__("movq $110, %%rax\n"
                       "syscall\n"
                       : "=a"(ParentPid)
                       :
                       : "cc", "rcx", "r11", "memory");
  return ParentPid;
}
298
// setpgid(2) syscall wrapper: move process \p pid into process group
// \p pgid. Returns 0 on success, or a negative errno value.
int __setpgid(uint64_t pid, uint64_t pgid) {
  int Status;
  __asm__ __volatile__("movq $109, %%rax\n"
                       "syscall\n"
                       : "=a"(Status)
                       : "D"(pid), "S"(pgid)
                       : "cc", "rcx", "r11", "memory");
  return Status;
}
308
309uint64_t __getpgid(uint64_t pid) {
310 uint64_t ret;
311 __asm__ __volatile__("movq $121, %%rax\n"
312 "syscall\n"
313 : "=a"(ret)
314 : "D"(pid)
315 : "cc", "rcx", "r11", "memory");
316 return ret;
317}
318
// kill(2) syscall wrapper: deliver signal \p sig to process \p pid (sig 0
// only checks existence/permission). Returns 0 on success, or a negative
// errno value.
int __kill(uint64_t pid, int sig) {
  int Status;
  __asm__ __volatile__("movq $62, %%rax\n"
                       "syscall\n"
                       : "=a"(Status)
                       : "D"(pid), "S"(sig)
                       : "cc", "rcx", "r11", "memory");
  return Status;
}
328
// fsync(2) syscall wrapper: flush all modified data of \p fd to its backing
// storage. Returns 0 on success, or a negative errno value.
int __fsync(int fd) {
  int Status;
  __asm__ __volatile__("movq $74, %%rax\n"
                       "syscall\n"
                       : "=a"(Status)
                       : "D"(fd)
                       : "cc", "rcx", "r11", "memory");
  return Status;
}
338
//            %rdi       %rsi      %rdx      %r10      %r8
// sys_prctl  int option unsigned  unsigned  unsigned  unsigned
//                       long arg2 long arg3 long arg4 long arg5
// prctl(2) syscall wrapper. Returns the kernel's result: usually 0 on
// success or a negative errno value (some options return a positive value).
int __prctl(int Option, unsigned long Arg2, unsigned long Arg3,
            unsigned long Arg4, unsigned long Arg5) {
  int Ret;
  // __asm__ spelling keeps this valid under strict ISO modes too.
  register long rdx __asm__("rdx") = Arg3;
  register long r8 __asm__("r8") = Arg5;
  register long r10 __asm__("r10") = Arg4;
  __asm__ __volatile__("movq $157, %%rax\n"
                       "syscall\n"
                       : "=a"(Ret)
                       : "D"(Option), "S"(Arg2), "d"(rdx), "r"(r10), "r"(r8)
                       // The clobber list was empty, but syscall clobbers
                       // rcx, r11, and flags, and prctl may read or write
                       // memory through its pointer arguments — declare all
                       // of it, matching the other wrappers in this file.
                       : "cc", "rcx", "r11", "memory");
  return Ret;
}
355
356#endif
357
358} // anonymous namespace
359
360#endif
361

// source code of bolt/runtime/sys_x86_64.h