/* Assembler macros for x86-64.
   Copyright (C) 2001-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _X86_64_SYSDEP_H
#define _X86_64_SYSDEP_H 1

#include <sysdeps/x86/sysdep.h>
#include <x86-lp_size.h>

/* __CET__ is defined by GCC with Control-Flow Protection values:

   enum cf_protection_level
   {
     CF_NONE = 0,
     CF_BRANCH = 1 << 0,
     CF_RETURN = 1 << 1,
     CF_FULL = CF_BRANCH | CF_RETURN,
     CF_SET = 1 << 2
   };
*/

/* Set if CF_BRANCH (IBT) is enabled.  */
#define X86_FEATURE_1_IBT (1U << 0)
/* Set if CF_RETURN (SHSTK) is enabled.  */
#define X86_FEATURE_1_SHSTK (1U << 1)

#ifdef __CET__
# define CET_ENABLED 1
# define SHSTK_ENABLED (__CET__ & X86_FEATURE_1_SHSTK)
#else
# define CET_ENABLED 0
# define SHSTK_ENABLED 0
#endif
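
/* For example, building with -fcf-protection=full defines __CET__ to
   (CF_BRANCH | CF_RETURN), so CET_ENABLED is 1 and SHSTK_ENABLED is
   non-zero; with -fcf-protection=branch only the IBT bit is set and
   SHSTK_ENABLED evaluates to 0.  */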

#ifdef __ASSEMBLER__

/* Syntactic details of assembler.  */

#ifdef _CET_ENDBR
# define _CET_NOTRACK notrack
#else
# define _CET_ENDBR
# define _CET_NOTRACK
#endif
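
/* When IBT is enabled, _CET_ENDBR is expected to be defined elsewhere as
   the ENDBR marker instruction; otherwise both macros expand to nothing.
   A minimal sketch of _CET_NOTRACK (illustrative only): an indirect jump
   whose target is not required to begin with an ENDBR instruction can be
   written as

	_CET_NOTRACK jmp *%rax
 */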

/* Define an entry point visible from C.  */
#define ENTRY_P2ALIGN(name, alignment) \
  .globl C_SYMBOL_NAME(name); \
  .type C_SYMBOL_NAME(name),@function; \
  .align ALIGNARG(alignment); \
  C_LABEL(name) \
  cfi_startproc; \
  _CET_ENDBR; \
  CALL_MCOUNT
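
/* A minimal usage sketch (illustrative only; "my_func" is a hypothetical
   symbol, and END is provided by the shared sysdep headers):

	ENTRY_P2ALIGN (my_func, 4)
		movl	%edi, %eax
		ret
	END (my_func)
 */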

/* This macro is for setting proper CFI with DW_CFA_expression describing
   the register as saved relative to %rsp instead of relative to the CFA.
   Expression is DW_OP_drop, DW_OP_breg7 (%rsp is register 7), sleb128 offset
   from %rsp.  */
#define cfi_offset_rel_rsp(regn, off) .cfi_escape 0x10, regn, 0x4, 0x13, \
			0x77, off & 0x7F | 0x80, off >> 7
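
/* For example (illustrative only), after a register has been stored at
   16(%rsp), the unwind information can describe it with

	cfi_offset_rel_rsp (3, 16)

   where 3 is the DWARF register number of %rbx; this emits
   DW_CFA_expression with the expression DW_OP_drop; DW_OP_breg7 16.  */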

/* If compiled for profiling, call `mcount' at the start of each function.  */
#ifdef PROF
/* The mcount code relies on a normal frame pointer being on the stack
   to locate our caller, so push one just for its benefit.  */
#define CALL_MCOUNT \
  pushq %rbp; \
  cfi_adjust_cfa_offset(8); \
  movq %rsp, %rbp; \
  cfi_def_cfa_register(%rbp); \
  call JUMPTARGET(mcount); \
  popq %rbp; \
  cfi_def_cfa(rsp,8);
#else
#define CALL_MCOUNT /* Do nothing.  */
#endif

#define PSEUDO(name, syscall_name, args) \
lose: \
  jmp JUMPTARGET(syscall_error) \
  .globl syscall_error; \
ENTRY (name) \
  DO_CALL (syscall_name, args); \
  jb lose

#undef JUMPTARGET
#ifdef SHARED
# ifdef BIND_NOW
#  define JUMPTARGET(name) *name##@GOTPCREL(%rip)
# else
#  define JUMPTARGET(name) name##@PLT
# endif
#else
/* For static archives, branch to target directly.  */
# define JUMPTARGET(name) name
#endif
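
/* For example, `call JUMPTARGET(memcpy)' assembles to `call memcpy@PLT'
   in a shared build, to the indirect `call *memcpy@GOTPCREL(%rip)' when
   BIND_NOW is in effect, and to a plain `call memcpy' for static
   archives.  */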

/* Instruction to operate on long and pointer.  */
#define LP_OP(insn) insn##q

/* Assembler address directive.  */
#define ASM_ADDR .quad

/* Registers to hold long and pointer.  */
#define RAX_LP rax
#define RBP_LP rbp
#define RBX_LP rbx
#define RCX_LP rcx
#define RDI_LP rdi
#define RDX_LP rdx
#define RSI_LP rsi
#define RSP_LP rsp
#define R8_LP r8
#define R9_LP r9
#define R10_LP r10
#define R11_LP r11
#define R12_LP r12
#define R13_LP r13
#define R14_LP r14
#define R15_LP r15
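
/* For example (illustrative only; "some_label" is a hypothetical symbol),
   these macros let the same source assemble for both 64-bit and ILP32
   (x32) objects, which can override them with 32-bit equivalents:

	LP_OP(mov)	%RAX_LP, %RDX_LP
	ASM_ADDR	some_label

   assembles on x86-64 as `movq %rax, %rdx' and `.quad some_label'.  */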

/* Zero upper vector registers and return, using xtest to detect an RTM
   transaction.  NB: xtest clears ZF only while executing transactionally,
   so the jnz path uses VZEROALL, because VZEROUPPER inside a transaction
   triggers an RTM abort while VZEROALL does not.  */
#define ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST \
	xtest; \
	jnz 1f; \
	vzeroupper; \
	ret; \
1: \
	vzeroall; \
	ret

/* Can be used to replace a vzeroupper that is not directly before a
   return.  This is useful when hoisting a vzeroupper from multiple
   return paths to decrease the total number of vzerouppers and the
   code size; inside an RTM transaction it uses vzeroall instead.  */
#define COND_VZEROUPPER_XTEST \
	xtest; \
	jz 1f; \
	vzeroall; \
	jmp 2f; \
1: \
	vzeroupper; \
2:

/* When the code may run inside an RTM transaction, define this as
   COND_VZEROUPPER_XTEST.  */
#ifndef COND_VZEROUPPER
# define COND_VZEROUPPER vzeroupper
#endif
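
/* A minimal usage sketch (illustrative only; L(done_sse) is a hypothetical
   label): clear the upper vector state once before branching to code shared
   by several exit paths, instead of placing a vzeroupper on each path:

	COND_VZEROUPPER
	jmp	L(done_sse)
 */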

/* Zero upper vector registers and return.  */
#ifndef ZERO_UPPER_VEC_REGISTERS_RETURN
# define ZERO_UPPER_VEC_REGISTERS_RETURN \
	VZEROUPPER; \
	ret
#endif

#ifndef VZEROUPPER_RETURN
# define VZEROUPPER_RETURN VZEROUPPER; ret
#endif
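
/* A sketch of how an RTM-safe build might select the xtest variants
   (illustrative only; actual variant files may differ): define the macros
   before this header is processed, so the #ifndef defaults above are
   skipped, e.g.

	#define COND_VZEROUPPER COND_VZEROUPPER_XTEST
	#define ZERO_UPPER_VEC_REGISTERS_RETURN ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST
 */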

#else /* __ASSEMBLER__ */

/* Instruction to operate on long and pointer.  */
#define LP_OP(insn) #insn "q"

/* Assembler address directive.  */
#define ASM_ADDR ".quad"

/* Registers to hold long and pointer.  */
#define RAX_LP "rax"
#define RBP_LP "rbp"
#define RBX_LP "rbx"
#define RCX_LP "rcx"
#define RDI_LP "rdi"
#define RDX_LP "rdx"
#define RSI_LP "rsi"
#define RSP_LP "rsp"
#define R8_LP "r8"
#define R9_LP "r9"
#define R10_LP "r10"
#define R11_LP "r11"
#define R12_LP "r12"
#define R13_LP "r13"
#define R14_LP "r14"
#define R15_LP "r15"
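
/* For example (illustrative only), the string forms can be pasted into an
   inline-assembly template; on x86-64 the assembled instruction is
   `movq %rsp, ...':

     void *sp;
     asm (LP_OP(mov) " %%" RSP_LP ", %0" : "=r" (sp));
 */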

#endif /* __ASSEMBLER__ */

#endif /* _X86_64_SYSDEP_H */