/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack. )

 [*] In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.
 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*] In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
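
/*
 * A minimal sketch of the 64-bit struct-return rule above, in C (hypothetical
 * types, not part of this file):
 *
 *	struct pair  { long a, b;    };	// <= 128 bits: returned in rax:rdx
 *	struct three { long a, b, c; };	// larger: returned via hidden pointer
 *
 *	struct pair  f(long x);	// x in rdi, result in rax:rdx
 *	struct three g(long x);	// &result in rdi, x shifts up into rsi
 */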

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 unwind_hint=1
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	\rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->bx */
	pushq	%rbp		/* pt_regs->bp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */

	.if \unwind_hint
	UNWIND_HINT_REGS
	.endif

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
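
/*
 * Illustrative use of the save_ret=1 trick above (hypothetical helper, not
 * part of this file): a routine reached via 'call' can build pt_regs on top
 * of its own return address, do its work, and still return through it:
 *
 *	SYM_CODE_START_LOCAL(example_helper)	// hypothetical symbol
 *		PUSH_REGS save_ret=1		// return address back on top
 *		...
 *		ret				// pt_regs remain below
 *	SYM_CODE_END(example_helper)
 */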

.macro CLEAR_REGS clear_bp=1
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi, %esi	/* nospec si  */
	xorl	%edx, %edx	/* nospec dx  */
	xorl	%ecx, %ecx	/* nospec cx  */
	xorl	%r8d, %r8d	/* nospec r8  */
	xorl	%r9d, %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx, %ebx	/* nospec rbx */
	.if \clear_bp
	xorl	%ebp, %ebp	/* nospec rbp */
	.endif
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */
.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 clear_bp=1 unwind_hint=1
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret, unwind_hint=\unwind_hint
	CLEAR_REGS clear_bp=\clear_bp
.endm
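
/*
 * A minimal entry-path sketch (hypothetical, for illustration only): the
 * usual pattern is to save and sanitize all registers in one step, then hand
 * the pt_regs pointer to C code:
 *
 *	PUSH_AND_CLEAR_REGS
 *	movq	%rsp, %rdi		// arg1: struct pt_regs *
 *	call	example_c_handler	// hypothetical C function
 */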

.macro POP_REGS pop_rdi=1
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	.if \pop_rdi
	popq	%rdi
	.endif
.endm
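
/*
 * Illustrative only: exit paths that still need a scratch register can keep
 * pt_regs->di on the stack and pop it after its last use, e.g.:
 *
 *	POP_REGS pop_rdi=0
 *	// ... use %rdi as scratch, e.g. for a CR3 switch ...
 *	popq	%rdi
 */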

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION

/*
 * MITIGATION_PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between
 * the two halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
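
/*
 * Worked example (illustrative values): PTI_USER_PGTABLE_MASK is 1 << 12
 * (0x1000), and with X86_CR3_PTI_PCID_USER_BIT == 11, PTI_USER_PCID_MASK is
 * 1 << 11 (0x800). On PCID hardware a kernel CR3 of 0x2000 thus becomes the
 * user CR3 0x2000 | 0x1000 | 0x800 == 0x3800, and ADJUST_KERNEL_CR3's
 * andq $(~PTI_USER_PGTABLE_AND_PCID_MASK) takes it back to 0x2000.
 */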

.macro SET_NOFLUSH_BIT reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and the PTI page-table bit, point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
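
/*
 * Illustrative use (hypothetical entry stub, details elided): on entry from
 * user space the CR3 switch happens before anything that depends on kernel
 * mappings, with any free caller-saved register as scratch:
 *
 *	swapgs
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *	movq	example_kernel_stack(%rip), %rsp	// hypothetical stack load
 */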

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate + TLB_STATE_user_pcid_flush_mask)

.macro SWITCH_TO_USER_CR3 scratch_reg:req scratch_reg2:req
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.endm

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	SWITCH_TO_USER_CR3 \scratch_reg \scratch_reg2
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	pushq	%rax
	SWITCH_TO_USER_CR3 scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.Lend_\@:
.endm
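
/*
 * Sketch of choosing between the wrappers above (illustrative): _NOSTACK is
 * for return paths where %rsp no longer points at a usable stack, so the
 * caller donates both scratch registers; _STACK borrows %rax via the stack:
 *
 *	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%rdi scratch_reg2=%rax
 *	swapgs
 *	sysretq
 */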

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

/* Restore CR3 from a kernel context. May restore a user CR3 value. */
.macro PARANOID_RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	/*
	 * If CR3 contained the kernel page tables at the paranoid exception
	 * entry, then there is nothing to restore as CR3 is not modified while
	 * handling the exception.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lend_\@

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jc	.Lwrcr3_\@

	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	movq	\save_reg, %cr3
.Lend_\@:
.endm
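
/*
 * The two macros above pair up across a paranoid handler (illustrative
 * skeleton, details elided); save_reg must be a callee-preserved register
 * that survives the handler:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...
 *	PARANOID_RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */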

#else /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro PARANOID_RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
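
/*
 * Typical pairing (illustrative): normal entry points take the per-CPU
 * value, while paranoid paths preserve the interrupted context's MSR value
 * in a callee-saved register across the handler:
 *
 *	IBRS_ENTER save_reg=%r15
 *	...
 *	IBRS_EXIT save_reg=%r15
 *
 * Without save_reg, IBRS_EXIT instead writes x86_spec_ctrl_current with
 * SPEC_CTRL_IBRS cleared.
 */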

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
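
/*
 * Illustrative conditional-swapgs pattern (hypothetical labels, offset
 * illustrative): exactly one of the two fences runs on any given path:
 *
 *	testb	$3, CS(%rsp)		// came from user mode?
 *	jz	.Lfrom_kernel
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		// fence after the real swapgs
 *	jmp	.Lcommon
 * .Lfrom_kernel:
 *	FENCE_SWAPGS_KERNEL_ENTRY	// fence the (skipped) swapgs decision
 * .Lcommon:
 */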

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm
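
/*
 * Illustrative use (FSGSBASE paranoid entry, details elided): stash the
 * interrupted context's GSBASE, install the per-CPU base for kernel use,
 * and restore the saved value on the way out:
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	...
 *	wrgsbase %rbx
 */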

#else /* CONFIG_X86_64 */
# undef UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm
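
/*
 * Worked example (illustrative numbers): LSL reads the segment limit of the
 * __CPUNODE_SEG descriptor, into which boot code packed
 * (node << VDSO_CPUNODE_BITS) | cpu. With VDSO_CPUNODE_BITS == 12, a limit
 * of 0x1003 decodes to node 1, CPU 3; GET_PERCPU_BASE below keeps only the
 * CPU bits via VDSO_CPUNODE_MASK.
 */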

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */

#ifdef CONFIG_X86_64

/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func
SYM_FUNC_START(\name)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rax
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11

	call	\func

	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	popq	%rbp
	RET
SYM_FUNC_END(\name)
	_ASM_NOKPROBE(\name)
.endm
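
/*
 * Illustrative instantiation: the classic user is a preemption thunk that
 * must preserve all caller-saved registers around a call into the
 * scheduler, e.g.:
 *
 *	THUNK preempt_schedule_thunk, preempt_schedule
 *
 * Code can then 'call preempt_schedule_thunk' from contexts where the normal
 * C clobber rules would be inconvenient.
 */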

#else /* CONFIG_X86_32 */

/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
SYM_CODE_START_NOALIGN(\name)
	pushl	%eax
	pushl	%ecx
	pushl	%edx

	.if \put_ret_addr_in_eax
	/* Place EIP in arg1 */
	movl	3*4(%esp), %eax
	.endif

	call	\func
	popl	%edx
	popl	%ecx
	popl	%eax
	RET
	_ASM_NOKPROBE(\name)
SYM_CODE_END(\name)
.endm
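
/*
 * Illustrative instantiation (hypothetical names): with put_ret_addr_in_eax=1
 * the thunk passes its caller's return address as arg1, which suits
 * tracing-style helpers:
 *
 *	THUNK example_thunk, example_func, put_ret_addr_in_eax=1
 */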

#endif /* CONFIG_X86_64 */