1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * linux/arch/x86_64/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
8 *
9 * entry.S contains the system-call and fault low-level handling routines.
10 *
11 * Some of this is documented in Documentation/arch/x86/entry_64.rst
12 *
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
15 * at the top of the kernel process stack.
16 *
17 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
20 */
21#include <linux/export.h>
22#include <linux/linkage.h>
23#include <asm/segment.h>
24#include <asm/cache.h>
25#include <asm/errno.h>
26#include <asm/asm-offsets.h>
27#include <asm/msr.h>
28#include <asm/unistd.h>
29#include <asm/thread_info.h>
30#include <asm/hw_irq.h>
31#include <asm/page_types.h>
32#include <asm/irqflags.h>
33#include <asm/paravirt.h>
34#include <asm/percpu.h>
35#include <asm/asm.h>
36#include <asm/smap.h>
37#include <asm/pgtable_types.h>
38#include <asm/frame.h>
39#include <asm/trapnr.h>
40#include <asm/nospec-branch.h>
41#include <asm/fsgsbase.h>
42#include <linux/err.h>
43
44#include "calling.h"
45
46.code64
47.section .entry.text, "ax"
48
49/*
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
51 *
52 * This is the only entry point used for 64-bit system calls. The
53 * hardware interface is reasonably well designed and the register to
54 * argument mapping Linux uses fits well with the registers that are
55 * available when SYSCALL is used.
56 *
57 * SYSCALL instructions can be found inlined in libc implementations as
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettime()/gettimeofday() fallback.
61 *
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
63 * then loads new ss, cs, and rip from previously programmed MSRs.
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
65 * are not needed). SYSCALL does not save anything on the stack
66 * and does not change rsp.
67 *
68 * Registers on entry:
69 * rax system call number
70 * rcx return address
71 * r11 saved rflags (note: r11 is callee-clobbered register in C ABI)
72 * rdi arg0
73 * rsi arg1
74 * rdx arg2
75 * r10 arg3 (needs to be moved to rcx to conform to C ABI)
76 * r8 arg4
77 * r9 arg5
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
79 *
80 * Only called from user space.
81 *
 * When userspace can change pt_regs->foo, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
85 */
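/*
 * For illustration only (this snippet is not part of the kernel entry
 * code): a userspace caller issuing write(2) via a raw SYSCALL would,
 * per the convention above, set up registers roughly like
 *
 *	movl	$__NR_write, %eax	# rax = syscall number (1)
 *	movl	$1, %edi		# rdi = arg0 (fd, here stdout)
 *	leaq	buf(%rip), %rsi		# rsi = arg1 (buffer, placeholder)
 *	movl	$len, %edx		# rdx = arg2 (count, placeholder)
 *	syscall				# rcx/r11 are clobbered by the CPU
 *
 * and those values land in pt_regs exactly as constructed below.
 */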
86
87SYM_CODE_START(entry_SYSCALL_64)
88 UNWIND_HINT_ENTRY
89 ENDBR
90
91 swapgs
92 /* tss.sp2 is scratch space. */
93 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
94 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
95 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
96
97SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
98 ANNOTATE_NOENDBR
99
100 /* Construct struct pt_regs on stack */
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
106SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
107 pushq %rax /* pt_regs->orig_ax */
108
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
110
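	/*
	 * The stack now holds a complete struct pt_regs: the hand-built
	 * hardware-style frame above (ss, rsp, flags, cs, ip), orig_ax,
	 * and the remaining GPR slots filled in by PUSH_AND_CLEAR_REGS,
	 * which also seeds pt_regs->ax with -ENOSYS and zeroes most of
	 * the just-saved registers to limit speculation gadgets (see
	 * calling.h for the exact layout and clearing rules).
	 */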
111 /* IRQs are off. */
112 movq %rsp, %rdi
	/* Sign-extend the lower 32 bits, as syscall numbers are treated as int */
114 movslq %eax, %rsi
115
116 /* clobbers %rax, make sure it is after saving the syscall nr */
117 IBRS_ENTER
118 UNTRAIN_RET
119 CLEAR_BRANCH_HISTORY
120
121 call do_syscall_64 /* returns with IRQs disabled */
122
123 /*
124 * Try to use SYSRET instead of IRET if we're returning to
125 * a completely clean 64-bit userspace context. If we're not,
126 * go to the slow exit path.
127 * In the Xen PV case we must use iret anyway.
128 */
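	/*
	 * %al carries the return value of do_syscall_64(): non-zero means
	 * the register state is clean enough for SYSRET, zero means the
	 * IRET path must be used.
	 */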
129
130 ALTERNATIVE "testb %al, %al; jz swapgs_restore_regs_and_return_to_usermode", \
131 "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
132
133 /*
134 * We win! This label is here just for ease of understanding
135 * perf profiles. Nothing jumps here.
136 */
137syscall_return_via_sysret:
138 IBRS_EXIT
139 POP_REGS pop_rdi=0
140
141 /*
142 * Now all regs are restored except RSP and RDI.
143 * Save old stack pointer and switch to trampoline stack.
144 */
145 movq %rsp, %rdi
146 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
147 UNWIND_HINT_END_OF_STACK
148
149 pushq RSP-RDI(%rdi) /* RSP */
150 pushq (%rdi) /* RDI */
151
152 /*
153 * We are on the trampoline stack. All regs except RDI are live.
154 * We can do future final exit work right here.
155 */
156 STACKLEAK_ERASE_NOCLOBBER
157
158 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
159
160 popq %rdi
161 popq %rsp
162SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
163 ANNOTATE_NOENDBR
164 swapgs
165 CLEAR_CPU_BUFFERS
166 sysretq
167SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
168 ANNOTATE_NOENDBR
169 int3
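	/*
	 * The int3 above should never execute: nothing falls through or
	 * jumps past SYSRET, and if something does (including speculative
	 * straight-line execution), int3 stops it cold.
	 */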
170SYM_CODE_END(entry_SYSCALL_64)
171
172/*
173 * %rdi: prev task
174 * %rsi: next task
175 */
176.pushsection .text, "ax"
177SYM_FUNC_START(__switch_to_asm)
178 /*
179 * Save callee-saved registers
180 * This must match the order in inactive_task_frame
181 */
182 pushq %rbp
183 pushq %rbx
184 pushq %r12
185 pushq %r13
186 pushq %r14
187 pushq %r15
188
189 /* switch stack */
190 movq %rsp, TASK_threadsp(%rdi)
191 movq TASK_threadsp(%rsi), %rsp
192
193#ifdef CONFIG_STACKPROTECTOR
194 movq TASK_stack_canary(%rsi), %rbx
195 movq %rbx, PER_CPU_VAR(fixed_percpu_data + FIXED_stack_canary)
196#endif
197
198 /*
199 * When switching from a shallower to a deeper call stack
200 * the RSB may either underflow or use entries populated
201 * with userspace addresses. On CPUs where those concerns
202 * exist, overwrite the RSB with entries which capture
203 * speculative execution to prevent attack.
204 */
205 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
206
207 /* restore callee-saved registers */
208 popq %r15
209 popq %r14
210 popq %r13
211 popq %r12
212 popq %rbx
213 popq %rbp
214
215 jmp __switch_to
216SYM_FUNC_END(__switch_to_asm)
217.popsection
218
219/*
220 * A newly forked process directly context switches into this address.
221 *
222 * rax: prev task we switched from
223 * rbx: kernel thread func (NULL for user thread)
224 * r12: kernel thread arg
225 */
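/*
 * Roughly, the C side is ret_from_fork(prev, regs, fn, fn_arg): it runs
 * schedule_tail(prev); if fn is non-NULL this is a kernel thread and
 * fn(fn_arg) is called, otherwise the new task returns to user mode
 * through the pt_regs that copy_thread() built at the top of the stack.
 * (A sketch; see arch/x86/kernel/process.c for the real function.)
 */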
226.pushsection .text, "ax"
227SYM_CODE_START(ret_from_fork_asm)
228 /*
	 * This is the start of the kernel stack; even though there's a
230 * register set at the top, the regset isn't necessarily coherent
231 * (consider kthreads) and one cannot unwind further.
232 *
233 * This ensures stack unwinds of kernel threads terminate in a known
234 * good state.
235 */
236 UNWIND_HINT_END_OF_STACK
237 ANNOTATE_NOENDBR // copy_thread
238 CALL_DEPTH_ACCOUNT
239
240 movq %rax, %rdi /* prev */
241 movq %rsp, %rsi /* regs */
242 movq %rbx, %rdx /* fn */
243 movq %r12, %rcx /* fn_arg */
244 call ret_from_fork
245
246 /*
247 * Set the stack state to what is expected for the target function
248 * -- at this point the register set should be a valid user set
249 * and unwind should work normally.
250 */
251 UNWIND_HINT_REGS
252
253#ifdef CONFIG_X86_FRED
254 ALTERNATIVE "jmp swapgs_restore_regs_and_return_to_usermode", \
255 "jmp asm_fred_exit_user", X86_FEATURE_FRED
256#else
257 jmp swapgs_restore_regs_and_return_to_usermode
258#endif
259SYM_CODE_END(ret_from_fork_asm)
260.popsection
261
262.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
263#ifdef CONFIG_DEBUG_ENTRY
264 pushq %rax
265 SAVE_FLAGS
266 testl $X86_EFLAGS_IF, %eax
267 jz .Lokay_\@
268 ud2
269.Lokay_\@:
270 popq %rax
271#endif
272.endm
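/*
 * SAVE_FLAGS leaves the flag word in %rax (hence the push/pop around
 * it), and the ud2 turns an unexpected "interrupts enabled" state into
 * an immediate oops. Used below, e.g. by error_return.
 */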
273
274SYM_CODE_START(xen_error_entry)
275 ANNOTATE_NOENDBR
276 UNWIND_HINT_FUNC
277 PUSH_AND_CLEAR_REGS save_ret=1
278 ENCODE_FRAME_POINTER 8
279 UNTRAIN_RET_FROM_CALL
280 RET
281SYM_CODE_END(xen_error_entry)
282
283/**
284 * idtentry_body - Macro to emit code calling the C function
285 * @cfunc: C function to be called
286 * @has_error_code: Hardware pushed error code on stack
287 */
288.macro idtentry_body cfunc has_error_code:req
289
290 /*
291 * Call error_entry() and switch to the task stack if from userspace.
292 *
293 * When in XENPV, it is already in the task stack, and it can't fault
294 * for native_iret() nor native_load_gs_index() since XENPV uses its
295 * own pvops for IRET and load_gs_index(). And it doesn't need to
296 * switch the CR3. So it can skip invoking error_entry().
297 */
298 ALTERNATIVE "call error_entry; movq %rax, %rsp", \
299 "call xen_error_entry", X86_FEATURE_XENPV
300
301 ENCODE_FRAME_POINTER
302 UNWIND_HINT_REGS
303
304 movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
305
306 .if \has_error_code == 1
307 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
308 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
309 .endif
310
311 call \cfunc
312
313 /* For some configurations \cfunc ends up being a noreturn. */
314 REACHABLE
315
316 jmp error_return
317.endm
318
319/**
320 * idtentry - Macro to generate entry stubs for simple IDT entries
321 * @vector: Vector number
322 * @asmsym: ASM symbol for the entry point
323 * @cfunc: C function to be called
324 * @has_error_code: Hardware pushed error code on stack
325 *
 * The macro emits code to set up the kernel context for straightforward
327 * and simple IDT entries. No IST stack, no paranoid entry checks.
328 */
329.macro idtentry vector asmsym cfunc has_error_code:req
330SYM_CODE_START(\asmsym)
331
332 .if \vector == X86_TRAP_BP
333 /* #BP advances %rip to the next instruction */
334 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
335 .else
336 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
337 .endif
338
339 ENDBR
340 ASM_CLAC
341 cld
342
343 .if \has_error_code == 0
344 pushq $-1 /* ORIG_RAX: no syscall to restart */
345 .endif
346
347 .if \vector == X86_TRAP_BP
348 /*
349 * If coming from kernel space, create a 6-word gap to allow the
350 * int3 handler to emulate a call instruction.
351 */
352 testb $3, CS-ORIG_RAX(%rsp)
353 jnz .Lfrom_usermode_no_gap_\@
354 .rept 6
355 pushq 5*8(%rsp)
356 .endr
357 UNWIND_HINT_IRET_REGS offset=8
358.Lfrom_usermode_no_gap_\@:
359 .endif
360
361 idtentry_body \cfunc \has_error_code
362
363_ASM_NOKPROBE(\asmsym)
364SYM_CODE_END(\asmsym)
365.endm
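/*
 * For illustration (the real instantiations come from asm/idtentry.h,
 * included further below): the divide error stub ends up being emitted
 * by an invocation along the lines of
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 */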
366
367/*
368 * Interrupt entry/exit.
369 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
371 * position of idtentry exceptions, and jump to one of the two idtentry points
372 * (common/spurious).
373 *
374 * common_interrupt is a hotpath, align it to a cache line
375 */
376.macro idtentry_irq vector cfunc
377 .p2align CONFIG_X86_L1_CACHE_SHIFT
378 idtentry \vector asm_\cfunc \cfunc has_error_code=1
379.endm
380
381/**
382 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
383 * @vector: Vector number
384 * @asmsym: ASM symbol for the entry point
385 * @cfunc: C function to be called
386 *
387 * The macro emits code to set up the kernel context for #MC and #DB
388 *
389 * If the entry comes from user space it uses the normal entry path
390 * including the return to user space work and preemption checks on
391 * exit.
392 *
 * If it hits in kernel mode then it needs to go through the paranoid
394 * entry as the exception can hit any random state. No preemption
395 * check on exit to keep the paranoid path simple.
396 */
397.macro idtentry_mce_db vector asmsym cfunc
398SYM_CODE_START(\asmsym)
399 UNWIND_HINT_IRET_ENTRY
400 ENDBR
401 ASM_CLAC
402 cld
403
404 pushq $-1 /* ORIG_RAX: no syscall to restart */
405
406 /*
407 * If the entry is from userspace, switch stacks and treat it as
408 * a normal entry.
409 */
410 testb $3, CS-ORIG_RAX(%rsp)
411 jnz .Lfrom_usermode_switch_stack_\@
412
413 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
414 call paranoid_entry
415
416 UNWIND_HINT_REGS
417
418 movq %rsp, %rdi /* pt_regs pointer */
419
420 call \cfunc
421
422 jmp paranoid_exit
423
424 /* Switch to the regular task stack and use the noist entry point */
425.Lfrom_usermode_switch_stack_\@:
426 idtentry_body noist_\cfunc, has_error_code=0
427
428_ASM_NOKPROBE(\asmsym)
429SYM_CODE_END(\asmsym)
430.endm
431
432#ifdef CONFIG_AMD_MEM_ENCRYPT
433/**
434 * idtentry_vc - Macro to generate entry stub for #VC
435 * @vector: Vector number
436 * @asmsym: ASM symbol for the entry point
437 * @cfunc: C function to be called
438 *
439 * The macro emits code to set up the kernel context for #VC. The #VC handler
440 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
441 *
442 * To make this work the #VC entry code tries its best to pretend it doesn't use
443 * an IST stack by switching to the task stack if coming from user-space (which
444 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
445 * entered from kernel-mode.
446 *
447 * If entered from kernel-mode the return stack is validated first, and if it is
448 * not safe to use (e.g. because it points to the entry stack) the #VC handler
449 * will switch to a fall-back stack (VC2) and call a special handler function.
450 *
451 * The macro is only used for one vector, but it is planned to be extended in
452 * the future for the #HV exception.
453 */
454.macro idtentry_vc vector asmsym cfunc
455SYM_CODE_START(\asmsym)
456 UNWIND_HINT_IRET_ENTRY
457 ENDBR
458 ASM_CLAC
459 cld
460
461 /*
462 * If the entry is from userspace, switch stacks and treat it as
463 * a normal entry.
464 */
465 testb $3, CS-ORIG_RAX(%rsp)
466 jnz .Lfrom_usermode_switch_stack_\@
467
468 /*
469 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
470 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
471 */
472 call paranoid_entry
473
474 UNWIND_HINT_REGS
475
476 /*
477 * Switch off the IST stack to make it free for nested exceptions. The
478 * vc_switch_off_ist() function will switch back to the interrupted
479 * stack if it is safe to do so. If not it switches to the VC fall-back
480 * stack.
481 */
482 movq %rsp, %rdi /* pt_regs pointer */
483 call vc_switch_off_ist
484 movq %rax, %rsp /* Switch to new stack */
485
486 ENCODE_FRAME_POINTER
487 UNWIND_HINT_REGS
488
489 /* Update pt_regs */
490 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
491 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
492
493 movq %rsp, %rdi /* pt_regs pointer */
494
495 call kernel_\cfunc
496
497 /*
498 * No need to switch back to the IST stack. The current stack is either
499 * identical to the stack in the IRET frame or the VC fall-back stack,
500 * so it is definitely mapped even with PTI enabled.
501 */
502 jmp paranoid_exit
503
504 /* Switch to the regular task stack */
505.Lfrom_usermode_switch_stack_\@:
506 idtentry_body user_\cfunc, has_error_code=1
507
508_ASM_NOKPROBE(\asmsym)
509SYM_CODE_END(\asmsym)
510.endm
511#endif
512
513/*
 * Double fault entry. Straight paranoid. No check which context this
 * comes from, because for the espfix-induced #DF such a check would do
 * the wrong thing.
517 */
518.macro idtentry_df vector asmsym cfunc
519SYM_CODE_START(\asmsym)
520 UNWIND_HINT_IRET_ENTRY offset=8
521 ENDBR
522 ASM_CLAC
523 cld
524
525 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
526 call paranoid_entry
527 UNWIND_HINT_REGS
528
529 movq %rsp, %rdi /* pt_regs pointer into first argument */
530 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
531 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
532 call \cfunc
533
534 /* For some configurations \cfunc ends up being a noreturn. */
535 REACHABLE
536
537 jmp paranoid_exit
538
539_ASM_NOKPROBE(\asmsym)
540SYM_CODE_END(\asmsym)
541.endm
542
543/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
546 * so the stacktrace boundary checks work.
547 */
548 __ALIGN
549 .globl __irqentry_text_start
550__irqentry_text_start:
551
552#include <asm/idtentry.h>
553
554 __ALIGN
555 .globl __irqentry_text_end
556__irqentry_text_end:
557 ANNOTATE_NOENDBR
558
559SYM_CODE_START_LOCAL(common_interrupt_return)
560SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
561 IBRS_EXIT
562#ifdef CONFIG_XEN_PV
563 ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
564#endif
565#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
566 ALTERNATIVE "", "jmp .Lpti_restore_regs_and_return_to_usermode", X86_FEATURE_PTI
567#endif
568
569 STACKLEAK_ERASE
570 POP_REGS
571 add $8, %rsp /* orig_ax */
572 UNWIND_HINT_IRET_REGS
573
574.Lswapgs_and_iret:
575 swapgs
576 CLEAR_CPU_BUFFERS
577 /* Assert that the IRET frame indicates user mode. */
578 testb $3, 8(%rsp)
579 jnz .Lnative_iret
580 ud2
581
582#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
583.Lpti_restore_regs_and_return_to_usermode:
584 POP_REGS pop_rdi=0
585
586 /*
587 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
588 * Save old stack pointer and switch to trampoline stack.
589 */
590 movq %rsp, %rdi
591 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
592 UNWIND_HINT_END_OF_STACK
593
594 /* Copy the IRET frame to the trampoline stack. */
595 pushq 6*8(%rdi) /* SS */
596 pushq 5*8(%rdi) /* RSP */
597 pushq 4*8(%rdi) /* EFLAGS */
598 pushq 3*8(%rdi) /* CS */
599 pushq 2*8(%rdi) /* RIP */
600
601 /* Push user RDI on the trampoline stack. */
602 pushq (%rdi)
603
604 /*
605 * We are on the trampoline stack. All regs except RDI are live.
606 * We can do future final exit work right here.
607 */
608 STACKLEAK_ERASE_NOCLOBBER
609
610 push %rax
611 SWITCH_TO_USER_CR3 scratch_reg=%rdi scratch_reg2=%rax
612 pop %rax
613
614 /* Restore RDI. */
615 popq %rdi
616 jmp .Lswapgs_and_iret
617#endif
618
619SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
620#ifdef CONFIG_DEBUG_ENTRY
621 /* Assert that pt_regs indicates kernel mode. */
622 testb $3, CS(%rsp)
623 jz 1f
624 ud2
6251:
626#endif
627 POP_REGS
628 addq $8, %rsp /* skip regs->orig_ax */
629 /*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
631 * when returning from IPI handler.
632 */
633#ifdef CONFIG_XEN_PV
634SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
635 ANNOTATE_NOENDBR
636 .byte 0xe9
637 .long .Lnative_iret - (. + 4)
638#endif
639
640.Lnative_iret:
641 UNWIND_HINT_IRET_REGS
642 /*
643 * Are we returning to a stack segment from the LDT? Note: in
644 * 64-bit mode SS:RSP on the exception stack is always valid.
645 */
646#ifdef CONFIG_X86_ESPFIX64
647 testb $4, (SS-RIP)(%rsp)
648 jnz native_irq_return_ldt
649#endif
650
651SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
652 ANNOTATE_NOENDBR // exc_double_fault
653 /*
654 * This may fault. Non-paranoid faults on return to userspace are
655 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
656 * Double-faults due to espfix64 are handled in exc_double_fault.
657 * Other faults here are fatal.
658 */
659 iretq
660
661#ifdef CONFIG_X86_ESPFIX64
662native_irq_return_ldt:
663 /*
664 * We are running with user GSBASE. All GPRs contain their user
665 * values. We have a percpu ESPFIX stack that is eight slots
666 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
667 * of the ESPFIX stack.
668 *
669 * We clobber RAX and RDI in this code. We stash RDI on the
670 * normal stack and RAX on the ESPFIX stack.
671 *
672 * The ESPFIX stack layout we set up looks like this:
673 *
674 * --- top of ESPFIX stack ---
675 * SS
676 * RSP
677 * RFLAGS
678 * CS
679 * RIP <-- RSP points here when we're done
680 * RAX <-- espfix_waddr points here
681 * --- bottom of ESPFIX stack ---
682 */
683
684 pushq %rdi /* Stash user RDI */
685 swapgs /* to kernel GS */
686 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
687
688 movq PER_CPU_VAR(espfix_waddr), %rdi
689 movq %rax, (0*8)(%rdi) /* user RAX */
690 movq (1*8)(%rsp), %rax /* user RIP */
691 movq %rax, (1*8)(%rdi)
692 movq (2*8)(%rsp), %rax /* user CS */
693 movq %rax, (2*8)(%rdi)
694 movq (3*8)(%rsp), %rax /* user RFLAGS */
695 movq %rax, (3*8)(%rdi)
696 movq (5*8)(%rsp), %rax /* user SS */
697 movq %rax, (5*8)(%rdi)
698 movq (4*8)(%rsp), %rax /* user RSP */
699 movq %rax, (4*8)(%rdi)
700 /* Now RAX == RSP. */
701
702 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
703
704 /*
705 * espfix_stack[31:16] == 0. The page tables are set up such that
706 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
707 * espfix_waddr for any X. That is, there are 65536 RO aliases of
708 * the same page. Set up RSP so that RSP[31:16] contains the
709 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
710 * still points to an RO alias of the ESPFIX stack.
711 */
712 orq PER_CPU_VAR(espfix_stack), %rax
713
714 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
715 swapgs /* to user GS */
716 popq %rdi /* Restore user RDI */
717
718 movq %rax, %rsp
719 UNWIND_HINT_IRET_REGS offset=8
720
721 /*
722 * At this point, we cannot write to the stack any more, but we can
723 * still read.
724 */
725 popq %rax /* Restore user RAX */
726
727 CLEAR_CPU_BUFFERS
728
729 /*
730 * RSP now points to an ordinary IRET frame, except that the page
731 * is read-only and RSP[31:16] are preloaded with the userspace
732 * values. We can now IRET back to userspace.
733 */
734 jmp native_irq_return_iret
735#endif
736SYM_CODE_END(common_interrupt_return)
737_ASM_NOKPROBE(common_interrupt_return)
738
739/*
740 * Reload gs selector with exception handling
741 * di: new selector
742 *
743 * Is in entry.text as it shouldn't be instrumented.
744 */
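/*
 * Called from C (native_load_gs_index()) with the new selector already
 * in %edi per the C calling convention. If the selector load at
 * .Lgs_change faults, the exception table entry below redirects to
 * .Lbad_gs, which falls back to loading a NULL selector (preceded by
 * __USER_DS on parts with X86_BUG_NULL_SEG).
 */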
745SYM_FUNC_START(asm_load_gs_index)
746 FRAME_BEGIN
747 swapgs
748.Lgs_change:
749 ANNOTATE_NOENDBR // error_entry
750 movl %edi, %gs
7512: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
752 swapgs
753 FRAME_END
754 RET
755
756 /* running with kernelgs */
757.Lbad_gs:
758 swapgs /* switch back to user gs */
759.macro ZAP_GS
760 /* This can't be a string because the preprocessor needs to see it. */
761 movl $__USER_DS, %eax
762 movl %eax, %gs
763.endm
764 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
765 xorl %eax, %eax
766 movl %eax, %gs
767 jmp 2b
768
769 _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
770
771SYM_FUNC_END(asm_load_gs_index)
772EXPORT_SYMBOL(asm_load_gs_index)
773
774#ifdef CONFIG_XEN_PV
775/*
776 * A note on the "critical region" in our callback handler.
777 * We want to avoid stacking callback handlers due to events occurring
778 * during handling of the last event. To do this, we keep events disabled
779 * until we've done all processing. HOWEVER, we must enable events before
780 * popping the stack frame (can't be done atomically) and so it would still
781 * be possible to get enough handler activations to overflow the stack.
782 * Although unlikely, bugs of that kind are hard to track down, so we'd
783 * like to avoid the possibility.
784 * So, on entry to the handler we detect whether we interrupted an
785 * existing activation in its critical region -- if so, we pop the current
786 * activation and restart the handler using the previous one.
787 *
 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
789 */
790 __FUNC_ALIGN
791SYM_CODE_START_LOCAL_NOALIGN(exc_xen_hypervisor_callback)
792
793/*
 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *)
 * will see the correct pointer to the pt_regs.
796 */
797 UNWIND_HINT_FUNC
798 movq %rdi, %rsp /* we don't return, adjust the stack frame */
799 UNWIND_HINT_REGS
800
801 call xen_pv_evtchn_do_upcall
802
803 jmp error_return
804SYM_CODE_END(exc_xen_hypervisor_callback)
805
806/*
807 * Hypervisor uses this for application faults while it executes.
808 * We get here for two reasons:
809 * 1. Fault while reloading DS, ES, FS or GS
810 * 2. Fault while executing IRET
811 * Category 1 we do not need to fix up as Xen has already reloaded all segment
812 * registers that could be reloaded and zeroed the others.
813 * Category 2 we fix up by killing the current process. We cannot use the
814 * normal Linux return path in this case because if we use the IRET hypercall
815 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
816 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
818 */
819 __FUNC_ALIGN
820SYM_CODE_START_NOALIGN(xen_failsafe_callback)
821 UNWIND_HINT_UNDEFINED
822 ENDBR
823 movl %ds, %ecx
824 cmpw %cx, 0x10(%rsp)
825 jne 1f
826 movl %es, %ecx
827 cmpw %cx, 0x18(%rsp)
828 jne 1f
829 movl %fs, %ecx
830 cmpw %cx, 0x20(%rsp)
831 jne 1f
832 movl %gs, %ecx
833 cmpw %cx, 0x28(%rsp)
834 jne 1f
835 /* All segments match their saved values => Category 2 (Bad IRET). */
836 movq (%rsp), %rcx
837 movq 8(%rsp), %r11
838 addq $0x30, %rsp
839 pushq $0 /* RIP */
840 UNWIND_HINT_IRET_REGS offset=8
841 jmp asm_exc_general_protection
8421: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
843 movq (%rsp), %rcx
844 movq 8(%rsp), %r11
845 addq $0x30, %rsp
846 UNWIND_HINT_IRET_REGS
847 pushq $-1 /* orig_ax = -1 => not a system call */
848 PUSH_AND_CLEAR_REGS
849 ENCODE_FRAME_POINTER
850 jmp error_return
851SYM_CODE_END(xen_failsafe_callback)
852#endif /* CONFIG_XEN_PV */
853
854/*
855 * Save all registers in pt_regs. Return GSBASE related information
856 * in EBX depending on the availability of the FSGSBASE instructions:
857 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *                   1 -> no SWAPGS on exit
 *
 *     Y        GSBASE value at entry, must be restored in paranoid_exit
863 *
864 * R14 - old CR3
865 * R15 - old SPEC_CTRL
866 */
867SYM_CODE_START(paranoid_entry)
868 ANNOTATE_NOENDBR
869 UNWIND_HINT_FUNC
870 PUSH_AND_CLEAR_REGS save_ret=1
871 ENCODE_FRAME_POINTER 8
872
873 /*
874 * Always stash CR3 in %r14. This value will be restored,
875 * verbatim, at exit. Needed if paranoid_entry interrupted
876 * another entry that already switched to the user CR3 value
877 * but has not yet returned to userspace.
878 *
879 * This is also why CS (stashed in the "iret frame" by the
880 * hardware at entry) can not be used: this may be a return
881 * to kernel code, but with a user CR3 value.
882 *
883 * Switching CR3 does not depend on kernel GSBASE so it can
884 * be done before switching to the kernel GSBASE. This is
885 * required for FSGSBASE because the kernel GSBASE has to
886 * be retrieved from a kernel internal table.
887 */
888 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
889
890 /*
891 * Handling GSBASE depends on the availability of FSGSBASE.
892 *
893 * Without FSGSBASE the kernel enforces that negative GSBASE
894 * values indicate kernel GSBASE. With FSGSBASE no assumptions
895 * can be made about the GSBASE value when entering from user
896 * space.
897 */
898 ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
899
900 /*
901 * Read the current GSBASE and store it in %rbx unconditionally,
	 * retrieve and set the current CPU's kernel GSBASE. The stored value
903 * has to be restored in paranoid_exit unconditionally.
904 *
905 * The unconditional write to GS base below ensures that no subsequent
906 * loads based on a mispredicted GS base can happen, therefore no LFENCE
907 * is needed here.
908 */
909 SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
910 jmp .Lparanoid_gsbase_done
911
912.Lparanoid_entry_checkgs:
913 /* EBX = 1 -> kernel GSBASE active, no restore required */
914 movl $1, %ebx
915
916 /*
	 * The kernel-enforced convention is that a negative GSBASE indicates
918 * a kernel value. No SWAPGS needed on entry and exit.
919 */
920 movl $MSR_GS_BASE, %ecx
921 rdmsr
922 testl %edx, %edx
923 js .Lparanoid_kernel_gsbase
924
925 /* EBX = 0 -> SWAPGS required on exit */
926 xorl %ebx, %ebx
927 swapgs
928.Lparanoid_kernel_gsbase:
929 FENCE_SWAPGS_KERNEL_ENTRY
930.Lparanoid_gsbase_done:
931
932 /*
	 * Once CR3 and %GS are set up, save and set SPEC_CTRL. Just like
934 * CR3 above, keep the old value in a callee saved register.
935 */
936 IBRS_ENTER save_reg=%r15
937 UNTRAIN_RET_FROM_CALL
938
939 RET
940SYM_CODE_END(paranoid_entry)
941
942/*
943 * "Paranoid" exit path from exception stack. This is invoked
944 * only on return from non-NMI IST interrupts that came
945 * from kernel space.
946 *
947 * We may be returning to very strange contexts (e.g. very early
948 * in syscall entry), so checking for preemption here would
949 * be complicated. Fortunately, there's no good reason to try
950 * to handle preemption here.
951 *
952 * R/EBX contains the GSBASE related information depending on the
953 * availability of the FSGSBASE instructions:
954 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *                   1 -> no SWAPGS on exit
 *
 *     Y        User space GSBASE, must be restored unconditionally
960 *
961 * R14 - old CR3
962 * R15 - old SPEC_CTRL
963 */
964SYM_CODE_START_LOCAL(paranoid_exit)
965 UNWIND_HINT_REGS
966
967 /*
968 * Must restore IBRS state before both CR3 and %GS since we need access
969 * to the per-CPU x86_spec_ctrl_shadow variable.
970 */
971 IBRS_EXIT save_reg=%r15
972
973 /*
974 * The order of operations is important. PARANOID_RESTORE_CR3 requires
975 * kernel GSBASE.
976 *
	 * NB to anyone trying to optimize this code: this code does
978 * not execute at all for exceptions from user mode. Those
979 * exceptions go through error_return instead.
980 */
981 PARANOID_RESTORE_CR3 scratch_reg=%rax save_reg=%r14
982
983 /* Handle the three GSBASE cases */
984 ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
985
986 /* With FSGSBASE enabled, unconditionally restore GSBASE */
987 wrgsbase %rbx
988 jmp restore_regs_and_return_to_kernel
989
990.Lparanoid_exit_checkgs:
991 /* On non-FSGSBASE systems, conditionally do SWAPGS */
992 testl %ebx, %ebx
993 jnz restore_regs_and_return_to_kernel
994
995 /* We are returning to a context with user GSBASE */
996 swapgs
997 jmp restore_regs_and_return_to_kernel
998SYM_CODE_END(paranoid_exit)
999
1000/*
1001 * Switch GS and CR3 if needed.
1002 */
1003SYM_CODE_START(error_entry)
1004 ANNOTATE_NOENDBR
1005 UNWIND_HINT_FUNC
1006
1007 PUSH_AND_CLEAR_REGS save_ret=1
1008 ENCODE_FRAME_POINTER 8
1009
1010 testb $3, CS+8(%rsp)
1011 jz .Lerror_kernelspace
1012
1013 /*
1014 * We entered from user mode or we're pretending to have entered
1015 * from user mode due to an IRET fault.
1016 */
1017 swapgs
1018 FENCE_SWAPGS_USER_ENTRY
1019 /* We have user CR3. Change to kernel CR3. */
1020 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1021 IBRS_ENTER
1022 UNTRAIN_RET_FROM_CALL
1023
1024 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1025 /* Put us onto the real thread stack. */
1026 jmp sync_regs
1027
1028 /*
1029 * There are two places in the kernel that can potentially fault with
1030 * usergs. Handle them here. B stepping K8s sometimes report a
1031 * truncated RIP for IRET exceptions returning to compat mode. Check
1032 * for these here too.
1033 */
1034.Lerror_kernelspace:
1035 leaq native_irq_return_iret(%rip), %rcx
1036 cmpq %rcx, RIP+8(%rsp)
1037 je .Lerror_bad_iret
1038 movl %ecx, %eax /* zero extend */
1039 cmpq %rax, RIP+8(%rsp)
1040 je .Lbstep_iret
1041 cmpq $.Lgs_change, RIP+8(%rsp)
1042 jne .Lerror_entry_done_lfence
1043
1044 /*
1045 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1046 * gsbase and proceed. We'll fix up the exception and land in
1047 * .Lgs_change's error handler with kernel gsbase.
1048 */
1049 swapgs
1050
1051 /*
1052 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1053 * kernel or user gsbase.
1054 */
1055.Lerror_entry_done_lfence:
1056 FENCE_SWAPGS_KERNEL_ENTRY
1057 CALL_DEPTH_ACCOUNT
1058 leaq 8(%rsp), %rax /* return pt_regs pointer */
1059 VALIDATE_UNRET_END
1060 RET
1061
1062.Lbstep_iret:
1063 /* Fix truncated RIP */
1064 movq %rcx, RIP+8(%rsp)
1065 /* fall through */
1066
1067.Lerror_bad_iret:
1068 /*
1069 * We came from an IRET to user mode, so we have user
1070 * gsbase and CR3. Switch to kernel gsbase and CR3:
1071 */
1072 swapgs
1073 FENCE_SWAPGS_USER_ENTRY
1074 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1075 IBRS_ENTER
1076 UNTRAIN_RET_FROM_CALL
1077
1078 /*
1079 * Pretend that the exception came from user mode: set up pt_regs
1080 * as if we faulted immediately after IRET.
1081 */
1082 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1083 call fixup_bad_iret
1084 mov %rax, %rdi
1085 jmp sync_regs
1086SYM_CODE_END(error_entry)
1087
1088SYM_CODE_START_LOCAL(error_return)
1089 UNWIND_HINT_REGS
1090 DEBUG_ENTRY_ASSERT_IRQS_OFF
1091 testb $3, CS(%rsp)
1092 jz restore_regs_and_return_to_kernel
1093 jmp swapgs_restore_regs_and_return_to_usermode
1094SYM_CODE_END(error_return)
1095
1096/*
1097 * Runs on exception stack. Xen PV does not go through this path at all,
1098 * so we can use real assembly here.
1099 *
1100 * Registers:
1101 * %r14: Used to save/restore the CR3 of the interrupted context
1102 * when MITIGATION_PAGE_TABLE_ISOLATION is in use. Do not clobber.
1103 */
1104SYM_CODE_START(asm_exc_nmi)
1105 UNWIND_HINT_IRET_ENTRY
1106 ENDBR
1107
1108 /*
1109 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1110 * the iretq it performs will take us out of NMI context.
1111 * This means that we can have nested NMIs where the next
1112 * NMI is using the top of the stack of the previous NMI. We
1113 * can't let it execute because the nested NMI will corrupt the
1114 * stack of the previous NMI. NMI handlers are not re-entrant
1115 * anyway.
1116 *
1117 * To handle this case we do the following:
1118 * Check a special location on the stack that contains a
1119 * variable that is set when NMIs are executing.
1120 * The interrupted task's stack is also checked to see if it
1121 * is an NMI stack.
1122 * If the variable is not set and the stack is not the NMI
1123 * stack then:
1124 * o Set the special variable on the stack
1125 * o Copy the interrupt frame into an "outermost" location on the
1126 * stack
1127 * o Copy the interrupt frame into an "iret" location on the stack
1128 * o Continue processing the NMI
1129 * If the variable is set or the previous stack is the NMI stack:
1130 * o Modify the "iret" location to jump to the repeat_nmi
1131 * o return back to the first NMI
1132 *
	 * Now on exit of the first NMI, we first clear the stack variable.
1134 * The NMI stack will tell any nested NMIs at that point that it is
1135 * nested. Then we pop the stack normally with iret, and if there was
1136 * a nested NMI that updated the copy interrupt stack frame, a
1137 * jump will be made to the repeat_nmi code that will handle the second
1138 * NMI.
1139 *
1140 * However, espfix prevents us from directly returning to userspace
1141 * with a single IRET instruction. Similarly, IRET to user mode
1142 * can fault. We therefore handle NMIs from user space like
1143 * other IST entries.
1144 */
1145
1146 ASM_CLAC
1147 cld
1148
1149 /* Use %rdx as our temp variable throughout */
1150 pushq %rdx
1151
1152 testb $3, CS-RIP+8(%rsp)
1153 jz .Lnmi_from_kernel
1154
1155 /*
1156 * NMI from user mode. We need to run on the thread stack, but we
1157 * can't go through the normal entry paths: NMIs are masked, and
1158 * we don't want to enable interrupts, because then we'll end
1159 * up in an awkward situation in which IRQs are on but NMIs
1160 * are off.
1161 *
1162 * We also must not push anything to the stack before switching
1163 * stacks lest we corrupt the "NMI executing" variable.
1164 */
1165
1166 swapgs
1167 FENCE_SWAPGS_USER_ENTRY
1168 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1169 movq %rsp, %rdx
1170 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
1171 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1172 pushq 5*8(%rdx) /* pt_regs->ss */
1173 pushq 4*8(%rdx) /* pt_regs->rsp */
1174 pushq 3*8(%rdx) /* pt_regs->flags */
1175 pushq 2*8(%rdx) /* pt_regs->cs */
1176 pushq 1*8(%rdx) /* pt_regs->rip */
1177 UNWIND_HINT_IRET_REGS
1178 pushq $-1 /* pt_regs->orig_ax */
1179 PUSH_AND_CLEAR_REGS rdx=(%rdx)
1180 ENCODE_FRAME_POINTER
1181
1182 IBRS_ENTER
1183 UNTRAIN_RET
1184
1185 /*
1186 * At this point we no longer need to worry about stack damage
1187 * due to nesting -- we're on the normal thread stack and we're
1188 * done with the NMI stack.
1189 */
1190
1191 movq %rsp, %rdi
1192 call exc_nmi
1193
1194 /*
1195 * Return back to user mode. We must *not* do the normal exit
1196 * work, because we don't want to enable interrupts.
1197 */
1198 jmp swapgs_restore_regs_and_return_to_usermode
1199
1200.Lnmi_from_kernel:
1201 /*
1202 * Here's what our stack frame will look like:
1203 * +---------------------------------------------------------+
1204 * | original SS |
1205 * | original Return RSP |
1206 * | original RFLAGS |
1207 * | original CS |
1208 * | original RIP |
1209 * +---------------------------------------------------------+
1210 * | temp storage for rdx |
1211 * +---------------------------------------------------------+
1212 * | "NMI executing" variable |
1213 * +---------------------------------------------------------+
1214 * | iret SS } Copied from "outermost" frame |
1215 * | iret Return RSP } on each loop iteration; overwritten |
1216 * | iret RFLAGS } by a nested NMI to force another |
1217 * | iret CS } iteration if needed. |
1218 * | iret RIP } |
1219 * +---------------------------------------------------------+
1220 * | outermost SS } initialized in first_nmi; |
1221 * | outermost Return RSP } will not be changed before |
1222 * | outermost RFLAGS } NMI processing is done. |
1223 * | outermost CS } Copied to "iret" frame on each |
1224 * | outermost RIP } iteration. |
1225 * +---------------------------------------------------------+
1226 * | pt_regs |
1227 * +---------------------------------------------------------+
1228 *
1229 * The "original" frame is used by hardware. Before re-enabling
1230 * NMIs, we need to be done with it, and we need to leave enough
1231 * space for the asm code here.
1232 *
1233 * We return by executing IRET while RSP points to the "iret" frame.
1234 * That will either return for real or it will loop back into NMI
1235 * processing.
1236 *
1237 * The "outermost" frame is copied to the "iret" frame on each
1238 * iteration of the loop, so each iteration starts with the "iret"
1239 * frame pointing to the final return target.
1240 */
1241
1242 /*
1243 * Determine whether we're a nested NMI.
1244 *
1245 * If we interrupted kernel code between repeat_nmi and
1246 * end_repeat_nmi, then we are a nested NMI. We must not
1247 * modify the "iret" frame because it's being written by
1248 * the outer NMI. That's okay; the outer NMI handler is
1249 * about to call exc_nmi() anyway, so we can just resume
1250 * the outer NMI.
1251 */
1252
1253 movq $repeat_nmi, %rdx
1254 cmpq 8(%rsp), %rdx
1255 ja 1f
1256 movq $end_repeat_nmi, %rdx
1257 cmpq 8(%rsp), %rdx
1258 ja nested_nmi_out
12591:
1260
1261 /*
1262 * Now check "NMI executing". If it's set, then we're nested.
1263 * This will not detect if we interrupted an outer NMI just
1264 * before IRET.
1265 */
1266 cmpl $1, -8(%rsp)
1267 je nested_nmi
1268
1269 /*
1270 * Now test if the previous stack was an NMI stack. This covers
1271 * the case where we interrupt an outer NMI after it clears
1272 * "NMI executing" but before IRET. We need to be careful, though:
1273 * there is one case in which RSP could point to the NMI stack
1274 * despite there being no NMI active: naughty userspace controls
1275 * RSP at the very beginning of the SYSCALL targets. We can
1276 * pull a fast one on naughty userspace, though: we program
1277 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1278 * if it controls the kernel's RSP. We set DF before we clear
1279 * "NMI executing".
1280 */
1281 lea 6*8(%rsp), %rdx
1282 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1283 cmpq %rdx, 4*8(%rsp)
1284 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1285 ja first_nmi
1286
1287 subq $EXCEPTION_STKSZ, %rdx
1288 cmpq %rdx, 4*8(%rsp)
1289 /* If it is below the NMI stack, it is a normal NMI */
1290 jb first_nmi
1291
1292 /* Ah, it is within the NMI stack. */
1293
1294 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1295 jz first_nmi /* RSP was user controlled. */
1296
1297 /* This is a nested NMI. */
1298
1299nested_nmi:
1300 /*
1301 * Modify the "iret" frame to point to repeat_nmi, forcing another
1302 * iteration of NMI handling.
1303 */
1304 subq $8, %rsp
1305 leaq -10*8(%rsp), %rdx
1306 pushq $__KERNEL_DS
1307 pushq %rdx
1308 pushfq
1309 pushq $__KERNEL_CS
1310 pushq $repeat_nmi
1311
1312 /* Put stack back */
1313 addq $(6*8), %rsp
1314
1315nested_nmi_out:
1316 popq %rdx
1317
1318 /* We are returning to kernel mode, so this cannot result in a fault. */
1319 iretq
1320
1321first_nmi:
1322 /* Restore rdx. */
1323 movq (%rsp), %rdx
1324
1325 /* Make room for "NMI executing". */
1326 pushq $0
1327
1328 /* Leave room for the "iret" frame */
1329 subq $(5*8), %rsp
1330
1331 /* Copy the "original" frame to the "outermost" frame */
1332 .rept 5
1333 pushq 11*8(%rsp)
1334 .endr
1335 UNWIND_HINT_IRET_REGS
1336
1337 /* Everything up to here is safe from nested NMIs */
1338
1339#ifdef CONFIG_DEBUG_ENTRY
1340 /*
1341 * For ease of testing, unmask NMIs right away. Disabled by
1342 * default because IRET is very expensive.
1343 */
1344 pushq $0 /* SS */
1345 pushq %rsp /* RSP (minus 8 because of the previous push) */
1346 addq $8, (%rsp) /* Fix up RSP */
1347 pushfq /* RFLAGS */
1348 pushq $__KERNEL_CS /* CS */
1349 pushq $1f /* RIP */
1350 iretq /* continues at repeat_nmi below */
1351 UNWIND_HINT_IRET_REGS
13521:
1353#endif
1354
1355repeat_nmi:
1356 ANNOTATE_NOENDBR // this code
1357 /*
1358 * If there was a nested NMI, the first NMI's iret will return
1359 * here. But NMIs are still enabled and we can take another
1360 * nested NMI. The nested NMI checks the interrupted RIP to see
1361 * if it is between repeat_nmi and end_repeat_nmi, and if so
1362 * it will just return, as we are about to repeat an NMI anyway.
1363 * This makes it safe to copy to the stack frame that a nested
1364 * NMI will update.
1365 *
1366 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1367 * we're repeating an NMI, gsbase has the same value that it had on
1368 * the first iteration. paranoid_entry will load the kernel
1369 * gsbase if needed before we call exc_nmi(). "NMI executing"
1370 * is zero.
1371 */
1372 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1373
1374 /*
1375 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1376 * here must not modify the "iret" frame while we're writing to
1377 * it or it will end up containing garbage.
1378 */
1379 addq $(10*8), %rsp
1380 .rept 5
1381 pushq -6*8(%rsp)
1382 .endr
1383 subq $(5*8), %rsp
1384end_repeat_nmi:
1385 ANNOTATE_NOENDBR // this code
1386
1387 /*
1388 * Everything below this point can be preempted by a nested NMI.
1389 * If this happens, then the inner NMI will change the "iret"
1390 * frame to point back to repeat_nmi.
1391 */
1392 pushq $-1 /* ORIG_RAX: no syscall to restart */
1393
1394 /*
1395 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context,
	 * even with normal interrupts enabled. An NMI should not be
1398 * setting NEED_RESCHED or anything that normal interrupts and
1399 * exceptions might do.
1400 */
1401 call paranoid_entry
1402 UNWIND_HINT_REGS
1403
1404 movq %rsp, %rdi
1405 call exc_nmi
1406
1407 /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
1408 IBRS_EXIT save_reg=%r15
1409
1410 PARANOID_RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1411
1412 /*
1413 * The above invocation of paranoid_entry stored the GSBASE
1414 * related information in R/EBX depending on the availability
1415 * of FSGSBASE.
1416 *
1417 * If FSGSBASE is enabled, restore the saved GSBASE value
1418 * unconditionally, otherwise take the conditional SWAPGS path.
1419 */
1420 ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1421
1422 wrgsbase %rbx
1423 jmp nmi_restore
1424
1425nmi_no_fsgsbase:
1426 /* EBX == 0 -> invoke SWAPGS */
1427 testl %ebx, %ebx
1428 jnz nmi_restore
1429
1430nmi_swapgs:
1431 swapgs
1432
1433nmi_restore:
1434 POP_REGS
1435
1436 /*
	 * Skip orig_ax and the "outermost" frame to point RSP at the
	 * "iret" frame.
1439 */
1440 addq $6*8, %rsp
1441
1442 /*
1443 * Clear "NMI executing". Set DF first so that we can easily
1444 * distinguish the remaining code between here and IRET from
1445 * the SYSCALL entry and exit paths.
1446 *
1447 * We arguably should just inspect RIP instead, but I (Andy) wrote
1448 * this code when I had the misapprehension that Xen PV supported
1449 * NMIs, and Xen PV would break that approach.
1450 */
1451 std
1452 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1453
1454 /*
1455 * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
1456 * NMI in kernel after user state is restored. For an unprivileged user
1457 * these conditions are hard to meet.
1458 */
1459
1460 /*
1461 * iretq reads the "iret" frame and exits the NMI stack in a
1462 * single instruction. We are returning to kernel mode, so this
1463 * cannot result in a fault. Similarly, we don't need to worry
1464 * about espfix64 on the way back to kernel mode.
1465 */
1466 iretq
1467SYM_CODE_END(asm_exc_nmi)
1468
1469/*
1470 * This handles SYSCALL from 32-bit code. There is no way to program
1471 * MSRs to fully disable 32-bit SYSCALL.
1472 */
1473SYM_CODE_START(entry_SYSCALL32_ignore)
1474 UNWIND_HINT_END_OF_STACK
1475 ENDBR
1476 mov $-ENOSYS, %eax
1477 CLEAR_CPU_BUFFERS
1478 sysretl
1479SYM_CODE_END(entry_SYSCALL32_ignore)
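/*
 * The sysretl above simply bounces back to the 32-bit caller with
 * %eax = -ENOSYS, so a stray 32-bit SYSCALL behaves like an
 * unimplemented system call. MSR_CSTAR is pointed at this stub when the
 * kernel has no 32-bit syscall support (see syscall_init()).
 */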
1480
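/*
 * Last-resort exit path: reset RSP to the top of the current thread
 * stack (leaving room for a pt_regs) and call make_task_dead(). The
 * caller's argument in %rdi is passed through untouched. Used when a
 * task must die and its current stack can no longer be trusted, e.g.
 * at the end of an oops.
 */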
1481.pushsection .text, "ax"
1482 __FUNC_ALIGN
1483SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
1484 UNWIND_HINT_FUNC
1485 /* Prevent any naive code from trying to unwind to our caller. */
1486 xorl %ebp, %ebp
1487
1488 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
1489 leaq -PTREGS_SIZE(%rax), %rsp
1490 UNWIND_HINT_REGS
1491
1492 call make_task_dead
1493SYM_CODE_END(rewind_stack_and_make_dead)
1494.popsection
1495
1496/*
1497 * This sequence executes branches in order to remove user branch information
1498 * from the branch history tracker in the Branch Predictor, therefore removing
1499 * user influence on subsequent BTB lookups.
1500 *
1501 * It should be used on parts prior to Alder Lake. Newer parts should use the
1502 * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
1503 * virtualized on newer hardware the VMM should protect against BHI attacks by
1504 * setting BHI_DIS_S for the guests.
1505 *
 * CALLs/RETs are necessary to prevent the Loop Stream Detector (LSD) from engaging
1507 * and not clearing the branch history. The call tree looks like:
1508 *
1509 * call 1
1510 * call 2
1511 * call 2
1512 * call 2
1513 * call 2
1514 * call 2
1515 * ret
1516 * ret
1517 * ret
1518 * ret
1519 * ret
1520 * ret
1521 *
1522 * This means that the stack is non-constant and ORC can't unwind it with %rsp
1523 * alone. Therefore we unconditionally set up the frame pointer, which allows
1524 * ORC to unwind properly.
1525 *
1526 * The alignment is for performance and not for safety, and may be safely
1527 * refactored in the future if needed.
1528 */
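/*
 * Concretely: %ecx counts five rounds through the call/ret pair at
 * labels 1/2 below, each round also running the five-iteration branchy
 * loop counted in %eax, and the trailing LFENCE at label 5 serializes
 * before the function returns.
 */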
1529SYM_FUNC_START(clear_bhb_loop)
1530 push %rbp
1531 mov %rsp, %rbp
1532 movl $5, %ecx
1533 ANNOTATE_INTRA_FUNCTION_CALL
1534 call 1f
1535 jmp 5f
1536 .align 64, 0xcc
1537 ANNOTATE_INTRA_FUNCTION_CALL
15381: call 2f
1539 RET
1540 .align 64, 0xcc
15412: movl $5, %eax
15423: jmp 4f
1543 nop
15444: sub $1, %eax
1545 jnz 3b
1546 sub $1, %ecx
1547 jnz 1b
1548 RET
15495: lfence
1550 pop %rbp
1551 RET
1552SYM_FUNC_END(clear_bhb_loop)
1553EXPORT_SYMBOL_GPL(clear_bhb_loop)
1554STACK_FRAME_NON_STANDARD(clear_bhb_loop)
1555
