/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

asmlinkage void ret_from_fork(void);
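
/*
 * Rough call path for a context switch (simplified, for orientation;
 * __switch_to_asm() lives in arch/x86/entry/entry_*.S, __switch_to() in
 * arch/x86/kernel/process_*.c):
 *
 *	context_switch()			kernel/sched/core.c
 *	    switch_to(prev, next, last)
 *		__switch_to_asm()		asm: swap callee-saved regs, %sp
 *		    __switch_to()		C: FPU, segments, TLS, sp0, ...
 *
 * A freshly forked task does not come back through this path the first time
 * it runs; its saved return address is ret_from_fork() (see fork_frame).
 */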

/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long flags;
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
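
/*
 * Illustrative layout only: on x86_64, once __switch_to_asm() has pushed the
 * callee-saved registers, thread.sp points at the slot holding r15 and the
 * frame reads, from low to high address:
 *
 *	thread.sp ->	r15
 *			r14
 *			r13
 *			r12
 *			bx
 *			bp		frame-pointer link for the unwinder
 *			ret_addr	where the task resumes when switched in
 */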

struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
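
/*
 * Sketch of how copy_thread() is expected to use this (helper and field
 * names slightly simplified): a fork_frame is carved out at the top of the
 * new task's kernel stack, just below its pt_regs, and thread.sp points at
 * it:
 *
 *	childregs  = task_pt_regs(p);
 *	fork_frame = container_of(childregs, struct fork_frame, regs);
 *	frame      = &fork_frame->frame;
 *
 *	frame->bp	= 0;
 *	frame->ret_addr	= (unsigned long) ret_from_fork;
 *	p->thread.sp	= (unsigned long) fork_frame;
 *
 * __switch_to_asm() then "returns" into ret_from_fork() the first time the
 * new task is scheduled in.
 */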

#define switch_to(prev, next, last)					\
do {									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
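
/*
 * Typical use, as in context_switch() in kernel/sched/core.c (simplified):
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 *
 * 'last' matters because when a switched-out task eventually runs this code
 * again, its saved 'prev' is stale; __switch_to_asm() hands back the task
 * that was actually running before us.  Mainline versions of this macro also
 * call prepare_switch_to(next) first, so a CONFIG_VMAP_STACK stack is
 * faulted in before we switch to it.
 */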

#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
		return;

	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
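
/*
 * Expected caller (simplified): on 32-bit, __switch_to() invokes
 * refresh_sysenter_cs() for the incoming task so MSR_IA32_SYSENTER_CS
 * follows it; in practice the value only changes while a task is in vm86
 * mode, which clears thread->sysenter_cs.
 */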

/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
	/* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task->thread.sp0);
	else
		this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
	/* Xen PV enters the kernel on the thread stack. */
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif
}

static inline void kthread_frame_init(struct inactive_task_frame *frame,
				      int (*fun)(void *), void *arg)
{
	frame->bx = (unsigned long)fun;
#ifdef CONFIG_X86_32
	frame->di = (unsigned long)arg;
#else
	frame->r12 = (unsigned long)arg;
#endif
}
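
/*
 * Kernel-thread sketch (names simplified): copy_thread() has no user pt_regs
 * to return to for a kernel thread, so it stores the thread function and its
 * argument in the fork frame:
 *
 *	kthread_frame_init(frame, fn, fn_arg);
 *
 * ret_from_fork() then sees a non-NULL bx and calls fn(arg), with the
 * argument taken from r12 (di on 32-bit), instead of heading to user mode.
 */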

#endif /* _ASM_X86_SWITCH_TO_H */