/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>
#include <linux/sched/idle.h>

#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_SUBSTATE_MASK)
#define MWAIT_C1_SUBSTATE_MASK 0xf0
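/*
 * For example, hint 0x51 encodes (major) C-state 5, sub-state 1:
 * MWAIT_HINT2CSTATE(0x51) == 5 and MWAIT_HINT2SUBSTATE(0x51) == 1.
 */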

#define CPUID_MWAIT_LEAF 5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
#define CPUID5_ECX_INTERRUPT_BREAK 0x2
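/*
 * Illustrative sketch: whether MWAIT can be woken by an interrupt
 * even while interrupts are masked can be probed via CPUID leaf 5
 * (cpuid_ecx() is from <asm/processor.h>):
 *
 *	if (cpuid_ecx(CPUID_MWAIT_LEAF) & CPUID5_ECX_INTERRUPT_BREAK)
 *		... pass MWAIT_ECX_INTERRUPT_BREAK in ECX ...
 */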

#define MWAIT_ECX_INTERRUPT_BREAK 0x1
#define MWAITX_ECX_TIMER_ENABLE BIT(1)
#define MWAITX_MAX_WAIT_CYCLES UINT_MAX
#define MWAITX_DISABLE_CSTATES 0xf0
#define TPAUSE_C01_STATE 1
#define TPAUSE_C02_STATE 0

static __always_inline void __monitor(const void *eax, unsigned long ecx,
				      unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}

static __always_inline void __monitorx(const void *eax, unsigned long ecx,
				       unsigned long edx)
{
	/* "monitorx %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xfa;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}

static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
	mds_idle_clear_cpu_buffers();

	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
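/*
 * Illustrative sketch (not part of this header): the canonical
 * MONITOR/MWAIT pattern against a hypothetical flag word. The flag
 * must be re-checked between MONITOR and MWAIT, since a store that
 * landed before the monitor was armed would otherwise be missed;
 * a store arriving after arming makes MWAIT return immediately:
 *
 *	__monitor(&flag, 0, 0);
 *	if (!READ_ONCE(flag))
 *		__mwait(0, MWAIT_ECX_INTERRUPT_BREAK);
 */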

/*
 * MWAITX allows for a timer expiration to get the core out of a wait state
 * in addition to the default MWAIT exit condition of a store appearing at a
 * monitored virtual address.
 *
 * Registers:
 *
 * MWAITX ECX[1]: enable timer if set
 * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
 * frequency is the same as the TSC frequency.
 *
 * Below is a comparison between MWAIT and MWAITX on AMD processors:
 *
 *                 MWAIT                        MWAITX
 * opcode          0f 01 c9             |       0f 01 fb
 * ECX[0]          value of RFLAGS.IF seen by instruction
 * ECX[1]          unused/#GP if set    |       enable timer if set
 * ECX[31:2]       unused/#GP if set
 * EAX             unused (reserved for hint)
 * EBX[31:0]       unused               |       max wait time (P0 clocks)
 *
 *                 MONITOR                      MONITORX
 * opcode          0f 01 c8             |       0f 01 fa
 * EAX             (logical) address to monitor
 * ECX             #GP if not zero
 */
static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
				     unsigned long ecx)
{
	/* No MDS buffer clear as this is AMD/HYGON only */

	/* "mwaitx %eax, %ebx, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xfb;"
		     :: "a" (eax), "b" (ebx), "c" (ecx));
}

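/*
 * Illustrative sketch, modeled on the MWAITX-based delay loop in
 * arch/x86/lib/delay.c: wait roughly 10000 TSC-rate (P0) clocks, or
 * until a store hits a hypothetical cacheline-aligned dummy variable:
 *
 *	static u8 dummy __aligned(64);
 *
 *	__monitorx(&dummy, 0, 0);
 *	__mwaitx(MWAITX_DISABLE_CSTATES, 10000, MWAITX_ECX_TIMER_ENABLE);
 */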
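/*
 * Like __mwait(), but with "sti" immediately before the MWAIT opcode.
 * STI only takes effect after the following instruction, so MWAIT
 * begins inside the interrupt shadow: no interrupt can fire in the
 * gap between enabling IRQs and entering the wait state, which would
 * otherwise risk a lost wakeup.
 */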
static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	mds_idle_clear_cpu_buffers();

	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI otherwise needed to trigger a need_resched
 * check. We execute MONITOR against need_resched and enter an optimized
 * wait state through MWAIT. Whenever someone changes need_resched, we
 * are woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
			mb();
			clflush((void *)&current_thread_info()->flags);
			mb();
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__mwait(eax, ecx);
	}
	current_clr_polling();
}
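/*
 * Illustrative sketch: an idle driver enters a C-state by passing an
 * EAX hint built from a (hypothetical) cstate/substate pair, plus the
 * interrupt-break ECX bit so an interrupt ends the wait even while
 * interrupts are masked:
 *
 *	mwait_idle_with_hints((cstate << MWAIT_SUBSTATE_SIZE) | substate,
 *			      MWAIT_ECX_INTERRUPT_BREAK);
 */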

/*
 * Caller can specify whether to enter C0.1 (low latency, less
 * power saving) or C0.2 state (saves more power, but longer wakeup
 * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR,
 * which can force requests for C0.2 to be downgraded to C0.1.
 */
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	/* "tpause %ecx, %edx, %eax;" */
#ifdef CONFIG_AS_TPAUSE
	asm volatile("tpause %%ecx\n"
		     :
		     : "c" (ecx), "d" (edx), "a" (eax));
#else
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\n\t"
		     :
		     : "c" (ecx), "d" (edx), "a" (eax));
#endif
}
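/*
 * Illustrative sketch: a delay-style caller waits until an absolute
 * TSC deadline in the deeper C0.2 state. EDX:EAX carry the 64-bit
 * deadline; ECX selects C0.1 vs C0.2:
 *
 *	u64 end = rdtsc() + cycles;
 *
 *	__tpause(TPAUSE_C02_STATE, upper_32_bits(end), lower_32_bits(end));
 */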

#endif /* _ASM_X86_MWAIT_H */