/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *   0: 0x8000000000000000	0x0000000000000000
 *   1: 0xfc00000000000000	0xf000000000000000
 *  ...
 *  11: 0xfffffffffffffff8	0xfffffffffffffc00
 *  12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *    10 calls
 *     5 returns
 *     3 calls
 *     4 returns
 *     3 calls
 *    ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered to obfuscate the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL
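
/*
 * Illustration only, never compiled (hence the #if 0): a throwaway
 * user-space sketch of the shift arithmetic described above. A call is
 * modelled as an arithmetic shift right by RET_DEPTH_SHIFT, a return as
 * a logical shift left by the same count; the sign extension is what
 * makes the tracking value saturate. The exact bookkeeping done by the
 * call/return thunks differs in detail (see the table above); this only
 * demonstrates the sign-extension and saturation behaviour.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t depth = 0x8000000000000000ULL;	/* RET_DEPTH_INIT, depth 0 */
	int call;

	/* Call entry: arithmetic shift right sign-extends the MSB */
	for (call = 1; call <= 16 && depth != ~0ULL; call++) {
		depth = (uint64_t)((int64_t)depth >> 5);
		printf("after call %2d: %#018llx\n", call,
		       (unsigned long long)depth);
	}

	/* Return: the inverse, a logical shift left */
	depth <<= 5;
	printf("after one ret: %#018llx\n", (unsigned long long)depth);
	return 0;
}
#endif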

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	%gs:__x86_call_count;
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	%gs:__x86_ret_count;
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	%gs:__x86_stuffs_count;
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	%gs:__x86_ctxsw_count;
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define ASM_CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	xor	%eax, %eax;					\
	bts	$63, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	movb	$0xfc, %al;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, %gs:pcpu_hot + X86_call_depth;		\
	CALL_THUNKS_DEBUG_INC_CALLS

#define ASM_INCREMENT_CALL_DEPTH				\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define ASM_CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define INCREMENT_CALL_DEPTH
#define ASM_INCREMENT_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#endif

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
	int3;					\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	ASM_CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif
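
/*
 * Worked example: for nr == RSB_CLEAR_LOOPS (32) the 64-bit variant above
 * executes 16 loop iterations of two calls each, i.e. 32 RSB entries, and
 * rewinds the stack by 2 * 8 bytes per iteration to discard the return
 * addresses those calls pushed.
 */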

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lhere_\@:
	.pushsection .discard.retpoline_safe
	.long .Lhere_\@
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
	(defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm
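
/*
 * Encoding example (illustrative, assuming \reg is r11): the emitted
 * sequence for a call becomes "2e e8 <rel32>", i.e. a CS segment-override
 * prefix in front of the 5 byte call, 6 bytes total. That is the same
 * size as the "lfence; call *%r11" (3 + 3 bytes) sequence which
 * apply_retpolines() may later rewrite such a site into.
 */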

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm
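
/*
 * Usage sketch (illustrative; the real call sites live in the entry and
 * context-switch code):
 *
 *	FILL_RETURN_BUFFER %rbx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * The given register is clobbered as the loop counter; when none of the
 * feature flags is enabled, the default "jmp .Lskip_rsb_\@" alternative
 * skips the stuffing sequence entirely.
 */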

#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
#else
#define CALL_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
	VALIDATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", \ibpb_feature,			\
		      __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

#define UNTRAIN_RET \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_VM \
	__UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_FROM_CALL \
	__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)


.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		    __stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	".long 999b\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

#ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void);
#else
static inline void __x86_return_thunk(void) {}
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
#endif

#ifdef CONFIG_CPU_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

extern void (*x86_return_thunk)(void);

#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void call_depth_return_thunk(void);

#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else /* !CONFIG_CALL_DEPTH_TRACKING */

static inline void call_depth_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""

#endif /* CONFIG_CALL_DEPTH_TRACKING */

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only available in newer GCC;
 * that is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"	jmp    904f;\n"					\
	"	.align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"	jmp    902b;\n"					\
	"	.align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"	pushl  %[thunk_target];\n"			\
	"	ret;\n"						\
	"	.align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
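
/*
 * Usage sketch (illustrative only; see the real users, e.g. the hypercall
 * wrappers): the branch target is passed in via THUNK_TARGET() and the
 * asm statement has to spell out all of its own register constraints and
 * clobbers, since CALL_NOSPEC knows nothing about the C calling
 * convention:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret), ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(fn_ptr)
 *		     : "memory");
 */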

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
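
/*
 * Usage sketch (the real call sites are the firmware glue, e.g. the EFI
 * runtime wrappers):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = some_firmware_call(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * The start/end pair also disables and re-enables preemption, so the
 * bracketed firmware call must not sleep.
 */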

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
589 | |