// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <linux/debugfs.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/security_features.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/inst.h>

#include "setup.h"

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum branch_cache_flush_type {
        BRANCH_CACHE_FLUSH_NONE = 0x1,
        BRANCH_CACHE_FLUSH_SW   = 0x2,
        BRANCH_CACHE_FLUSH_HW   = 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif
static void enable_barrier_nospec(bool enable)
{
        barrier_nospec_enabled = enable;
        do_barrier_nospec_fixups(enable);
}

void __init setup_barrier_nospec(void)
{
        bool enable;

        /*
         * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
         * But there's a good reason not to. The two flags we check below are
         * both enabled by default in the kernel, so if the hcall is not
         * functional they will be enabled.
         * On a system where the host firmware has been updated (so the ori
         * functions as a barrier), but on which the hypervisor (KVM/QEMU) has
         * not been updated, we would like to enable the barrier. Dropping the
         * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
         * that we potentially enable the barrier on systems where the host
         * firmware is not updated, but that's harmless as the barrier is a
         * no-op there.
         */
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

        if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
        no_nospec = true;

        return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
        switch (val) {
        case 0:
        case 1:
                break;
        default:
                return -EINVAL;
        }

        if (!!val == !!barrier_nospec_enabled)
                return 0;

        enable_barrier_nospec(!!val);

        return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
        *val = barrier_nospec_enabled ? 1 : 0;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
                         barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
        debugfs_create_file_unsafe("barrier_nospec", 0600,
                                   arch_debugfs_dir, NULL,
                                   &fops_barrier_nospec);
        return 0;
}
device_initcall(barrier_nospec_debugfs_init);

static __init int security_feature_debugfs_init(void)
{
        debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
                           &powerpc_security_features);
        return 0;
}
device_initcall(security_feature_debugfs_init);
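
/*
 * Usage sketch: assuming debugfs is mounted at the conventional
 * /sys/kernel/debug, the two files created above appear under the arch
 * directory and can be read or (for barrier_nospec) toggled by root:
 *
 *   # cat /sys/kernel/debug/powerpc/barrier_nospec
 *   # echo 0 > /sys/kernel/debug/powerpc/barrier_nospec
 *   # cat /sys/kernel/debug/powerpc/security_features   # SEC_FTR_* mask, hex
 */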
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
        no_spectrev2 = true;

        return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_E500
void __init setup_spectre_v2(void)
{
        if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_E500 */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        bool thread_priv;

        thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

        if (rfi_flush) {
                struct seq_buf s;

                seq_buf_init(&s, buf, PAGE_SIZE - 1);

                seq_buf_printf(&s, "Mitigation: RFI Flush");
                if (thread_priv)
                        seq_buf_printf(&s, ", L1D private per thread");

                seq_buf_printf(&s, "\n");

                return s.len;
        }

        if (thread_priv)
                return sprintf(buf, "Vulnerable: L1D private per thread\n");

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_meltdown(dev, attr, buf);
}
#endif
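
/*
 * Usage sketch: the cpu_show_*() hooks above back the generic sysfs
 * vulnerability files, so the state they compute is visible as, e.g.:
 *
 *   $ cat /sys/devices/system/cpu/vulnerabilities/meltdown
 *   Mitigation: RFI Flush, L1D private per thread
 *
 * (The exact string depends on the firmware-reported features, as above.)
 */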

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
                if (barrier_nospec_enabled)
                        seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
                else
                        seq_buf_printf(&s, "Vulnerable");

                if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
                        seq_buf_printf(&s, ", ori31 speculation barrier enabled");

                seq_buf_printf(&s, "\n");
        } else
                seq_buf_printf(&s, "Not affected\n");

        return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;
        bool bcs, ccd;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

        if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");

                if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

                if (bcs && ccd)
                        seq_buf_printf(&s, ", ");

                if (ccd)
                        seq_buf_printf(&s, "Indirect branch cache disabled");

        } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
                seq_buf_printf(&s, "Mitigation: Software count cache flush");

                if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");

        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
                seq_buf_printf(&s, "Vulnerable");
        }

        if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
                if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        seq_buf_printf(&s, ", Software link stack flush");
                if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");
        }

        seq_buf_printf(&s, "\n");

        return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
        pr_info("stf-barrier: disabled on command line.\n");
        no_stf_barrier = true;
        return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

enum stf_barrier_type stf_barrier_type_get(void)
{
        return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
        if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
                /* Until firmware tells us, we have the barrier with auto */
                return 0;
        } else if (strncmp(p, "off", 3) == 0) {
                handle_no_stf_barrier(NULL);
                return 0;
        } else {
                return 1;
        }
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
        handle_no_stf_barrier(NULL);
        return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
        if (enable)
                do_stf_barrier_fixups(stf_enabled_flush_types);
        else
                do_stf_barrier_fixups(STF_BARRIER_NONE);

        stf_barrier = enable;
}

void setup_stf_barrier(void)
{
        enum stf_barrier_type type;
        bool enable;

        /* Default to fallback in case fw-features are not available */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                type = STF_BARRIER_EIEIO;
        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
                type = STF_BARRIER_SYNC_ORI;
        else if (cpu_has_feature(CPU_FTR_ARCH_206))
                type = STF_BARRIER_FALLBACK;
        else
                type = STF_BARRIER_NONE;

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_STF_BARRIER);

        if (type == STF_BARRIER_FALLBACK) {
                pr_info("stf-barrier: fallback barrier available\n");
        } else if (type == STF_BARRIER_SYNC_ORI) {
                pr_info("stf-barrier: hwsync barrier available\n");
        } else if (type == STF_BARRIER_EIEIO) {
                pr_info("stf-barrier: eieio barrier available\n");
        }

        stf_enabled_flush_types = type;

        if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
                const char *type;

                switch (stf_enabled_flush_types) {
                case STF_BARRIER_EIEIO:
                        type = "eieio";
                        break;
                case STF_BARRIER_SYNC_ORI:
                        type = "hwsync";
                        break;
                case STF_BARRIER_FALLBACK:
                        type = "fallback";
                        break;
                default:
                        type = "unknown";
                }
                return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
        }

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}

static int ssb_prctl_get(struct task_struct *task)
{
        /*
         * The STF_BARRIER feature is on by default, so if it's off that means
         * firmware has explicitly said the CPU is not vulnerable via either
         * the hypercall or device tree.
         */
        if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
                return PR_SPEC_NOT_AFFECTED;

        /*
         * If the system's CPU has no known barrier (see setup_stf_barrier())
         * then assume that the CPU is not vulnerable.
         */
        if (stf_enabled_flush_types == STF_BARRIER_NONE)
                return PR_SPEC_NOT_AFFECTED;

        /*
         * Otherwise the CPU is vulnerable. The barrier is not a global or
         * per-process mitigation, so the only value that can be reported here
         * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
         */
        return PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != stf_barrier)
                stf_barrier_enable(enable);

        return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
        *val = stf_barrier ? 1 : 0;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
                         "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
        debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
                                   NULL, &fops_stf_barrier);
        return 0;
}
device_initcall(stf_barrier_debugfs_init);
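
/*
 * Usage sketch: assuming debugfs is mounted at /sys/kernel/debug, the
 * barrier can be flipped at runtime for testing:
 *
 *   # echo 0 > /sys/kernel/debug/powerpc/stf_barrier
 *   # cat /sys/kernel/debug/powerpc/stf_barrier
 */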
#endif /* CONFIG_DEBUG_FS */

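/*
 * The hardware-assisted flush sequence patched in below consists of three
 * separate instructions (li r9,0x7fff; mtctr r9; bcctr), so ordering matters
 * when live-patching: the bcctr site is NOPed out first on disable and
 * written last on enable, ensuring no CPU racing through the sequence ever
 * executes the bcctr without the preceding CTR setup in place.
 */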
static void update_branch_cache_flush(void)
{
        u32 *site, __maybe_unused *site2;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        site = &patch__call_kvm_flush_link_stack;
        site2 = &patch__call_kvm_flush_link_stack_p9;
        // This controls the branch from guest_exit_cont to kvm_flush_link_stack
        if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
                patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
        } else {
                // Could use HW flush, but that could also flush count cache
                patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
                patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
        }
#endif

        // Patch out the bcctr first, then nop the rest
        site = &patch__call_flush_branch_caches3;
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
        site = &patch__call_flush_branch_caches2;
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
        site = &patch__call_flush_branch_caches1;
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));

        // This controls the branch from _switch to flush_branch_caches
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
            link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                // Nothing to be done

        } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
                   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
                // Patch in the bcctr last
                site = &patch__call_flush_branch_caches1;
                patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
                site = &patch__call_flush_branch_caches2;
                patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
                site = &patch__call_flush_branch_caches3;
                patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

        } else {
                patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

                // If we just need to flush the link stack, early return
                if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                        patch_instruction_site(&patch__flush_link_stack_return,
                                               ppc_inst(PPC_RAW_BLR()));

                // If we have flush instruction, early return
                } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
                        patch_instruction_site(&patch__flush_count_cache_return,
                                               ppc_inst(PPC_RAW_BLR()));
                }
        }
}

static void toggle_branch_cache_flush(bool enable)
{
        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
                if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

                pr_info("count-cache-flush: flush disabled.\n");
        } else {
                if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
                        pr_info("count-cache-flush: hardware flush enabled.\n");
                } else {
                        count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
                        pr_info("count-cache-flush: software flush enabled.\n");
                }
        }

        if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
                if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

                pr_info("link-stack-flush: flush disabled.\n");
        } else {
                if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
                        pr_info("link-stack-flush: hardware flush enabled.\n");
                } else {
                        link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
                        pr_info("link-stack-flush: software flush enabled.\n");
                }
        }

        update_branch_cache_flush();
}

void setup_count_cache_flush(void)
{
        bool enable = true;

        if (no_spectrev2 || cpu_mitigations_off()) {
                if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
                    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
                        pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

                enable = false;
        }

        /*
         * There's no firmware feature flag/hypervisor bit to tell us we need to
         * flush the link stack on context switch. So we set it here if we see
         * either of the Spectre v2 mitigations that aim to protect userspace.
         */
        if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
            security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
                security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

        toggle_branch_cache_flush(enable);
}

static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);

static int __init handle_no_rfi_flush(char *p)
{
        pr_info("rfi-flush: disabled on command line.\n");
        no_rfi_flush = true;
        return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
        pr_info("entry-flush: disabled on command line.\n");
        no_entry_flush = true;
        return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
        pr_info("uaccess-flush: disabled on command line.\n");
        no_uaccess_flush = true;
        return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
        pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
        handle_no_rfi_flush(NULL);
        return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
        /*
         * We don't need to do the flush explicitly, just enter+exit kernel is
         * sufficient, the RFI exit handlers will do the right thing.
         */
}

void rfi_flush_enable(bool enable)
{
        if (enable) {
                do_rfi_flush_fixups(enabled_flush_types);
                on_each_cpu(do_nothing, NULL, 1);
        } else
                do_rfi_flush_fixups(L1D_FLUSH_NONE);

        rfi_flush = enable;
}

static void entry_flush_enable(bool enable)
{
        if (enable) {
                do_entry_flush_fixups(enabled_flush_types);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                do_entry_flush_fixups(L1D_FLUSH_NONE);
        }

        entry_flush = enable;
}

static void uaccess_flush_enable(bool enable)
{
        if (enable) {
                do_uaccess_flush_fixups(enabled_flush_types);
                static_branch_enable(&uaccess_flush_key);
                on_each_cpu(do_nothing, NULL, 1);
        } else {
                static_branch_disable(&uaccess_flush_key);
                do_uaccess_flush_fixups(L1D_FLUSH_NONE);
        }

        uaccess_flush = enable;
}

static void __ref init_fallback_flush(void)
{
        u64 l1d_size, limit;
        int cpu;

        /* Only allocate the fallback flush area once (at boot time). */
        if (l1d_flush_fallback_area)
                return;

        l1d_size = ppc64_caches.l1d.size;

        /*
         * If there is no d-cache-size property in the device tree, l1d_size
         * could be zero. That leads to the loop in the asm wrapping around to
         * 2^64-1, and then walking off the end of the fallback area and
         * eventually causing a page fault which is fatal. Just default to
         * something vaguely sane.
         */
        if (!l1d_size)
                l1d_size = (64 * 1024);

        limit = min(ppc64_bolted_size(), ppc64_rma_size);

        /*
         * Align to L1d size, and size it at 2x L1d size, to catch possible
         * hardware prefetch runoff. We don't have a recipe for load patterns to
         * reliably avoid the prefetcher.
         */
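        /*
         * Worked example: with a reported 32 kB L1d this allocates a 64 kB
         * area aligned to 32 kB; with the 64 kB default above, a 128 kB area
         * aligned to 64 kB.
         */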
        l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
                                                         l1d_size, MEMBLOCK_LOW_LIMIT,
                                                         limit, NUMA_NO_NODE);
        if (!l1d_flush_fallback_area)
                panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
                      __func__, l1d_size * 2, l1d_size, &limit);

        for_each_possible_cpu(cpu) {
                struct paca_struct *paca = paca_ptrs[cpu];
                paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
                paca->l1d_flush_size = l1d_size;
        }
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
        if (types & L1D_FLUSH_FALLBACK) {
                pr_info("rfi-flush: fallback displacement flush available\n");
                init_fallback_flush();
        }

        if (types & L1D_FLUSH_ORI)
                pr_info("rfi-flush: ori type flush available\n");

        if (types & L1D_FLUSH_MTTRIG)
                pr_info("rfi-flush: mttrig type flush available\n");

        enabled_flush_types = types;

        if (!cpu_mitigations_off() && !no_rfi_flush)
                rfi_flush_enable(enable);
}

void setup_entry_flush(bool enable)
{
        if (cpu_mitigations_off())
                return;

        if (!no_entry_flush)
                entry_flush_enable(enable);
}

void setup_uaccess_flush(bool enable)
{
        if (cpu_mitigations_off())
                return;

        if (!no_uaccess_flush)
                uaccess_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        toggle_branch_cache_flush(enable);

        return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

static int link_stack_flush_get(void *data, u64 *val)
{
        if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
                         count_cache_flush_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
                         count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
        debugfs_create_file_unsafe("count_cache_flush", 0600,
                                   arch_debugfs_dir, NULL,
                                   &fops_count_cache_flush);
        debugfs_create_file_unsafe("link_stack_flush", 0600,
                                   arch_debugfs_dir, NULL,
                                   &fops_link_stack_flush);
        return 0;
}
device_initcall(count_cache_flush_debugfs_init);
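
/*
 * Usage sketch: both files accept 0/1 writes. Note that, as wired above,
 * both share count_cache_flush_set(), so writing either one calls
 * toggle_branch_cache_flush() and toggles the count cache and link stack
 * flushes together:
 *
 *   # echo 1 > /sys/kernel/debug/powerpc/count_cache_flush
 *   # cat /sys/kernel/debug/powerpc/link_stack_flush
 */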

static int rfi_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != rfi_flush)
                rfi_flush_enable(enable);

        return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
        *val = rfi_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static int entry_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != entry_flush)
                entry_flush_enable(enable);

        return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
        *val = entry_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");

static int uaccess_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != uaccess_flush)
                uaccess_flush_enable(enable);

        return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
        *val = uaccess_flush ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
        debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
        debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
        debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
        return 0;
}
device_initcall(rfi_flush_debugfs_init);
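
/*
 * Usage sketch: assuming debugfs at /sys/kernel/debug, each L1d flush can
 * be toggled independently for testing, e.g.:
 *
 *   # echo 0 > /sys/kernel/debug/powerpc/rfi_flush
 *   # echo 1 > /sys/kernel/debug/powerpc/entry_flush
 *   # cat /sys/kernel/debug/powerpc/uaccess_flush
 */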
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */