// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

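/* IPI callback: flush the calling hart's local instruction cache. */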
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

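/*
 * Flush the instruction cache on every hart: this hart directly, and
 * remote harts through either an SBI remote fence or an IPI.
 */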
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
		   !riscv_use_ipi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
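/*
 * Keep the instruction cache coherent with a freshly written page: if
 * the folio backing the PTE is not yet marked PG_dcache_clean, flush
 * the MM's icache and set the bit so the flush is not repeated.
 */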
void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_icache_mm(mm, false);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
#endif /* CONFIG_MMU */

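/*
 * Cache block sizes for the Zicbom (cache-block management) and Zicboz
 * (cache-block zero) extensions, discovered at boot.
 */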
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);

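/*
 * Read one CPU node's cache block-size property. All harts are expected
 * to agree on the value: the first value seen wins, and a mismatch on a
 * later hart only triggers a warning.
 */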
static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}

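/*
 * Discover the CBOM/CBOZ block sizes: from the devicetree CPU nodes
 * when ACPI is disabled, otherwise from the ACPI RHCT table.
 */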
void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0;
	struct device_node *node;
	struct acpi_table_header *rhct;
	acpi_status status;

	if (acpi_disabled) {
		for_each_of_cpu_node(node) {
			/* set block-size for cbom and/or cboz extension if available */
			cbo_get_block_size(node, "riscv,cbom-block-size",
					   &cbom_block_size, &cbom_hartid);
			cbo_get_block_size(node, "riscv,cboz-block-size",
					   &cboz_block_size, &cboz_hartid);
		}
	} else {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;

		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
		acpi_put_table((struct acpi_table_header *)rhct);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;
}