1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Contains CPU feature definitions |
4 | * |
5 | * Copyright (C) 2015 ARM Ltd. |
6 | * |
7 | * A note for the weary kernel hacker: the code here is confusing and hard to |
8 | * follow! That's partly because it's solving a nasty problem, but also because |
9 | * there's a little bit of over-abstraction that tends to obscure what's going |
10 | * on behind a maze of helper functions and macros. |
11 | * |
12 | * The basic problem is that hardware folks have started gluing together CPUs |
13 | * with distinct architectural features; in some cases even creating SoCs where |
14 | * user-visible instructions are available only on a subset of the available |
15 | * cores. We try to address this by snapshotting the feature registers of the |
16 | * boot CPU and comparing these with the feature registers of each secondary |
17 | * CPU when bringing them up. If there is a mismatch, then we update the |
18 | * snapshot state to indicate the lowest-common denominator of the feature, |
19 | * known as the "safe" value. This snapshot state can be queried to view the |
20 | * "sanitised" value of a feature register. |
21 | * |
22 | * The sanitised register values are used to decide which capabilities we |
23 | * have in the system. These may be in the form of traditional "hwcaps" |
24 | * advertised to userspace or internal "cpucaps" which are used to configure |
25 | * things like alternative patching and static keys. While a feature mismatch |
26 | * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch |
27 | * may prevent a CPU from being onlined at all. |
28 | * |
29 | * Some implementation details worth remembering: |
30 | * |
31 | * - Mismatched features are *always* sanitised to a "safe" value, which |
32 | * usually indicates that the feature is not supported. |
33 | * |
34 | * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK" |
35 | * warning when onlining an offending CPU and the kernel will be tainted |
36 | * with TAINT_CPU_OUT_OF_SPEC. |
37 | * |
38 | * - Features marked as FTR_VISIBLE have their sanitised value visible to |
39 | * userspace. FTR_VISIBLE features in registers that are only visible |
40 | * to EL0 by trapping *must* have a corresponding HWCAP so that late |
41 | * onlining of CPUs cannot lead to features disappearing at runtime. |
42 | * |
43 | * - A "feature" is typically a 4-bit register field. A "capability" is the |
44 | * high-level description derived from the sanitised field value. |
45 | * |
46 | * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID |
47 | * scheme for fields in ID registers") to understand when feature fields |
48 | * may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly). |
49 | * |
50 | * - KVM exposes its own view of the feature registers to guest operating |
51 | * systems regardless of FTR_VISIBLE. This is typically driven from the |
52 | * sanitised register values to allow virtual CPUs to be migrated between |
53 | * arbitrary physical CPUs, but some features not present on the host are |
54 | * also advertised and emulated. Look at sys_reg_descs[] for the gory |
55 | * details. |
56 | * |
57 | * - If the arm64_ftr_bits[] for a register has a missing field, then this |
58 | * field is treated as STRICT RES0, including for read_sanitised_ftr_reg(). |
59 | * This is stronger than FTR_HIDDEN and can be used to hide features from |
60 | * KVM guests. |
61 | */ |
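
/*
 * A quick worked example of the sanitisation described above (values are
 * illustrative, not taken from any particular SoC): suppose the boot CPU
 * reports ID_AA64ISAR0_EL1.AES = 0b0010 (AES + PMULL) and a late secondary
 * reports AES = 0b0001 (AES only). The field is FTR_LOWER_SAFE, so the
 * system-wide snapshot is updated to the lower value:
 *
 *	boot CPU:	AES = 0b0010
 *	secondary:	AES = 0b0001
 *	sanitised:	AES = 0b0001	(what read_sanitised_ftr_reg() returns)
 *
 * and userspace consequently sees HWCAP_AES but not HWCAP_PMULL, no matter
 * which CPU a task happens to run on.
 */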
62 | |
63 | #define pr_fmt(fmt) "CPU features: " fmt |
64 | |
65 | #include <linux/bsearch.h> |
66 | #include <linux/cpumask.h> |
67 | #include <linux/crash_dump.h> |
68 | #include <linux/kstrtox.h> |
69 | #include <linux/sort.h> |
70 | #include <linux/stop_machine.h> |
71 | #include <linux/sysfs.h> |
72 | #include <linux/types.h> |
73 | #include <linux/minmax.h> |
74 | #include <linux/mm.h> |
75 | #include <linux/cpu.h> |
76 | #include <linux/kasan.h> |
77 | #include <linux/percpu.h> |
78 | #include <linux/sched/isolation.h> |
79 | |
80 | #include <asm/cpu.h> |
81 | #include <asm/cpufeature.h> |
82 | #include <asm/cpu_ops.h> |
83 | #include <asm/fpsimd.h> |
84 | #include <asm/hwcap.h> |
85 | #include <asm/insn.h> |
86 | #include <asm/kvm_host.h> |
87 | #include <asm/mmu_context.h> |
88 | #include <asm/mte.h> |
89 | #include <asm/hypervisor.h> |
90 | #include <asm/processor.h> |
91 | #include <asm/smp.h> |
92 | #include <asm/sysreg.h> |
93 | #include <asm/traps.h> |
94 | #include <asm/vectors.h> |
95 | #include <asm/virt.h> |
96 | |
97 | /* Kernel representation of AT_HWCAP and AT_HWCAP2 */ |
98 | static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly; |
99 | |
100 | #ifdef CONFIG_COMPAT |
101 | #define COMPAT_ELF_HWCAP_DEFAULT \ |
102 | (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ |
103 | COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ |
104 | COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\ |
105 | COMPAT_HWCAP_LPAE) |
106 | unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; |
107 | unsigned int compat_elf_hwcap2 __read_mostly; |
108 | unsigned int compat_elf_hwcap3 __read_mostly; |
109 | #endif |
110 | |
111 | DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS); |
112 | EXPORT_SYMBOL(system_cpucaps); |
113 | static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NCAPS]; |
114 | |
115 | DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS); |
116 | |
117 | /* |
118 | * arm64_use_ng_mappings must be placed in the .data section, otherwise it |
119 | * ends up in the .bss section where it is initialized in early_map_kernel() |
120 | * after the MMU (with the idmap) was enabled. create_init_idmap() - which |
121 | * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG - |
122 |  * may end up generating incorrect idmap page table attributes. |
123 | */ |
124 | bool arm64_use_ng_mappings __read_mostly = false; |
125 | EXPORT_SYMBOL(arm64_use_ng_mappings); |
126 | |
127 | DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; |
128 | |
129 | /* |
130 | * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs |
131 | * support it? |
132 | */ |
133 | static bool __read_mostly allow_mismatched_32bit_el0; |
134 | |
135 | /* |
136 | * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have |
137 | * seen at least one CPU capable of 32-bit EL0. |
138 | */ |
139 | DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); |
140 | |
141 | /* |
142 | * Mask of CPUs supporting 32-bit EL0. |
143 | * Only valid if arm64_mismatched_32bit_el0 is enabled. |
144 | */ |
145 | static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly; |
146 | |
147 | void dump_cpu_features(void) |
148 | { |
149 | /* file-wide pr_fmt adds "CPU features: " prefix */ |
150 | 	pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps); |
151 | } |
152 | |
153 | #define __ARM64_MAX_POSITIVE(reg, field) \ |
154 | ((reg##_##field##_SIGNED ? \ |
155 | BIT(reg##_##field##_WIDTH - 1) : \ |
156 | BIT(reg##_##field##_WIDTH)) - 1) |
157 | |
158 | #define __ARM64_MIN_NEGATIVE(reg, field) BIT(reg##_##field##_WIDTH - 1) |
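
/*
 * For a typical 4-bit field the helpers above expand to (worked example
 * only; the *_SIGNED and *_WIDTH constants come from the generated sysreg
 * definitions):
 *
 *	unsigned field:	__ARM64_MAX_POSITIVE() = BIT(4) - 1 = 0xf
 *	signed field:	__ARM64_MAX_POSITIVE() = BIT(3) - 1 = 0x7
 *	signed field:	__ARM64_MIN_NEGATIVE() = BIT(3)     = 0x8
 *
 * i.e. the largest encodable positive value and the most negative raw
 * encoding of the field, respectively.
 */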
159 | |
160 | #define __ARM64_CPUID_FIELDS(reg, field, min_value, max_value) \ |
161 | .sys_reg = SYS_##reg, \ |
162 | .field_pos = reg##_##field##_SHIFT, \ |
163 | .field_width = reg##_##field##_WIDTH, \ |
164 | .sign = reg##_##field##_SIGNED, \ |
165 | .min_field_value = min_value, \ |
166 | .max_field_value = max_value, |
167 | |
168 | /* |
169 | * ARM64_CPUID_FIELDS() encodes a field with a range from min_value to |
170 |  * an implicit maximum that depends on the signedness of the field. |
171 | * |
172 | * An unsigned field will be capped at all ones, while a signed field |
173 | * will be limited to the positive half only. |
174 | */ |
175 | #define ARM64_CPUID_FIELDS(reg, field, min_value) \ |
176 | __ARM64_CPUID_FIELDS(reg, field, \ |
177 | SYS_FIELD_VALUE(reg, field, min_value), \ |
178 | __ARM64_MAX_POSITIVE(reg, field)) |
179 | |
180 | /* |
181 | * ARM64_CPUID_FIELDS_NEG() encodes a field with a range from an |
182 | * implicit minimal value to max_value. This should be used when |
183 | * matching a non-implemented property. |
184 | */ |
185 | #define ARM64_CPUID_FIELDS_NEG(reg, field, max_value) \ |
186 | __ARM64_CPUID_FIELDS(reg, field, \ |
187 | __ARM64_MIN_NEGATIVE(reg, field), \ |
188 | SYS_FIELD_VALUE(reg, field, max_value)) |
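
/*
 * These macros are intended to be dropped into an arm64_cpu_capabilities
 * entry alongside a has_cpuid_feature() matcher. A purely illustrative
 * entry (the capability name here is hypothetical; the real entries live
 * in arm64_features[] further down) might look like:
 *
 *	{
 *		.desc = "Widget acceleration",
 *		.capability = ARM64_HAS_WIDGET,
 *		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches = has_cpuid_feature,
 *		ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, RNDR, IMP)
 *	},
 *
 * i.e. "match when the sanitised ID_AA64ISAR0_EL1.RNDR field is at least
 * IMP". For a signed field the implicit maximum from __ARM64_MAX_POSITIVE()
 * additionally rules out the negative encodings such as 0xf.
 */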
189 | |
190 | #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ |
191 | { \ |
192 | .sign = SIGNED, \ |
193 | .visible = VISIBLE, \ |
194 | .strict = STRICT, \ |
195 | .type = TYPE, \ |
196 | .shift = SHIFT, \ |
197 | .width = WIDTH, \ |
198 | .safe_val = SAFE_VAL, \ |
199 | } |
200 | |
201 | /* Define a feature with unsigned values */ |
202 | #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ |
203 | __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) |
204 | |
205 | /* Define a feature with a signed value */ |
206 | #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ |
207 | __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) |
208 | |
209 | #define ARM64_FTR_END \ |
210 | { \ |
211 | .width = 0, \ |
212 | } |
213 | |
214 | static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); |
215 | |
216 | static bool __system_matches_cap(unsigned int n); |
217 | |
218 | /* |
219 | * NOTE: Any changes to the visibility of features should be kept in |
220 | * sync with the documentation of the CPU feature register ABI. |
221 | */ |
222 | static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { |
223 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0), |
224 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0), |
225 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0), |
226 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0), |
227 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0), |
228 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0), |
229 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0), |
230 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0), |
231 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0), |
232 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0), |
233 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0), |
234 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0), |
235 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0), |
236 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0), |
237 | ARM64_FTR_END, |
238 | }; |
239 | |
240 | static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { |
241 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0), |
242 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0), |
243 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0), |
244 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0), |
245 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0), |
246 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0), |
247 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0), |
248 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), |
249 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0), |
250 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), |
251 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0), |
252 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0), |
253 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0), |
254 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0), |
255 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), |
256 | FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0), |
257 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), |
258 | FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0), |
259 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0), |
260 | ARM64_FTR_END, |
261 | }; |
262 | |
263 | static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { |
264 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_LUT_SHIFT, 4, 0), |
265 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0), |
266 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0), |
267 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0), |
268 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0), |
269 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0), |
270 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), |
271 | FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0), |
272 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), |
273 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0), |
274 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0), |
275 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0), |
276 | ARM64_FTR_END, |
277 | }; |
278 | |
279 | static const struct arm64_ftr_bits ftr_id_aa64isar3[] = { |
280 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FPRCVT_SHIFT, 4, 0), |
281 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0), |
282 | ARM64_FTR_END, |
283 | }; |
284 | |
285 | static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { |
286 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0), |
287 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0), |
288 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0), |
289 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0), |
290 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0), |
291 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0), |
292 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
293 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0), |
294 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0), |
295 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0), |
296 | S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI), |
297 | S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI), |
298 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0), |
299 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0), |
300 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_EL1_IMP), |
301 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_EL0_IMP), |
302 | ARM64_FTR_END, |
303 | }; |
304 | |
305 | static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = { |
306 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_GCS), |
307 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_GCS_SHIFT, 4, 0), |
308 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_frac_SHIFT, 4, 0), |
309 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
310 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0), |
311 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0), |
312 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0), |
313 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE), |
314 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI), |
315 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI), |
316 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI), |
317 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0), |
318 | ARM64_FTR_END, |
319 | }; |
320 | |
321 | static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = { |
322 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0), |
323 | ARM64_FTR_END, |
324 | }; |
325 | |
326 | static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { |
327 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
328 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0), |
329 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
330 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0), |
331 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
332 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F16MM_SHIFT, 4, 0), |
333 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
334 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0), |
335 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
336 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0), |
337 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
338 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0), |
339 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
340 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_B16B16_SHIFT, 4, 0), |
341 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
342 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0), |
343 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
344 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0), |
345 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
346 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_EltPerm_SHIFT, 4, 0), |
347 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
348 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0), |
349 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
350 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0), |
351 | ARM64_FTR_END, |
352 | }; |
353 | |
354 | static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = { |
355 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
356 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0), |
357 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
358 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_LUTv2_SHIFT, 1, 0), |
359 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
360 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0), |
361 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
362 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0), |
363 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
364 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0), |
365 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
366 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0), |
367 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
368 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0), |
369 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
370 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0), |
371 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
372 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F16_SHIFT, 1, 0), |
373 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
374 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F32_SHIFT, 1, 0), |
375 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
376 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0), |
377 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
378 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0), |
379 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
380 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0), |
381 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
382 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0), |
383 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
384 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0), |
385 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
386 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8FMA_SHIFT, 1, 0), |
387 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
388 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP4_SHIFT, 1, 0), |
389 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
390 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP2_SHIFT, 1, 0), |
391 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
392 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SBitPerm_SHIFT, 1, 0), |
393 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
394 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_AES_SHIFT, 1, 0), |
395 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
396 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SFEXPA_SHIFT, 1, 0), |
397 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
398 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_STMOP_SHIFT, 1, 0), |
399 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME), |
400 | FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMOP4_SHIFT, 1, 0), |
401 | ARM64_FTR_END, |
402 | }; |
403 | |
404 | static const struct arm64_ftr_bits ftr_id_aa64fpfr0[] = { |
405 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8CVT_SHIFT, 1, 0), |
406 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8FMA_SHIFT, 1, 0), |
407 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP4_SHIFT, 1, 0), |
408 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP2_SHIFT, 1, 0), |
409 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8MM8_SHIFT, 1, 0), |
410 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8MM4_SHIFT, 1, 0), |
411 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E4M3_SHIFT, 1, 0), |
412 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E5M2_SHIFT, 1, 0), |
413 | ARM64_FTR_END, |
414 | }; |
415 | |
416 | static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { |
417 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0), |
418 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0), |
419 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0), |
420 | /* |
421 | * Page size not being supported at Stage-2 is not fatal. You |
422 | * just give up KVM if PAGE_SIZE isn't supported there. Go fix |
423 | * your favourite nesting hypervisor. |
424 | * |
425 | * There is a small corner case where the hypervisor explicitly |
426 | * advertises a given granule size at Stage-2 (value 2) on some |
427 | * vCPUs, and uses the fallback to Stage-1 (value 0) for other |
428 | * vCPUs. Although this is not forbidden by the architecture, it |
429 | * indicates that the hypervisor is being silly (or buggy). |
430 | * |
431 | * We make no effort to cope with this and pretend that if these |
432 | * fields are inconsistent across vCPUs, then it isn't worth |
433 | * trying to bring KVM up. |
434 | */ |
435 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1), |
436 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1), |
437 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1), |
438 | /* |
439 | * We already refuse to boot CPUs that don't support our configured |
440 | * page size, so we can only detect mismatches for a page size other |
441 | * than the one we're currently using. Unfortunately, SoCs like this |
442 | * exist in the wild so, even though we don't like it, we'll have to go |
443 | * along with it and treat them as non-strict. |
444 | */ |
445 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI), |
446 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI), |
447 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI), |
448 | |
449 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0), |
450 | /* Linux shouldn't care about secure memory */ |
451 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0), |
452 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0), |
453 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0), |
454 | /* |
455 | * Differing PARange is fine as long as all peripherals and memory are mapped |
456 | * within the minimum PARange of all CPUs |
457 | */ |
458 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0), |
459 | ARM64_FTR_END, |
460 | }; |
461 | |
462 | static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { |
463 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ECBHB_SHIFT, 4, 0), |
464 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0), |
465 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0), |
466 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0), |
467 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0), |
468 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0), |
469 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0), |
470 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0), |
471 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0), |
472 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0), |
473 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0), |
474 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0), |
475 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0), |
476 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0), |
477 | ARM64_FTR_END, |
478 | }; |
479 | |
480 | static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { |
481 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0), |
482 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0), |
483 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0), |
484 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0), |
485 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0), |
486 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0), |
487 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0), |
488 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0), |
489 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0), |
490 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0), |
491 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0), |
492 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0), |
493 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0), |
494 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0), |
495 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0), |
496 | ARM64_FTR_END, |
497 | }; |
498 | |
499 | static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = { |
500 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE), |
501 | FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0), |
502 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0), |
503 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0), |
504 | ARM64_FTR_END, |
505 | }; |
506 | |
507 | static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = { |
508 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0), |
509 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_NV_frac_SHIFT, 4, 0), |
510 | ARM64_FTR_END, |
511 | }; |
512 | |
513 | static const struct arm64_ftr_bits ftr_ctr[] = { |
514 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ |
515 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1), |
516 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1), |
517 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0), |
518 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0), |
519 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1), |
520 | /* |
521 | * Linux can handle differing I-cache policies. Userspace JITs will |
522 | * make use of *minLine. |
523 | * If we have differing I-cache policies, report it as the weakest - VIPT. |
524 | */ |
525 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT), /* L1Ip */ |
526 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0), |
527 | ARM64_FTR_END, |
528 | }; |
529 | |
530 | static struct arm64_ftr_override __ro_after_init no_override = { }; |
531 | |
532 | struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { |
533 | 	.name = "SYS_CTR_EL0", |
534 | .ftr_bits = ftr_ctr, |
535 | .override = &no_override, |
536 | }; |
537 | |
538 | static const struct arm64_ftr_bits ftr_id_mmfr0[] = { |
539 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_InnerShr_SHIFT, 4, 0xf), |
540 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_FCSE_SHIFT, 4, 0), |
541 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_AuxReg_SHIFT, 4, 0), |
542 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_TCM_SHIFT, 4, 0), |
543 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_ShareLvl_SHIFT, 4, 0), |
544 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_OuterShr_SHIFT, 4, 0xf), |
545 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_PMSA_SHIFT, 4, 0), |
546 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_VMSA_SHIFT, 4, 0), |
547 | ARM64_FTR_END, |
548 | }; |
549 | |
550 | static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { |
551 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0), |
552 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0), |
553 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0), |
554 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0), |
555 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0), |
556 | /* |
557 | * We can instantiate multiple PMU instances with different levels |
558 | * of support. |
559 | */ |
560 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0), |
561 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6), |
562 | ARM64_FTR_END, |
563 | }; |
564 | |
565 | static const struct arm64_ftr_bits ftr_mvfr0[] = { |
566 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPRound_SHIFT, 4, 0), |
567 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPShVec_SHIFT, 4, 0), |
568 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSqrt_SHIFT, 4, 0), |
569 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDivide_SHIFT, 4, 0), |
570 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPTrap_SHIFT, 4, 0), |
571 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDP_SHIFT, 4, 0), |
572 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSP_SHIFT, 4, 0), |
573 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_SIMDReg_SHIFT, 4, 0), |
574 | ARM64_FTR_END, |
575 | }; |
576 | |
577 | static const struct arm64_ftr_bits ftr_mvfr1[] = { |
578 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0), |
579 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0), |
580 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0), |
581 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0), |
582 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0), |
583 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0), |
584 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPDNaN_SHIFT, 4, 0), |
585 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPFtZ_SHIFT, 4, 0), |
586 | ARM64_FTR_END, |
587 | }; |
588 | |
589 | static const struct arm64_ftr_bits ftr_mvfr2[] = { |
590 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_FPMisc_SHIFT, 4, 0), |
591 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_SIMDMisc_SHIFT, 4, 0), |
592 | ARM64_FTR_END, |
593 | }; |
594 | |
595 | static const struct arm64_ftr_bits ftr_dczid[] = { |
596 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_EL0_DZP_SHIFT, 1, 1), |
597 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0), |
598 | ARM64_FTR_END, |
599 | }; |
600 | |
601 | static const struct arm64_ftr_bits ftr_gmid[] = { |
602 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0), |
603 | ARM64_FTR_END, |
604 | }; |
605 | |
606 | static const struct arm64_ftr_bits ftr_id_isar0[] = { |
607 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Divide_SHIFT, 4, 0), |
608 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Debug_SHIFT, 4, 0), |
609 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Coproc_SHIFT, 4, 0), |
610 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_CmpBranch_SHIFT, 4, 0), |
611 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitField_SHIFT, 4, 0), |
612 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitCount_SHIFT, 4, 0), |
613 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Swap_SHIFT, 4, 0), |
614 | ARM64_FTR_END, |
615 | }; |
616 | |
617 | static const struct arm64_ftr_bits ftr_id_isar5[] = { |
618 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_RDM_SHIFT, 4, 0), |
619 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_CRC32_SHIFT, 4, 0), |
620 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA2_SHIFT, 4, 0), |
621 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA1_SHIFT, 4, 0), |
622 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_AES_SHIFT, 4, 0), |
623 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SEVL_SHIFT, 4, 0), |
624 | ARM64_FTR_END, |
625 | }; |
626 | |
627 | static const struct arm64_ftr_bits ftr_id_mmfr4[] = { |
628 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_EVT_SHIFT, 4, 0), |
629 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CCIDX_SHIFT, 4, 0), |
630 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_LSM_SHIFT, 4, 0), |
631 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_HPDS_SHIFT, 4, 0), |
632 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CnP_SHIFT, 4, 0), |
633 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_XNX_SHIFT, 4, 0), |
634 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_AC2_SHIFT, 4, 0), |
635 | |
636 | /* |
637 | * SpecSEI = 1 indicates that the PE might generate an SError on an |
638 |  * external abort on a speculative read. It is safer to assume that an |
639 |  * SError might be generated than that it will not be. Hence it has been |
640 | * classified as FTR_HIGHER_SAFE. |
641 | */ |
642 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_EL1_SpecSEI_SHIFT, 4, 0), |
643 | ARM64_FTR_END, |
644 | }; |
645 | |
646 | static const struct arm64_ftr_bits ftr_id_isar4[] = { |
647 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SWP_frac_SHIFT, 4, 0), |
648 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_PSR_M_SHIFT, 4, 0), |
649 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SynchPrim_frac_SHIFT, 4, 0), |
650 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Barrier_SHIFT, 4, 0), |
651 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SMC_SHIFT, 4, 0), |
652 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Writeback_SHIFT, 4, 0), |
653 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_WithShifts_SHIFT, 4, 0), |
654 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Unpriv_SHIFT, 4, 0), |
655 | ARM64_FTR_END, |
656 | }; |
657 | |
658 | static const struct arm64_ftr_bits ftr_id_mmfr5[] = { |
659 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_EL1_ETS_SHIFT, 4, 0), |
660 | ARM64_FTR_END, |
661 | }; |
662 | |
663 | static const struct arm64_ftr_bits ftr_id_isar6[] = { |
664 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0), |
665 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0), |
666 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0), |
667 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0), |
668 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0), |
669 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0), |
670 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0), |
671 | ARM64_FTR_END, |
672 | }; |
673 | |
674 | static const struct arm64_ftr_bits ftr_id_pfr0[] = { |
675 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_DIT_SHIFT, 4, 0), |
676 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_CSV2_SHIFT, 4, 0), |
677 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State3_SHIFT, 4, 0), |
678 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State2_SHIFT, 4, 0), |
679 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State1_SHIFT, 4, 0), |
680 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State0_SHIFT, 4, 0), |
681 | ARM64_FTR_END, |
682 | }; |
683 | |
684 | static const struct arm64_ftr_bits ftr_id_pfr1[] = { |
685 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GIC_SHIFT, 4, 0), |
686 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virt_frac_SHIFT, 4, 0), |
687 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Sec_frac_SHIFT, 4, 0), |
688 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GenTimer_SHIFT, 4, 0), |
689 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virtualization_SHIFT, 4, 0), |
690 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_MProgMod_SHIFT, 4, 0), |
691 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Security_SHIFT, 4, 0), |
692 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_ProgMod_SHIFT, 4, 0), |
693 | ARM64_FTR_END, |
694 | }; |
695 | |
696 | static const struct arm64_ftr_bits ftr_id_pfr2[] = { |
697 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0), |
698 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0), |
699 | ARM64_FTR_END, |
700 | }; |
701 | |
702 | static const struct arm64_ftr_bits ftr_id_dfr0[] = { |
703 | /* [31:28] TraceFilt */ |
704 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0), |
705 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0), |
706 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0), |
707 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0), |
708 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapDbg_SHIFT, 4, 0), |
709 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopSDbg_SHIFT, 4, 0), |
710 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopDbg_SHIFT, 4, 0), |
711 | ARM64_FTR_END, |
712 | }; |
713 | |
714 | static const struct arm64_ftr_bits ftr_id_dfr1[] = { |
715 | S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_EL1_MTPMU_SHIFT, 4, 0), |
716 | ARM64_FTR_END, |
717 | }; |
718 | |
719 | static const struct arm64_ftr_bits ftr_mpamidr[] = { |
720 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0), |
721 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0), |
722 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0), |
723 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0), |
724 | ARM64_FTR_END, |
725 | }; |
726 | |
727 | /* |
728 | * Common ftr bits for a 32bit register with all hidden, strict |
729 | * attributes, with 4bit feature fields and a default safe value of |
730 | * 0. Covers the following 32bit registers: |
731 | * id_isar[1-3], id_mmfr[1-3] |
732 | */ |
733 | static const struct arm64_ftr_bits ftr_generic_32bits[] = { |
734 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), |
735 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), |
736 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), |
737 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), |
738 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), |
739 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), |
740 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), |
741 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), |
742 | ARM64_FTR_END, |
743 | }; |
744 | |
745 | /* Table for a single 32bit feature value */ |
746 | static const struct arm64_ftr_bits ftr_single32[] = { |
747 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0), |
748 | ARM64_FTR_END, |
749 | }; |
750 | |
751 | static const struct arm64_ftr_bits ftr_raz[] = { |
752 | ARM64_FTR_END, |
753 | }; |
754 | |
755 | #define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \ |
756 | .sys_id = id, \ |
757 | .reg = &(struct arm64_ftr_reg){ \ |
758 | .name = id_str, \ |
759 | .override = (ovr), \ |
760 | .ftr_bits = &((table)[0]), \ |
761 | }} |
762 | |
763 | #define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \ |
764 | __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr) |
765 | |
766 | #define ARM64_FTR_REG(id, table) \ |
767 | __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override) |
768 | |
769 | struct arm64_ftr_override __read_mostly id_aa64mmfr0_override; |
770 | struct arm64_ftr_override __read_mostly id_aa64mmfr1_override; |
771 | struct arm64_ftr_override __read_mostly id_aa64mmfr2_override; |
772 | struct arm64_ftr_override __read_mostly id_aa64pfr0_override; |
773 | struct arm64_ftr_override __read_mostly id_aa64pfr1_override; |
774 | struct arm64_ftr_override __read_mostly id_aa64zfr0_override; |
775 | struct arm64_ftr_override __read_mostly id_aa64smfr0_override; |
776 | struct arm64_ftr_override __read_mostly id_aa64isar1_override; |
777 | struct arm64_ftr_override __read_mostly id_aa64isar2_override; |
778 | |
779 | struct arm64_ftr_override __read_mostly arm64_sw_feature_override; |
780 | |
781 | static const struct __ftr_reg_entry { |
782 | u32 sys_id; |
783 | struct arm64_ftr_reg *reg; |
784 | } arm64_ftr_regs[] = { |
785 | |
786 | /* Op1 = 0, CRn = 0, CRm = 1 */ |
787 | ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0), |
788 | ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1), |
789 | ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0), |
790 | ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0), |
791 | ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits), |
792 | ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits), |
793 | ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits), |
794 | |
795 | /* Op1 = 0, CRn = 0, CRm = 2 */ |
796 | ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0), |
797 | ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits), |
798 | ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits), |
799 | ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits), |
800 | ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4), |
801 | ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5), |
802 | ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4), |
803 | ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6), |
804 | |
805 | /* Op1 = 0, CRn = 0, CRm = 3 */ |
806 | ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0), |
807 | ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1), |
808 | ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2), |
809 | ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2), |
810 | ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1), |
811 | ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5), |
812 | |
813 | /* Op1 = 0, CRn = 0, CRm = 4 */ |
814 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0, |
815 | &id_aa64pfr0_override), |
816 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1, |
817 | &id_aa64pfr1_override), |
818 | ARM64_FTR_REG(SYS_ID_AA64PFR2_EL1, ftr_id_aa64pfr2), |
819 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0, |
820 | &id_aa64zfr0_override), |
821 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0, |
822 | &id_aa64smfr0_override), |
823 | ARM64_FTR_REG(SYS_ID_AA64FPFR0_EL1, ftr_id_aa64fpfr0), |
824 | |
825 | /* Op1 = 0, CRn = 0, CRm = 5 */ |
826 | ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0), |
827 | ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz), |
828 | |
829 | /* Op1 = 0, CRn = 0, CRm = 6 */ |
830 | ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), |
831 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1, |
832 | &id_aa64isar1_override), |
833 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2, |
834 | &id_aa64isar2_override), |
835 | ARM64_FTR_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3), |
836 | |
837 | /* Op1 = 0, CRn = 0, CRm = 7 */ |
838 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0, |
839 | &id_aa64mmfr0_override), |
840 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1, |
841 | &id_aa64mmfr1_override), |
842 | ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2, |
843 | &id_aa64mmfr2_override), |
844 | ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3), |
845 | ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4), |
846 | |
847 | /* Op1 = 0, CRn = 10, CRm = 4 */ |
848 | ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr), |
849 | |
850 | /* Op1 = 1, CRn = 0, CRm = 0 */ |
851 | ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), |
852 | |
853 | /* Op1 = 3, CRn = 0, CRm = 0 */ |
854 | { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 }, |
855 | ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), |
856 | |
857 | /* Op1 = 3, CRn = 14, CRm = 0 */ |
858 | ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32), |
859 | }; |
860 | |
861 | static int search_cmp_ftr_reg(const void *id, const void *regp) |
862 | { |
863 | return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id; |
864 | } |
865 | |
866 | /* |
867 | * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using |
868 | * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the |
869 |  * its sys_reg() encoding. With the array arm64_ftr_regs sorted in |
870 | * entry. |
871 | * |
872 | * returns - Upon success, matching ftr_reg entry for id. |
873 |  *            - NULL on failure. It is up to the caller to decide |
874 | * the impact of a failure. |
875 | */ |
876 | static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id) |
877 | { |
878 | const struct __ftr_reg_entry *ret; |
879 | |
880 | ret = bsearch((const void *)(unsigned long)sys_id, |
881 | arm64_ftr_regs, |
882 | ARRAY_SIZE(arm64_ftr_regs), |
883 | sizeof(arm64_ftr_regs[0]), |
884 | search_cmp_ftr_reg); |
885 | if (ret) |
886 | return ret->reg; |
887 | return NULL; |
888 | } |
889 | |
890 | /* |
891 | * get_arm64_ftr_reg - Looks up a feature register entry using |
892 | * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn(). |
893 | * |
894 | * returns - Upon success, matching ftr_reg entry for id. |
895 |  *            - NULL on failure but with a WARN_ON(). |
896 | */ |
897 | struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) |
898 | { |
899 | struct arm64_ftr_reg *reg; |
900 | |
901 | reg = get_arm64_ftr_reg_nowarn(sys_id); |
902 | |
903 | /* |
904 |  * Requesting a search for a non-existent register is an error. Warn |
905 | * and let the caller handle it. |
906 | */ |
907 | WARN_ON(!reg); |
908 | return reg; |
909 | } |
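
/*
 * Most callers don't need the struct itself; a typical pattern (sketch)
 * for consuming the sanitised value is:
 *
 *	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 *	int fp = cpuid_feature_extract_signed_field(pfr0,
 *						     ID_AA64PFR0_EL1_FP_SHIFT);
 *
 * The NULL-on-failure behaviour above only matters for callers that may
 * pass an encoding which has no entry in arm64_ftr_regs[].
 */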
910 | |
911 | static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg, |
912 | s64 ftr_val) |
913 | { |
914 | u64 mask = arm64_ftr_mask(ftrp); |
915 | |
916 | reg &= ~mask; |
917 | reg |= (ftr_val << ftrp->shift) & mask; |
918 | return reg; |
919 | } |
920 | |
921 | s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, |
922 | s64 cur) |
923 | { |
924 | s64 ret = 0; |
925 | |
926 | switch (ftrp->type) { |
927 | case FTR_EXACT: |
928 | ret = ftrp->safe_val; |
929 | break; |
930 | case FTR_LOWER_SAFE: |
931 | ret = min(new, cur); |
932 | break; |
933 | case FTR_HIGHER_OR_ZERO_SAFE: |
934 | if (!cur || !new) |
935 | break; |
936 | fallthrough; |
937 | case FTR_HIGHER_SAFE: |
938 | ret = max(new, cur); |
939 | break; |
940 | default: |
941 | BUG(); |
942 | } |
943 | |
944 | return ret; |
945 | } |
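
/*
 * Worked examples of the safe-value selection above (illustrative numbers):
 *
 *	FTR_LOWER_SAFE:           new = 2, cur = 1  ->  1 (min)
 *	FTR_HIGHER_SAFE:          new = 2, cur = 1  ->  2 (max)
 *	FTR_HIGHER_OR_ZERO_SAFE:  new = 0, cur = 3  ->  0 (either side zero)
 *	FTR_EXACT:                always            ->  ftrp->safe_val
 *
 * "new" and "cur" are sign-extended field values, so min()/max() also do
 * the right thing for FTR_SIGNED fields with negative encodings (e.g. 0xf).
 */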
946 | |
947 | static void __init sort_ftr_regs(void) |
948 | { |
949 | unsigned int i; |
950 | |
951 | for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) { |
952 | const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg; |
953 | const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits; |
954 | unsigned int j = 0; |
955 | |
956 | /* |
957 | * Features here must be sorted in descending order with respect |
958 | * to their shift values and should not overlap with each other. |
959 | */ |
960 | for (; ftr_bits->width != 0; ftr_bits++, j++) { |
961 | unsigned int width = ftr_reg->ftr_bits[j].width; |
962 | unsigned int shift = ftr_reg->ftr_bits[j].shift; |
963 | unsigned int prev_shift; |
964 | |
965 | WARN((shift + width) > 64, |
966 | 		     "%s has invalid feature at shift %d\n", |
967 | ftr_reg->name, shift); |
968 | |
969 | /* |
970 | * Skip the first feature. There is nothing to |
971 | * compare against for now. |
972 | */ |
973 | if (j == 0) |
974 | continue; |
975 | |
976 | prev_shift = ftr_reg->ftr_bits[j - 1].shift; |
977 | WARN((shift + width) > prev_shift, |
978 | 			"%s has feature overlap at shift %d\n", |
979 | ftr_reg->name, shift); |
980 | } |
981 | |
982 | /* |
983 | * Skip the first register. There is nothing to |
984 | * compare against for now. |
985 | */ |
986 | if (i == 0) |
987 | continue; |
988 | /* |
989 | * Registers here must be sorted in ascending order with respect |
990 | * to sys_id for subsequent binary search in get_arm64_ftr_reg() |
991 | * to work correctly. |
992 | */ |
993 | BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id); |
994 | } |
995 | } |
996 | |
997 | /* |
998 | * Initialise the CPU feature register from Boot CPU values. |
999 |  * Also initialises the strict_mask for the register. |
1000 | * Any bits that are not covered by an arm64_ftr_bits entry are considered |
1001 | * RES0 for the system-wide value, and must strictly match. |
1002 | */ |
1003 | static void init_cpu_ftr_reg(u32 sys_reg, u64 new) |
1004 | { |
1005 | u64 val = 0; |
1006 | u64 strict_mask = ~0x0ULL; |
1007 | u64 user_mask = 0; |
1008 | u64 valid_mask = 0; |
1009 | |
1010 | const struct arm64_ftr_bits *ftrp; |
1011 | 	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg); |
1012 | |
1013 | if (!reg) |
1014 | return; |
1015 | |
1016 | for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { |
1017 | u64 ftr_mask = arm64_ftr_mask(ftrp); |
1018 | s64 ftr_new = arm64_ftr_value(ftrp, new); |
1019 | s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val); |
1020 | |
1021 | if ((ftr_mask & reg->override->mask) == ftr_mask) { |
1022 | 			s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new); |
1023 | char *str = NULL; |
1024 | |
1025 | if (ftr_ovr != tmp) { |
1026 | /* Unsafe, remove the override */ |
1027 | reg->override->mask &= ~ftr_mask; |
1028 | reg->override->val &= ~ftr_mask; |
1029 | tmp = ftr_ovr; |
1030 | 				str = "ignoring override"; |
1031 | } else if (ftr_new != tmp) { |
1032 | /* Override was valid */ |
1033 | ftr_new = tmp; |
1034 | 				str = "forced"; |
1035 | } else { |
1036 | /* Override was the safe value */ |
1037 | 				str = "already set"; |
1038 | } |
1039 | |
1040 | 			pr_warn("%s[%d:%d]: %s to %llx\n", |
1041 | reg->name, |
1042 | ftrp->shift + ftrp->width - 1, |
1043 | ftrp->shift, str, |
1044 | tmp & (BIT(ftrp->width) - 1)); |
1045 | } else if ((ftr_mask & reg->override->val) == ftr_mask) { |
1046 | reg->override->val &= ~ftr_mask; |
1047 | 			pr_warn("%s[%d:%d]: impossible override, ignored\n", |
1048 | reg->name, |
1049 | ftrp->shift + ftrp->width - 1, |
1050 | ftrp->shift); |
1051 | } |
1052 | |
1053 | 		val = arm64_ftr_set_value(ftrp, val, ftr_new); |
1054 | |
1055 | valid_mask |= ftr_mask; |
1056 | if (!ftrp->strict) |
1057 | strict_mask &= ~ftr_mask; |
1058 | if (ftrp->visible) |
1059 | user_mask |= ftr_mask; |
1060 | else |
1061 | reg->user_val = arm64_ftr_set_value(ftrp, |
1062 | 						reg->user_val, |
1063 | 						ftrp->safe_val); |
1064 | } |
1065 | |
1066 | val &= valid_mask; |
1067 | |
1068 | reg->sys_val = val; |
1069 | reg->strict_mask = strict_mask; |
1070 | reg->user_mask = user_mask; |
1071 | } |
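
/*
 * Example of the override handling above (sketch with made-up values): an
 * override requesting ID_AA64PFR1_EL1.BT = 0 on a CPU that reports BT = 1
 * is safe for a LOWER_SAFE field (0 <= 1), so the sanitised value is forced
 * to 0 and the kernel logs something like:
 *
 *	ID_AA64PFR1_EL1[3:0]: forced to 0
 *
 * whereas an override requesting BT = 1 on a CPU that reports BT = 0 would
 * be unsafe; it is dropped from the override mask and logged as
 * "ignoring override".
 */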
1072 | |
1073 | extern const struct arm64_cpu_capabilities arm64_errata[]; |
1074 | static const struct arm64_cpu_capabilities arm64_features[]; |
1075 | |
1076 | static void __init |
1077 | init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps) |
1078 | { |
1079 | for (; caps->matches; caps++) { |
1080 | if (WARN(caps->capability >= ARM64_NCAPS, |
1081 | 			"Invalid capability %d\n", caps->capability)) |
1082 | continue; |
1083 | if (WARN(cpucap_ptrs[caps->capability], |
1084 | 			"Duplicate entry for capability %d\n", |
1085 | caps->capability)) |
1086 | continue; |
1087 | cpucap_ptrs[caps->capability] = caps; |
1088 | } |
1089 | } |
1090 | |
1091 | static void __init init_cpucap_indirect_list(void) |
1092 | { |
	init_cpucap_indirect_list_from_array(arm64_features);
	init_cpucap_indirect_list_from_array(arm64_errata);
1095 | } |
1096 | |
1097 | static void __init setup_boot_cpu_capabilities(void); |
1098 | |
1099 | static void init_32bit_cpu_features(struct cpuinfo_32bit *info) |
1100 | { |
1101 | init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); |
1102 | init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); |
1103 | init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); |
1104 | init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); |
1105 | init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); |
1106 | init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); |
1107 | init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); |
1108 | init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); |
1109 | init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); |
1110 | init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); |
1111 | init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); |
1112 | init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); |
1113 | init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); |
1114 | init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); |
1115 | init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); |
1116 | init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); |
1117 | init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); |
1118 | init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); |
1119 | init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); |
1120 | init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); |
1121 | init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); |
1122 | } |
1123 | |
1124 | #ifdef CONFIG_ARM64_PSEUDO_NMI |
1125 | static bool enable_pseudo_nmi; |
1126 | |
1127 | static int __init early_enable_pseudo_nmi(char *p) |
1128 | { |
1129 | return kstrtobool(p, &enable_pseudo_nmi); |
1130 | } |
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
1132 | |
1133 | static __init void detect_system_supports_pseudo_nmi(void) |
1134 | { |
1135 | struct device_node *np; |
1136 | |
1137 | if (!enable_pseudo_nmi) |
1138 | return; |
1139 | |
1140 | /* |
1141 | * Detect broken MediaTek firmware that doesn't properly save and |
1142 | * restore GIC priorities. |
1143 | */ |
	np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
	if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
		pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
1147 | enable_pseudo_nmi = false; |
1148 | } |
1149 | of_node_put(np); |
1150 | } |
1151 | #else /* CONFIG_ARM64_PSEUDO_NMI */ |
1152 | static inline void detect_system_supports_pseudo_nmi(void) { } |
1153 | #endif |
1154 | |
1155 | void __init init_cpu_features(struct cpuinfo_arm64 *info) |
1156 | { |
1157 | /* Before we start using the tables, make sure it is sorted */ |
1158 | sort_ftr_regs(); |
1159 | |
1160 | init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr); |
1161 | init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid); |
1162 | init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq); |
1163 | init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0); |
1164 | init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); |
1165 | init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); |
1166 | init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); |
1167 | init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); |
1168 | init_cpu_ftr_reg(SYS_ID_AA64ISAR3_EL1, info->reg_id_aa64isar3); |
1169 | init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); |
1170 | init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); |
1171 | init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); |
1172 | init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3); |
1173 | init_cpu_ftr_reg(SYS_ID_AA64MMFR4_EL1, info->reg_id_aa64mmfr4); |
1174 | init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); |
1175 | init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); |
1176 | init_cpu_ftr_reg(SYS_ID_AA64PFR2_EL1, info->reg_id_aa64pfr2); |
1177 | init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); |
1178 | init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0); |
1179 | init_cpu_ftr_reg(SYS_ID_AA64FPFR0_EL1, info->reg_id_aa64fpfr0); |
1180 | |
1181 | if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) |
		init_32bit_cpu_features(&info->aarch32);
1183 | |
1184 | if (IS_ENABLED(CONFIG_ARM64_SVE) && |
1185 | id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { |
1186 | unsigned long cpacr = cpacr_save_enable_kernel_sve(); |
1187 | |
1188 | vec_init_vq_map(ARM64_VEC_SVE); |
1189 | |
1190 | cpacr_restore(cpacr); |
1191 | } |
1192 | |
1193 | if (IS_ENABLED(CONFIG_ARM64_SME) && |
1194 | id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) { |
1195 | unsigned long cpacr = cpacr_save_enable_kernel_sme(); |
1196 | |
1197 | vec_init_vq_map(ARM64_VEC_SME); |
1198 | |
1199 | cpacr_restore(cpacr); |
1200 | } |
1201 | |
1202 | if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { |
1203 | info->reg_mpamidr = read_cpuid(MPAMIDR_EL1); |
1204 | init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr); |
1205 | } |
1206 | |
1207 | if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) |
1208 | init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); |
1209 | } |
1210 | |
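/*
 * Fold the value of each feature field in 'new' into the system-wide view
 * held in reg->sys_val, replacing any field that differs with the "safe"
 * value returned by arm64_ftr_safe_value().
 */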
1211 | static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) |
1212 | { |
1213 | const struct arm64_ftr_bits *ftrp; |
1214 | |
1215 | for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { |
1216 | s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); |
1217 | s64 ftr_new = arm64_ftr_value(ftrp, new); |
1218 | |
1219 | if (ftr_cur == ftr_new) |
1220 | continue; |
1221 | /* Find a safe value */ |
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
1224 | } |
1225 | |
1226 | } |
1227 | |
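/*
 * Update the system-wide view of register 'sys_id' with the value read on
 * 'cpu'. Returns 1 (after printing a SANITY CHECK warning) if any strict
 * field differs from the boot CPU value, so that the caller can taint the
 * kernel; returns 0 otherwise.
 */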
1228 | static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot) |
1229 | { |
1230 | struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id); |
1231 | |
1232 | if (!regp) |
1233 | return 0; |
1234 | |
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
1239 | regp->name, boot, cpu, val); |
1240 | return 1; |
1241 | } |
1242 | |
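/*
 * Stop treating the field at bit position 'field' of register 'sys_id' as
 * strict, so that a mismatch in that field no longer triggers a SANITY
 * CHECK warning or taints the kernel.
 */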
1243 | static void relax_cpu_ftr_reg(u32 sys_id, int field) |
1244 | { |
1245 | const struct arm64_ftr_bits *ftrp; |
1246 | struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id); |
1247 | |
1248 | if (!regp) |
1249 | return; |
1250 | |
1251 | for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) { |
1252 | if (ftrp->shift == field) { |
1253 | regp->strict_mask &= ~arm64_ftr_mask(ftrp); |
1254 | break; |
1255 | } |
1256 | } |
1257 | |
1258 | /* Bogus field? */ |
1259 | WARN_ON(!ftrp->width); |
1260 | } |
1261 | |
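/*
 * With allow_mismatched_32bit_el0, the boot CPU may not implement AArch32
 * at EL0. In that case, seed the boot CPU's AArch32 register snapshot from
 * the first 32-bit-capable CPU we see, so that later sanitisation has a
 * sensible baseline to compare against. This is done at most once.
 */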
1262 | static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info, |
1263 | struct cpuinfo_arm64 *boot) |
1264 | { |
1265 | static bool boot_cpu_32bit_regs_overridden = false; |
1266 | |
1267 | if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden) |
1268 | return; |
1269 | |
1270 | if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0)) |
1271 | return; |
1272 | |
1273 | boot->aarch32 = info->aarch32; |
	init_32bit_cpu_features(&boot->aarch32);
1275 | boot_cpu_32bit_regs_overridden = true; |
1276 | } |
1277 | |
1278 | static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info, |
1279 | struct cpuinfo_32bit *boot) |
1280 | { |
1281 | int taint = 0; |
1282 | u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
1283 | |
1284 | /* |
1285 | * If we don't have AArch32 at EL1, then relax the strictness of |
1286 | * EL1-dependent register fields to avoid spurious sanity check fails. |
1287 | */ |
1288 | if (!id_aa64pfr0_32bit_el1(pfr0)) { |
1289 | relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_EL1_SMC_SHIFT); |
1290 | relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virt_frac_SHIFT); |
1291 | relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Sec_frac_SHIFT); |
1292 | relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virtualization_SHIFT); |
1293 | relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Security_SHIFT); |
1294 | relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_ProgMod_SHIFT); |
1295 | } |
1296 | |
1297 | taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu, |
1298 | info->reg_id_dfr0, boot->reg_id_dfr0); |
1299 | taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu, |
1300 | info->reg_id_dfr1, boot->reg_id_dfr1); |
1301 | taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu, |
1302 | info->reg_id_isar0, boot->reg_id_isar0); |
1303 | taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu, |
1304 | info->reg_id_isar1, boot->reg_id_isar1); |
1305 | taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu, |
1306 | info->reg_id_isar2, boot->reg_id_isar2); |
1307 | taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu, |
1308 | info->reg_id_isar3, boot->reg_id_isar3); |
1309 | taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu, |
1310 | info->reg_id_isar4, boot->reg_id_isar4); |
1311 | taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, |
1312 | info->reg_id_isar5, boot->reg_id_isar5); |
1313 | taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu, |
1314 | info->reg_id_isar6, boot->reg_id_isar6); |
1315 | |
1316 | /* |
1317 | * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and |
1318 | * ACTLR formats could differ across CPUs and therefore would have to |
1319 | * be trapped for virtualization anyway. |
1320 | */ |
1321 | taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu, |
1322 | info->reg_id_mmfr0, boot->reg_id_mmfr0); |
1323 | taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu, |
1324 | info->reg_id_mmfr1, boot->reg_id_mmfr1); |
1325 | taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu, |
1326 | info->reg_id_mmfr2, boot->reg_id_mmfr2); |
1327 | taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu, |
1328 | info->reg_id_mmfr3, boot->reg_id_mmfr3); |
1329 | taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu, |
1330 | info->reg_id_mmfr4, boot->reg_id_mmfr4); |
1331 | taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu, |
1332 | info->reg_id_mmfr5, boot->reg_id_mmfr5); |
1333 | taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu, |
1334 | info->reg_id_pfr0, boot->reg_id_pfr0); |
1335 | taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu, |
1336 | info->reg_id_pfr1, boot->reg_id_pfr1); |
1337 | taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu, |
1338 | info->reg_id_pfr2, boot->reg_id_pfr2); |
1339 | taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu, |
1340 | info->reg_mvfr0, boot->reg_mvfr0); |
1341 | taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu, |
1342 | info->reg_mvfr1, boot->reg_mvfr1); |
1343 | taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu, |
1344 | info->reg_mvfr2, boot->reg_mvfr2); |
1345 | |
1346 | return taint; |
1347 | } |
1348 | |
1349 | /* |
1350 | * Update system wide CPU feature registers with the values from a |
1351 | * non-boot CPU. Also performs SANITY checks to make sure that there |
1352 | * aren't any insane variations from that of the boot CPU. |
1353 | */ |
1354 | void update_cpu_features(int cpu, |
1355 | struct cpuinfo_arm64 *info, |
1356 | struct cpuinfo_arm64 *boot) |
1357 | { |
1358 | int taint = 0; |
1359 | |
1360 | /* |
1361 | * The kernel can handle differing I-cache policies, but otherwise |
1362 | * caches should look identical. Userspace JITs will make use of |
1363 | * *minLine. |
1364 | */ |
1365 | taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu, |
1366 | info->reg_ctr, boot->reg_ctr); |
1367 | |
1368 | /* |
1369 | * Userspace may perform DC ZVA instructions. Mismatched block sizes |
1370 | * could result in too much or too little memory being zeroed if a |
1371 | * process is preempted and migrated between CPUs. |
1372 | */ |
1373 | taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu, |
1374 | info->reg_dczid, boot->reg_dczid); |
1375 | |
1376 | /* If different, timekeeping will be broken (especially with KVM) */ |
1377 | taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu, |
1378 | info->reg_cntfrq, boot->reg_cntfrq); |
1379 | |
1380 | /* |
1381 | * The kernel uses self-hosted debug features and expects CPUs to |
1382 | * support identical debug features. We presently need CTX_CMPs, WRPs, |
1383 | * and BRPs to be identical. |
1384 | * ID_AA64DFR1 is currently RES0. |
1385 | */ |
1386 | taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu, |
1387 | info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0); |
1388 | taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu, |
1389 | info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1); |
1390 | /* |
1391 | * Even in big.LITTLE, processors should be identical instruction-set |
1392 | * wise. |
1393 | */ |
1394 | taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu, |
1395 | info->reg_id_aa64isar0, boot->reg_id_aa64isar0); |
1396 | taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, |
1397 | info->reg_id_aa64isar1, boot->reg_id_aa64isar1); |
1398 | taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, |
1399 | info->reg_id_aa64isar2, boot->reg_id_aa64isar2); |
1400 | taint |= check_update_ftr_reg(SYS_ID_AA64ISAR3_EL1, cpu, |
1401 | info->reg_id_aa64isar3, boot->reg_id_aa64isar3); |
1402 | |
1403 | /* |
1404 | * Differing PARange support is fine as long as all peripherals and |
1405 | * memory are mapped within the minimum PARange of all CPUs. |
1406 | * Linux should not care about secure memory. |
1407 | */ |
1408 | taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu, |
1409 | info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0); |
1410 | taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu, |
1411 | info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1); |
1412 | taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, |
1413 | info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); |
1414 | taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu, |
1415 | info->reg_id_aa64mmfr3, boot->reg_id_aa64mmfr3); |
1416 | taint |= check_update_ftr_reg(SYS_ID_AA64MMFR4_EL1, cpu, |
1417 | info->reg_id_aa64mmfr4, boot->reg_id_aa64mmfr4); |
1418 | |
1419 | taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, |
1420 | info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); |
1421 | taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, |
1422 | info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1); |
1423 | taint |= check_update_ftr_reg(SYS_ID_AA64PFR2_EL1, cpu, |
1424 | info->reg_id_aa64pfr2, boot->reg_id_aa64pfr2); |
1425 | |
1426 | taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, |
1427 | info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); |
1428 | |
1429 | taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu, |
1430 | info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0); |
1431 | |
1432 | taint |= check_update_ftr_reg(SYS_ID_AA64FPFR0_EL1, cpu, |
1433 | info->reg_id_aa64fpfr0, boot->reg_id_aa64fpfr0); |
1434 | |
1435 | /* Probe vector lengths */ |
1436 | if (IS_ENABLED(CONFIG_ARM64_SVE) && |
1437 | id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { |
1438 | if (!system_capabilities_finalized()) { |
1439 | unsigned long cpacr = cpacr_save_enable_kernel_sve(); |
1440 | |
1441 | vec_update_vq_map(ARM64_VEC_SVE); |
1442 | |
1443 | cpacr_restore(cpacr); |
1444 | } |
1445 | } |
1446 | |
1447 | if (IS_ENABLED(CONFIG_ARM64_SME) && |
1448 | id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) { |
1449 | unsigned long cpacr = cpacr_save_enable_kernel_sme(); |
1450 | |
1451 | /* Probe vector lengths */ |
1452 | if (!system_capabilities_finalized()) |
1453 | vec_update_vq_map(ARM64_VEC_SME); |
1454 | |
1455 | cpacr_restore(cpacr); |
1456 | } |
1457 | |
1458 | if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) { |
1459 | info->reg_mpamidr = read_cpuid(MPAMIDR_EL1); |
1460 | taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu, |
1461 | info->reg_mpamidr, boot->reg_mpamidr); |
1462 | } |
1463 | |
1464 | /* |
1465 | * The kernel uses the LDGM/STGM instructions and the number of tags |
1466 | * they read/write depends on the GMID_EL1.BS field. Check that the |
1467 | * value is the same on all CPUs. |
1468 | */ |
1469 | if (IS_ENABLED(CONFIG_ARM64_MTE) && |
1470 | id_aa64pfr1_mte(info->reg_id_aa64pfr1)) { |
1471 | taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu, |
1472 | info->reg_gmid, boot->reg_gmid); |
1473 | } |
1474 | |
1475 | /* |
1476 | * If we don't have AArch32 at all then skip the checks entirely |
1477 | * as the register values may be UNKNOWN and we're not going to be |
1478 | * using them for anything. |
1479 | * |
1480 | * This relies on a sanitised view of the AArch64 ID registers |
1481 | * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last. |
1482 | */ |
1483 | if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { |
1484 | lazy_init_32bit_cpu_features(info, boot); |
		taint |= update_32bit_cpu_features(cpu, &info->aarch32,
						   &boot->aarch32);
1487 | } |
1488 | |
1489 | /* |
1490 | * Mismatched CPU features are a recipe for disaster. Don't even |
1491 | * pretend to support them. |
1492 | */ |
1493 | if (taint) { |
		pr_warn_once("Unsupported CPU feature variation detected.\n");
1495 | add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); |
1496 | } |
1497 | } |
1498 | |
1499 | u64 read_sanitised_ftr_reg(u32 id) |
1500 | { |
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
1502 | |
1503 | if (!regp) |
1504 | return 0; |
1505 | return regp->sys_val; |
1506 | } |
1507 | EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg); |
1508 | |
1509 | #define read_sysreg_case(r) \ |
1510 | case r: val = read_sysreg_s(r); break; |
1511 | |
1512 | /* |
1513 | * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated. |
1514 | * Read the system register on the current CPU |
1515 | */ |
1516 | u64 __read_sysreg_by_encoding(u32 sys_id) |
1517 | { |
1518 | struct arm64_ftr_reg *regp; |
1519 | u64 val; |
1520 | |
1521 | switch (sys_id) { |
1522 | read_sysreg_case(SYS_ID_PFR0_EL1); |
1523 | read_sysreg_case(SYS_ID_PFR1_EL1); |
1524 | read_sysreg_case(SYS_ID_PFR2_EL1); |
1525 | read_sysreg_case(SYS_ID_DFR0_EL1); |
1526 | read_sysreg_case(SYS_ID_DFR1_EL1); |
1527 | read_sysreg_case(SYS_ID_MMFR0_EL1); |
1528 | read_sysreg_case(SYS_ID_MMFR1_EL1); |
1529 | read_sysreg_case(SYS_ID_MMFR2_EL1); |
1530 | read_sysreg_case(SYS_ID_MMFR3_EL1); |
1531 | read_sysreg_case(SYS_ID_MMFR4_EL1); |
1532 | read_sysreg_case(SYS_ID_MMFR5_EL1); |
1533 | read_sysreg_case(SYS_ID_ISAR0_EL1); |
1534 | read_sysreg_case(SYS_ID_ISAR1_EL1); |
1535 | read_sysreg_case(SYS_ID_ISAR2_EL1); |
1536 | read_sysreg_case(SYS_ID_ISAR3_EL1); |
1537 | read_sysreg_case(SYS_ID_ISAR4_EL1); |
1538 | read_sysreg_case(SYS_ID_ISAR5_EL1); |
1539 | read_sysreg_case(SYS_ID_ISAR6_EL1); |
1540 | read_sysreg_case(SYS_MVFR0_EL1); |
1541 | read_sysreg_case(SYS_MVFR1_EL1); |
1542 | read_sysreg_case(SYS_MVFR2_EL1); |
1543 | |
1544 | read_sysreg_case(SYS_ID_AA64PFR0_EL1); |
1545 | read_sysreg_case(SYS_ID_AA64PFR1_EL1); |
1546 | read_sysreg_case(SYS_ID_AA64PFR2_EL1); |
1547 | read_sysreg_case(SYS_ID_AA64ZFR0_EL1); |
1548 | read_sysreg_case(SYS_ID_AA64SMFR0_EL1); |
1549 | read_sysreg_case(SYS_ID_AA64FPFR0_EL1); |
1550 | read_sysreg_case(SYS_ID_AA64DFR0_EL1); |
1551 | read_sysreg_case(SYS_ID_AA64DFR1_EL1); |
1552 | read_sysreg_case(SYS_ID_AA64MMFR0_EL1); |
1553 | read_sysreg_case(SYS_ID_AA64MMFR1_EL1); |
1554 | read_sysreg_case(SYS_ID_AA64MMFR2_EL1); |
1555 | read_sysreg_case(SYS_ID_AA64MMFR3_EL1); |
1556 | read_sysreg_case(SYS_ID_AA64MMFR4_EL1); |
1557 | read_sysreg_case(SYS_ID_AA64ISAR0_EL1); |
1558 | read_sysreg_case(SYS_ID_AA64ISAR1_EL1); |
1559 | read_sysreg_case(SYS_ID_AA64ISAR2_EL1); |
1560 | read_sysreg_case(SYS_ID_AA64ISAR3_EL1); |
1561 | |
1562 | read_sysreg_case(SYS_CNTFRQ_EL0); |
1563 | read_sysreg_case(SYS_CTR_EL0); |
1564 | read_sysreg_case(SYS_DCZID_EL0); |
1565 | |
1566 | default: |
1567 | BUG(); |
1568 | return 0; |
1569 | } |
1570 | |
1571 | regp = get_arm64_ftr_reg(sys_id); |
1572 | if (regp) { |
1573 | val &= ~regp->override->mask; |
1574 | val |= (regp->override->val & regp->override->mask); |
1575 | } |
1576 | |
1577 | return val; |
1578 | } |
1579 | |
1580 | #include <linux/irqchip/arm-gic-v3.h> |
1581 | |
1582 | static bool |
1583 | has_always(const struct arm64_cpu_capabilities *entry, int scope) |
1584 | { |
1585 | return true; |
1586 | } |
1587 | |
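/*
 * Extract the feature field described by 'entry' from 'reg' and check that
 * it lies within [min_field_value, max_field_value], taking the field's
 * width and signedness into account.
 */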
1588 | static bool |
1589 | feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) |
1590 | { |
1591 | int val, min, max; |
1592 | u64 tmp; |
1593 | |
1594 | val = cpuid_feature_extract_field_width(reg, entry->field_pos, |
1595 | entry->field_width, |
1596 | entry->sign); |
1597 | |
1598 | tmp = entry->min_field_value; |
1599 | tmp <<= entry->field_pos; |
1600 | |
1601 | min = cpuid_feature_extract_field_width(tmp, entry->field_pos, |
1602 | entry->field_width, |
1603 | entry->sign); |
1604 | |
1605 | tmp = entry->max_field_value; |
1606 | tmp <<= entry->field_pos; |
1607 | |
1608 | max = cpuid_feature_extract_field_width(tmp, entry->field_pos, |
1609 | entry->field_width, |
1610 | entry->sign); |
1611 | |
1612 | return val >= min && val <= max; |
1613 | } |
1614 | |
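/*
 * For SCOPE_SYSTEM, return the sanitised (system-wide) value of the
 * register; for CPU-local scopes, read the register directly on the
 * current CPU, which must not be preemptible.
 */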
1615 | static u64 |
1616 | read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope) |
1617 | { |
1618 | WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); |
1619 | if (scope == SCOPE_SYSTEM) |
1620 | return read_sanitised_ftr_reg(entry->sys_reg); |
1621 | else |
		return __read_sysreg_by_encoding(entry->sys_reg);
1623 | } |
1624 | |
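/*
 * As has_cpuid_feature(), but additionally require that the field is
 * visible to userspace, i.e. covered by the register's user_mask.
 */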
1625 | static bool |
1626 | has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) |
1627 | { |
1628 | int mask; |
1629 | struct arm64_ftr_reg *regp; |
1630 | u64 val = read_scoped_sysreg(entry, scope); |
1631 | |
	regp = get_arm64_ftr_reg(entry->sys_reg);
1633 | if (!regp) |
1634 | return false; |
1635 | |
1636 | mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask, |
1637 | entry->field_pos, |
1638 | entry->field_width); |
1639 | if (!mask) |
1640 | return false; |
1641 | |
	return feature_matches(val, entry);
1643 | } |
1644 | |
1645 | static bool |
1646 | has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope) |
1647 | { |
1648 | u64 val = read_scoped_sysreg(entry, scope); |
	return feature_matches(val, entry);
1650 | } |
1651 | |
1652 | const struct cpumask *system_32bit_el0_cpumask(void) |
1653 | { |
1654 | if (!system_supports_32bit_el0()) |
1655 | return cpu_none_mask; |
1656 | |
1657 | if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) |
1658 | return cpu_32bit_el0_mask; |
1659 | |
1660 | return cpu_possible_mask; |
1661 | } |
1662 | |
1663 | const struct cpumask *task_cpu_fallback_mask(struct task_struct *p) |
1664 | { |
1665 | return __task_cpu_possible_mask(p, housekeeping_cpumask(HK_TYPE_TICK)); |
1666 | } |
1667 | |
1668 | static int __init parse_32bit_el0_param(char *str) |
1669 | { |
1670 | allow_mismatched_32bit_el0 = true; |
1671 | return 0; |
1672 | } |
early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param);
1674 | |
1675 | static ssize_t aarch32_el0_show(struct device *dev, |
1676 | struct device_attribute *attr, char *buf) |
1677 | { |
1678 | const struct cpumask *mask = system_32bit_el0_cpumask(); |
1679 | |
	return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask));
1681 | } |
1682 | static const DEVICE_ATTR_RO(aarch32_el0); |
1683 | |
1684 | static int __init aarch32_el0_sysfs_init(void) |
1685 | { |
1686 | struct device *dev_root; |
1687 | int ret = 0; |
1688 | |
1689 | if (!allow_mismatched_32bit_el0) |
1690 | return 0; |
1691 | |
	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = device_create_file(dev_root, &dev_attr_aarch32_el0);
		put_device(dev_root);
1696 | } |
1697 | return ret; |
1698 | } |
1699 | device_initcall(aarch32_el0_sysfs_init); |
1700 | |
1701 | static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope) |
1702 | { |
1703 | if (!has_cpuid_feature(entry, scope)) |
1704 | return allow_mismatched_32bit_el0; |
1705 | |
1706 | if (scope == SCOPE_SYSTEM) |
		pr_info("detected: 32-bit EL0 Support\n");
1708 | |
1709 | return true; |
1710 | } |
1711 | |
1712 | static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope) |
1713 | { |
1714 | bool has_sre; |
1715 | |
1716 | if (!has_cpuid_feature(entry, scope)) |
1717 | return false; |
1718 | |
1719 | has_sre = gic_enable_sre(); |
1720 | if (!has_sre) |
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);
1723 | |
1724 | return has_sre; |
1725 | } |
1726 | |
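/*
 * CTR_EL0.IDC == 1 means that data cache clean to the Point of Unification
 * is not required for instruction to data coherence.
 */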
1727 | static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, |
1728 | int scope) |
1729 | { |
1730 | u64 ctr; |
1731 | |
1732 | if (scope == SCOPE_SYSTEM) |
1733 | ctr = arm64_ftr_reg_ctrel0.sys_val; |
1734 | else |
1735 | ctr = read_cpuid_effective_cachetype(); |
1736 | |
1737 | return ctr & BIT(CTR_EL0_IDC_SHIFT); |
1738 | } |
1739 | |
1740 | static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused) |
1741 | { |
1742 | /* |
1743 | * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively |
1744 | * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses |
1745 | * to the CTR_EL0 on this CPU and emulate it with the real/safe |
1746 | * value. |
1747 | */ |
1748 | if (!(read_cpuid_cachetype() & BIT(CTR_EL0_IDC_SHIFT))) |
1749 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); |
1750 | } |
1751 | |
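/*
 * CTR_EL0.DIC == 1 means that instruction cache invalidation is not
 * required for instruction to data coherence.
 */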
1752 | static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, |
1753 | int scope) |
1754 | { |
1755 | u64 ctr; |
1756 | |
1757 | if (scope == SCOPE_SYSTEM) |
1758 | ctr = arm64_ftr_reg_ctrel0.sys_val; |
1759 | else |
1760 | ctr = read_cpuid_cachetype(); |
1761 | |
1762 | return ctr & BIT(CTR_EL0_DIC_SHIFT); |
1763 | } |
1764 | |
1765 | static bool __maybe_unused |
1766 | has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) |
1767 | { |
1768 | /* |
1769 | * Kdump isn't guaranteed to power-off all secondary CPUs, CNP |
1770 | * may share TLB entries with a CPU stuck in the crashed |
1771 | * kernel. |
1772 | */ |
1773 | if (is_kdump_kernel()) |
1774 | return false; |
1775 | |
1776 | if (cpus_have_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP)) |
1777 | return false; |
1778 | |
1779 | return has_cpuid_feature(entry, scope); |
1780 | } |
1781 | |
1782 | static bool __meltdown_safe = true; |
1783 | static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ |
1784 | |
1785 | static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, |
1786 | int scope) |
1787 | { |
1788 | /* List of CPUs that are not vulnerable and don't need KPTI */ |
1789 | static const struct midr_range kpti_safe_list[] = { |
1790 | MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), |
1791 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), |
1792 | MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), |
1793 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), |
1794 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), |
1795 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), |
1796 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), |
1797 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), |
1798 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
1799 | MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), |
1800 | MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), |
1801 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD), |
1802 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), |
1803 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), |
1804 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), |
1805 | { /* sentinel */ } |
1806 | }; |
	char const *str = "kpti command line option";
1808 | bool meltdown_safe; |
1809 | |
1810 | meltdown_safe = is_midr_in_range_list(kpti_safe_list); |
1811 | |
1812 | /* Defer to CPU feature registers */ |
1813 | if (has_cpuid_feature(entry, scope)) |
1814 | meltdown_safe = true; |
1815 | |
1816 | if (!meltdown_safe) |
1817 | __meltdown_safe = false; |
1818 | |
1819 | /* |
1820 | * For reasons that aren't entirely clear, enabling KPTI on Cavium |
1821 | * ThunderX leads to apparent I-cache corruption of kernel text, which |
1822 | * ends as well as you might imagine. Don't even try. We cannot rely |
1823 | * on the cpus_have_*cap() helpers here to detect the CPU erratum |
1824 | * because cpucap detection order may change. However, since we know |
1825 | * affected CPUs are always in a homogeneous configuration, it is |
1826 | * safe to rely on this_cpu_has_cap() here. |
1827 | */ |
1828 | if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) { |
		str = "ARM64_WORKAROUND_CAVIUM_27456";
1830 | __kpti_forced = -1; |
1831 | } |
1832 | |
1833 | /* Useful for KASLR robustness */ |
1834 | if (kaslr_enabled() && kaslr_requires_kpti()) { |
1835 | if (!__kpti_forced) { |
			str = "KASLR";
1837 | __kpti_forced = 1; |
1838 | } |
1839 | } |
1840 | |
1841 | if (cpu_mitigations_off() && !__kpti_forced) { |
		str = "mitigations=off";
1843 | __kpti_forced = -1; |
1844 | } |
1845 | |
1846 | if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { |
		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
1848 | return false; |
1849 | } |
1850 | |
1851 | /* Forced? */ |
1852 | if (__kpti_forced) { |
		pr_info_once("kernel page table isolation forced %s by %s\n",
			     __kpti_forced > 0 ? "ON" : "OFF", str);
1855 | return __kpti_forced > 0; |
1856 | } |
1857 | |
1858 | return !meltdown_safe; |
1859 | } |
1860 | |
1861 | static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope) |
1862 | { |
1863 | /* |
1864 | * Although the Apple M2 family appears to support NV1, the |
1865 | * PTW barfs on the nVHE EL2 S1 page table format. Pretend |
1866 | * that it doesn't support NV1 at all. |
1867 | */ |
1868 | static const struct midr_range nv1_ni_list[] = { |
1869 | MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD), |
1870 | MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE), |
1871 | MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO), |
1872 | MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO), |
1873 | MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX), |
1874 | MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX), |
1875 | {} |
1876 | }; |
1877 | |
1878 | return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) && |
1879 | !(has_cpuid_feature(entry, scope) || |
1880 | is_midr_in_range_list(nv1_ni_list))); |
1881 | } |
1882 | |
1883 | #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2) |
1884 | static bool has_lpa2_at_stage1(u64 mmfr0) |
1885 | { |
1886 | unsigned int tgran; |
1887 | |
1888 | tgran = cpuid_feature_extract_unsigned_field(mmfr0, |
1889 | ID_AA64MMFR0_EL1_TGRAN_SHIFT); |
1890 | return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2; |
1891 | } |
1892 | |
1893 | static bool has_lpa2_at_stage2(u64 mmfr0) |
1894 | { |
1895 | unsigned int tgran; |
1896 | |
1897 | tgran = cpuid_feature_extract_unsigned_field(mmfr0, |
1898 | ID_AA64MMFR0_EL1_TGRAN_2_SHIFT); |
1899 | return tgran == ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2; |
1900 | } |
1901 | |
1902 | static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope) |
1903 | { |
1904 | u64 mmfr0; |
1905 | |
1906 | mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
1907 | return has_lpa2_at_stage1(mmfr0) && has_lpa2_at_stage2(mmfr0); |
1908 | } |
1909 | #else |
1910 | static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope) |
1911 | { |
1912 | return false; |
1913 | } |
1914 | #endif |
1915 | |
1916 | #ifdef CONFIG_HW_PERF_EVENTS |
1917 | static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope) |
1918 | { |
1919 | u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); |
1920 | unsigned int pmuver; |
1921 | |
1922 | /* |
1923 | * PMUVer follows the standard ID scheme for an unsigned field with the |
1924 | * exception of 0xF (IMP_DEF) which is treated specially and implies |
1925 | * FEAT_PMUv3 is not implemented. |
1926 | * |
1927 | * See DDI0487L.a D24.1.3.2 for more details. |
1928 | */ |
1929 | pmuver = cpuid_feature_extract_unsigned_field(dfr0, |
1930 | ID_AA64DFR0_EL1_PMUVer_SHIFT); |
1931 | if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) |
1932 | return false; |
1933 | |
1934 | return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP; |
1935 | } |
1936 | #endif |
1937 | |
1938 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
1939 | #define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT)) |
1940 | |
1941 | extern |
1942 | void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt, |
1943 | phys_addr_t size, pgprot_t prot, |
1944 | phys_addr_t (*pgtable_alloc)(int), int flags); |
1945 | |
1946 | static phys_addr_t __initdata kpti_ng_temp_alloc; |
1947 | |
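/*
 * Hand out pages for the temporary KPTI page-table hierarchy from the block
 * preallocated in __kpti_install_ng_mappings(), working downwards from the
 * PGD page.
 */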
1948 | static phys_addr_t __init kpti_ng_pgd_alloc(int shift) |
1949 | { |
1950 | kpti_ng_temp_alloc -= PAGE_SIZE; |
1951 | return kpti_ng_temp_alloc; |
1952 | } |
1953 | |
1954 | static int __init __kpti_install_ng_mappings(void *__unused) |
1955 | { |
1956 | typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long); |
1957 | extern kpti_remap_fn idmap_kpti_install_ng_mappings; |
1958 | kpti_remap_fn *remap_fn; |
1959 | |
1960 | int cpu = smp_processor_id(); |
1961 | int levels = CONFIG_PGTABLE_LEVELS; |
1962 | int order = order_base_2(levels); |
1963 | u64 kpti_ng_temp_pgd_pa = 0; |
1964 | pgd_t *kpti_ng_temp_pgd; |
1965 | u64 alloc = 0; |
1966 | |
1967 | if (levels == 5 && !pgtable_l5_enabled()) |
1968 | levels = 4; |
1969 | else if (levels == 4 && !pgtable_l4_enabled()) |
1970 | levels = 3; |
1971 | |
1972 | remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); |
1973 | |
1974 | if (!cpu) { |
1975 | alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); |
1976 | kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE); |
1977 | kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd); |
1978 | |
1979 | // |
1980 | // Create a minimal page table hierarchy that permits us to map |
1981 | // the swapper page tables temporarily as we traverse them. |
1982 | // |
1983 | // The physical pages are laid out as follows: |
1984 | // |
1985 | // +--------+-/-------+-/------ +-/------ +-\\\--------+ |
1986 | // : PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[] : |
1987 | // +--------+-\-------+-\------ +-\------ +-///--------+ |
1988 | // ^ |
1989 | // The first page is mapped into this hierarchy at a PMD_SHIFT |
1990 | // aligned virtual address, so that we can manipulate the PTE |
1991 | // level entries while the mapping is active. The first entry |
1992 | // covers the PTE[] page itself, the remaining entries are free |
		// to be used as an ad-hoc fixmap.
1994 | // |
1995 | create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc), |
1996 | KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL, |
1997 | kpti_ng_pgd_alloc, 0); |
1998 | } |
1999 | |
2000 | cpu_install_idmap(); |
2001 | remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA); |
2002 | cpu_uninstall_idmap(); |
2003 | |
2004 | if (!cpu) { |
2005 | free_pages(alloc, order); |
2006 | arm64_use_ng_mappings = true; |
2007 | } |
2008 | |
2009 | return 0; |
2010 | } |
2011 | |
2012 | static void __init kpti_install_ng_mappings(void) |
2013 | { |
2014 | /* Check whether KPTI is going to be used */ |
2015 | if (!arm64_kernel_unmapped_at_el0()) |
2016 | return; |
2017 | |
2018 | /* |
2019 | * We don't need to rewrite the page-tables if either we've done |
2020 | * it already or we have KASLR enabled and therefore have not |
2021 | * created any global mappings at all. |
2022 | */ |
2023 | if (arm64_use_ng_mappings) |
2024 | return; |
2025 | |
2026 | stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask); |
2027 | } |
2028 | |
2029 | #else |
2030 | static inline void kpti_install_ng_mappings(void) |
2031 | { |
2032 | } |
2033 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ |
2034 | |
2035 | static void cpu_enable_kpti(struct arm64_cpu_capabilities const *cap) |
2036 | { |
2037 | if (__this_cpu_read(this_cpu_vector) == vectors) { |
2038 | const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); |
2039 | |
2040 | __this_cpu_write(this_cpu_vector, v); |
2041 | } |
2042 | |
2043 | } |
2044 | |
2045 | static int __init parse_kpti(char *str) |
2046 | { |
2047 | bool enabled; |
	int ret = kstrtobool(str, &enabled);
2049 | |
2050 | if (ret) |
2051 | return ret; |
2052 | |
2053 | __kpti_forced = enabled ? 1 : -1; |
2054 | return 0; |
2055 | } |
early_param("kpti", parse_kpti);
2057 | |
2058 | #ifdef CONFIG_ARM64_HW_AFDBM |
2059 | static struct cpumask dbm_cpus __read_mostly; |
2060 | |
2061 | static inline void __cpu_enable_hw_dbm(void) |
2062 | { |
2063 | u64 tcr = read_sysreg(tcr_el1) | TCR_HD; |
2064 | |
2065 | write_sysreg(tcr, tcr_el1); |
2066 | isb(); |
2067 | local_flush_tlb_all(); |
2068 | } |
2069 | |
2070 | static bool cpu_has_broken_dbm(void) |
2071 | { |
2072 | /* List of CPUs which have broken DBM support. */ |
2073 | static const struct midr_range cpus[] = { |
2074 | #ifdef CONFIG_ARM64_ERRATUM_1024718 |
2075 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), |
2076 | /* Kryo4xx Silver (rdpe => r1p0) */ |
2077 | MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), |
2078 | #endif |
2079 | #ifdef CONFIG_ARM64_ERRATUM_2051678 |
2080 | MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2), |
2081 | #endif |
2082 | {}, |
2083 | }; |
2084 | |
2085 | return is_midr_in_range_list(cpus); |
2086 | } |
2087 | |
2088 | static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap) |
2089 | { |
2090 | return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) && |
2091 | !cpu_has_broken_dbm(); |
2092 | } |
2093 | |
2094 | static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap) |
2095 | { |
2096 | if (cpu_can_use_dbm(cap)) { |
2097 | __cpu_enable_hw_dbm(); |
2098 | cpumask_set_cpu(smp_processor_id(), &dbm_cpus); |
2099 | } |
2100 | } |
2101 | |
2102 | static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, |
2103 | int __unused) |
2104 | { |
2105 | /* |
	 * DBM is a non-conflicting feature, i.e. the kernel can safely
2107 | * run a mix of CPUs with and without the feature. So, we |
2108 | * unconditionally enable the capability to allow any late CPU |
2109 | * to use the feature. We only enable the control bits on the |
2110 | * CPU, if it is supported. |
2111 | */ |
2112 | |
2113 | return true; |
2114 | } |
2115 | |
2116 | #endif |
2117 | |
2118 | #ifdef CONFIG_ARM64_AMU_EXTN |
2119 | |
2120 | /* |
2121 | * The "amu_cpus" cpumask only signals that the CPU implementation for the |
2122 | * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide |
2123 | * information regarding all the events that it supports. When a CPU bit is |
2124 | * set in the cpumask, the user of this feature can only rely on the presence |
2125 | * of the 4 fixed counters for that CPU. But this does not guarantee that the |
2126 | * counters are enabled or access to these counters is enabled by code |
2127 | * executed at higher exception levels (firmware). |
2128 | */ |
2129 | static struct cpumask amu_cpus __read_mostly; |
2130 | |
2131 | bool cpu_has_amu_feat(int cpu) |
2132 | { |
2133 | return cpumask_test_cpu(cpu, &amu_cpus); |
2134 | } |
2135 | |
2136 | int get_cpu_with_amu_feat(void) |
2137 | { |
2138 | return cpumask_any(&amu_cpus); |
2139 | } |
2140 | |
2141 | static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) |
2142 | { |
2143 | if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { |
2144 | cpumask_set_cpu(smp_processor_id(), &amu_cpus); |
2145 | |
2146 | /* 0 reference values signal broken/disabled counters */ |
2147 | if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) |
2148 | update_freq_counters_refs(); |
2149 | } |
2150 | } |
2151 | |
2152 | static bool has_amu(const struct arm64_cpu_capabilities *cap, |
2153 | int __unused) |
2154 | { |
2155 | /* |
2156 | * The AMU extension is a non-conflicting feature: the kernel can |
2157 | * safely run a mix of CPUs with and without support for the |
2158 | * activity monitors extension. Therefore, unconditionally enable |
2159 | * the capability to allow any late CPU to use the feature. |
2160 | * |
2161 | * With this feature unconditionally enabled, the cpu_enable |
2162 | * function will be called for all CPUs that match the criteria, |
2163 | * including secondary and hotplugged, marking this feature as |
2164 | * present on that respective CPU. The enable function will also |
2165 | * print a detection message. |
2166 | */ |
2167 | |
2168 | return true; |
2169 | } |
2170 | #else |
2171 | int get_cpu_with_amu_feat(void) |
2172 | { |
2173 | return nr_cpu_ids; |
2174 | } |
2175 | #endif |
2176 | |
2177 | static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) |
2178 | { |
2179 | return is_kernel_in_hyp_mode(); |
2180 | } |
2181 | |
2182 | static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) |
2183 | { |
2184 | /* |
2185 | * Copy register values that aren't redirected by hardware. |
2186 | * |
2187 | * Before code patching, we only set tpidr_el1, all CPUs need to copy |
2188 | * this value to tpidr_el2 before we patch the code. Once we've done |
2189 | * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to |
2190 | * do anything here. |
2191 | */ |
2192 | if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) |
2193 | write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); |
2194 | } |
2195 | |
2196 | static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap, |
2197 | int scope) |
2198 | { |
2199 | if (kvm_get_mode() != KVM_MODE_NV) |
2200 | return false; |
2201 | |
2202 | if (!cpucap_multi_entry_cap_matches(cap, scope)) { |
		pr_warn("unavailable: %s\n", cap->desc);
2204 | return false; |
2205 | } |
2206 | |
2207 | return true; |
2208 | } |
2209 | |
2210 | static bool hvhe_possible(const struct arm64_cpu_capabilities *entry, |
2211 | int __unused) |
2212 | { |
2213 | return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE); |
2214 | } |
2215 | |
2216 | #ifdef CONFIG_ARM64_PAN |
2217 | static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) |
2218 | { |
2219 | /* |
2220 | * We modify PSTATE. This won't work from irq context as the PSTATE |
2221 | * is discarded once we return from the exception. |
2222 | */ |
2223 | WARN_ON_ONCE(in_interrupt()); |
2224 | |
2225 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); |
2226 | set_pstate_pan(1); |
2227 | } |
2228 | #endif /* CONFIG_ARM64_PAN */ |
2229 | |
2230 | #ifdef CONFIG_ARM64_RAS_EXTN |
2231 | static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) |
2232 | { |
2233 | /* Firmware may have left a deferred SError in this register. */ |
2234 | write_sysreg_s(0, SYS_DISR_EL1); |
2235 | } |
2236 | #endif /* CONFIG_ARM64_RAS_EXTN */ |
2237 | |
2238 | #ifdef CONFIG_ARM64_PTR_AUTH |
2239 | static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope) |
2240 | { |
2241 | int boot_val, sec_val; |
2242 | |
2243 | /* We don't expect to be called with SCOPE_SYSTEM */ |
2244 | WARN_ON(scope == SCOPE_SYSTEM); |
2245 | /* |
2246 | * The ptr-auth feature levels are not intercompatible with lower |
2247 | * levels. Hence we must match ptr-auth feature level of the secondary |
2248 | * CPUs with that of the boot CPU. The level of boot cpu is fetched |
2249 | * from the sanitised register whereas direct register read is done for |
2250 | * the secondary CPUs. |
2251 | * The sanitised feature state is guaranteed to match that of the |
2252 | * boot CPU as a mismatched secondary CPU is parked before it gets |
2253 | * a chance to update the state, with the capability. |
2254 | */ |
2255 | boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg), |
2256 | entry->field_pos, entry->sign); |
2257 | if (scope & SCOPE_BOOT_CPU) |
2258 | return boot_val >= entry->min_field_value; |
2259 | /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */ |
2260 | sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg), |
2261 | entry->field_pos, entry->sign); |
2262 | return (sec_val >= entry->min_field_value) && (sec_val == boot_val); |
2263 | } |
2264 | |
2265 | static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry, |
2266 | int scope) |
2267 | { |
2268 | bool api = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope); |
2269 | bool apa = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope); |
2270 | bool apa3 = has_address_auth_cpucap(cpucap_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope); |
2271 | |
2272 | return apa || apa3 || api; |
2273 | } |
2274 | |
2275 | static bool has_generic_auth(const struct arm64_cpu_capabilities *entry, |
2276 | int __unused) |
2277 | { |
2278 | bool gpi = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF); |
2279 | bool gpa = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5); |
2280 | bool gpa3 = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3); |
2281 | |
2282 | return gpa || gpa3 || gpi; |
2283 | } |
2284 | #endif /* CONFIG_ARM64_PTR_AUTH */ |
2285 | |
2286 | #ifdef CONFIG_ARM64_E0PD |
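/*
 * TCR_EL1.E0PD1 causes EL0 accesses to the kernel (TTBR1) half of the
 * address space to fault, which can stand in for KPTI as a Meltdown-style
 * mitigation on CPUs that implement FEAT_E0PD.
 */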
2287 | static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) |
2288 | { |
2289 | if (this_cpu_has_cap(ARM64_HAS_E0PD)) |
2290 | sysreg_clear_set(tcr_el1, 0, TCR_E0PD1); |
2291 | } |
2292 | #endif /* CONFIG_ARM64_E0PD */ |
2293 | |
2294 | #ifdef CONFIG_ARM64_PSEUDO_NMI |
2295 | static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, |
2296 | int scope) |
2297 | { |
2298 | /* |
2299 | * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU |
2300 | * feature, so will be detected earlier. |
2301 | */ |
2302 | BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS); |
2303 | if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS)) |
2304 | return false; |
2305 | |
2306 | return enable_pseudo_nmi; |
2307 | } |
2308 | |
2309 | static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry, |
2310 | int scope) |
2311 | { |
2312 | /* |
2313 | * If we're not using priority masking then we won't be poking PMR_EL1, |
2314 | * and there's no need to relax synchronization of writes to it, and |
2315 | * ICC_CTLR_EL1 might not be accessible and we must avoid reads from |
2316 | * that. |
2317 | * |
2318 | * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU |
2319 | * feature, so will be detected earlier. |
2320 | */ |
2321 | BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING); |
2322 | if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING)) |
2323 | return false; |
2324 | |
2325 | /* |
2326 | * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a |
2327 | * hint for interrupt distribution, a DSB is not necessary when |
2328 | * unmasking IRQs via PMR, and we can relax the barrier to a NOP. |
2329 | * |
2330 | * Linux itself doesn't use 1:N distribution, so has no need to |
2331 | * set PMHE. The only reason to have it set is if EL3 requires it |
2332 | * (and we can't change it). |
2333 | */ |
2334 | return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0; |
2335 | } |
2336 | #endif |
2337 | |
2338 | #ifdef CONFIG_ARM64_BTI |
2339 | static void bti_enable(const struct arm64_cpu_capabilities *__unused) |
2340 | { |
2341 | /* |
2342 | * Use of X16/X17 for tail-calls and trampolines that jump to |
2343 | * function entry points using BR is a requirement for |
2344 | * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI. |
2345 | * So, be strict and forbid other BRs using other registers to |
2346 | * jump onto a PACIxSP instruction: |
2347 | */ |
2348 | sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1); |
2349 | isb(); |
2350 | } |
2351 | #endif /* CONFIG_ARM64_BTI */ |
2352 | |
2353 | #ifdef CONFIG_ARM64_MTE |
2354 | static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) |
2355 | { |
2356 | sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0); |
2357 | |
2358 | mte_cpu_setup(); |
2359 | |
2360 | /* |
2361 | * Clear the tags in the zero page. This needs to be done via the |
2362 | * linear map which has the Tagged attribute. |
2363 | */ |
2364 | if (try_page_mte_tagging(ZERO_PAGE(0))) { |
2365 | mte_clear_page_tags(lm_alias(empty_zero_page)); |
2366 | set_page_mte_tagged(ZERO_PAGE(0)); |
2367 | } |
2368 | |
2369 | kasan_init_hw_tags_cpu(); |
2370 | } |
2371 | #endif /* CONFIG_ARM64_MTE */ |
2372 | |
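/*
 * Hide selected feature fields from userspace (by clearing them from the
 * register's user_mask) on systems affected by errata that make those
 * features unreliable at EL0.
 */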
2373 | static void user_feature_fixup(void) |
2374 | { |
2375 | if (cpus_have_cap(ARM64_WORKAROUND_2658417)) { |
2376 | struct arm64_ftr_reg *regp; |
2377 | |
2378 | regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1); |
2379 | if (regp) |
2380 | regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK; |
2381 | } |
2382 | |
2383 | if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) { |
2384 | struct arm64_ftr_reg *regp; |
2385 | |
2386 | regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1); |
2387 | if (regp) |
2388 | regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK; |
2389 | } |
2390 | } |
2391 | |
2392 | static void elf_hwcap_fixup(void) |
2393 | { |
2394 | #ifdef CONFIG_COMPAT |
2395 | if (cpus_have_cap(ARM64_WORKAROUND_1742098)) |
2396 | compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; |
2397 | #endif /* CONFIG_COMPAT */ |
2398 | } |
2399 | |
2400 | #ifdef CONFIG_KVM |
2401 | static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused) |
2402 | { |
2403 | return kvm_get_mode() == KVM_MODE_PROTECTED; |
2404 | } |
2405 | #endif /* CONFIG_KVM */ |
2406 | |
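/*
 * Set SCTLR_EL1.TIDCP so that EL0 accesses to the IMPLEMENTATION DEFINED
 * system register space trap to EL1 rather than being handled by the CPU.
 */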
2407 | static void cpu_trap_el0_impdef(const struct arm64_cpu_capabilities *__unused) |
2408 | { |
2409 | sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_TIDCP); |
2410 | } |
2411 | |
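/* Run the kernel with PSTATE.DIT (Data Independent Timing) set. */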
2412 | static void cpu_enable_dit(const struct arm64_cpu_capabilities *__unused) |
2413 | { |
2414 | set_pstate_dit(1); |
2415 | } |
2416 | |
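/* Enable the FEAT_MOPS memory copy/set instructions via SCTLR_EL1.MSCEn. */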
2417 | static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused) |
2418 | { |
2419 | sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn); |
2420 | } |
2421 | |
2422 | #ifdef CONFIG_ARM64_POE |
2423 | static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused) |
2424 | { |
2425 | sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1_E0POE); |
2426 | sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_E0POE); |
2427 | } |
2428 | #endif |
2429 | |
2430 | #ifdef CONFIG_ARM64_GCS |
2431 | static void cpu_enable_gcs(const struct arm64_cpu_capabilities *__unused) |
2432 | { |
2433 | /* GCSPR_EL0 is always readable */ |
2434 | write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1); |
2435 | } |
2436 | #endif |
2437 | |
2438 | /* Internal helper functions to match cpu capability type */ |
2439 | static bool |
2440 | cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) |
2441 | { |
2442 | return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); |
2443 | } |
2444 | |
2445 | static bool |
2446 | cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) |
2447 | { |
2448 | return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); |
2449 | } |
2450 | |
2451 | static bool |
2452 | cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) |
2453 | { |
2454 | return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); |
2455 | } |
2456 | |
2457 | static bool |
2458 | test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope) |
2459 | { |
2460 | if (!has_cpuid_feature(entry, scope)) |
2461 | return false; |
2462 | |
2463 | /* Check firmware actually enabled MPAM on this cpu. */ |
2464 | return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN); |
2465 | } |
2466 | |
2467 | static void |
2468 | cpu_enable_mpam(const struct arm64_cpu_capabilities *entry) |
2469 | { |
2470 | /* |
2471 | * Access by the kernel (at EL1) should use the reserved PARTID |
2472 | * which is configured unrestricted. This avoids priority-inversion |
2473 | * where latency sensitive tasks have to wait for a task that has |
2474 | * been throttled to release the lock. |
2475 | */ |
2476 | write_sysreg_s(0, SYS_MPAM1_EL1); |
2477 | } |
2478 | |
2479 | static bool |
2480 | test_has_mpam_hcr(const struct arm64_cpu_capabilities *entry, int scope) |
2481 | { |
2482 | u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); |
2483 | |
2484 | return idr & MPAMIDR_EL1_HAS_HCR; |
2485 | } |
2486 | |
2487 | static const struct arm64_cpu_capabilities arm64_features[] = { |
2488 | { |
2489 | .capability = ARM64_ALWAYS_BOOT, |
2490 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2491 | .matches = has_always, |
2492 | }, |
2493 | { |
2494 | .capability = ARM64_ALWAYS_SYSTEM, |
2495 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2496 | .matches = has_always, |
2497 | }, |
2498 | { |
		.desc = "GIC system register CPU interface",
2500 | .capability = ARM64_HAS_GIC_CPUIF_SYSREGS, |
2501 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2502 | .matches = has_useable_gicv3_cpuif, |
2503 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, GIC, IMP) |
2504 | }, |
2505 | { |
		.desc = "Enhanced Counter Virtualization",
2507 | .capability = ARM64_HAS_ECV, |
2508 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2509 | .matches = has_cpuid_feature, |
2510 | ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, IMP) |
2511 | }, |
2512 | { |
		.desc = "Enhanced Counter Virtualization (CNTPOFF)",
2514 | .capability = ARM64_HAS_ECV_CNTPOFF, |
2515 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2516 | .matches = has_cpuid_feature, |
2517 | ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF) |
2518 | }, |
2519 | #ifdef CONFIG_ARM64_PAN |
2520 | { |
		.desc = "Privileged Access Never",
2522 | .capability = ARM64_HAS_PAN, |
2523 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2524 | .matches = has_cpuid_feature, |
2525 | .cpu_enable = cpu_enable_pan, |
2526 | ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, IMP) |
2527 | }, |
2528 | #endif /* CONFIG_ARM64_PAN */ |
2529 | #ifdef CONFIG_ARM64_EPAN |
2530 | { |
		.desc = "Enhanced Privileged Access Never",
2532 | .capability = ARM64_HAS_EPAN, |
2533 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2534 | .matches = has_cpuid_feature, |
2535 | ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, PAN3) |
2536 | }, |
2537 | #endif /* CONFIG_ARM64_EPAN */ |
2538 | #ifdef CONFIG_ARM64_LSE_ATOMICS |
2539 | { |
		.desc = "LSE atomic instructions",
2541 | .capability = ARM64_HAS_LSE_ATOMICS, |
2542 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2543 | .matches = has_cpuid_feature, |
2544 | ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP) |
2545 | }, |
2546 | #endif /* CONFIG_ARM64_LSE_ATOMICS */ |
2547 | { |
		.desc = "Virtualization Host Extensions",
2549 | .capability = ARM64_HAS_VIRT_HOST_EXTN, |
2550 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2551 | .matches = runs_at_el2, |
2552 | .cpu_enable = cpu_copy_el2regs, |
2553 | }, |
2554 | { |
		.desc = "Nested Virtualization Support",
2556 | .capability = ARM64_HAS_NESTED_VIRT, |
2557 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2558 | .matches = has_nested_virt_support, |
2559 | .match_list = (const struct arm64_cpu_capabilities []){ |
2560 | { |
2561 | .matches = has_cpuid_feature, |
2562 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2) |
2563 | }, |
2564 | { |
2565 | .matches = has_cpuid_feature, |
2566 | ARM64_CPUID_FIELDS(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY) |
2567 | }, |
2568 | { /* Sentinel */ } |
2569 | }, |
2570 | }, |
2571 | { |
2572 | .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE, |
2573 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2574 | .matches = has_32bit_el0, |
2575 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL0, AARCH32) |
2576 | }, |
2577 | #ifdef CONFIG_KVM |
2578 | { |
		.desc = "32-bit EL1 Support",
2580 | .capability = ARM64_HAS_32BIT_EL1, |
2581 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2582 | .matches = has_cpuid_feature, |
2583 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL1, AARCH32) |
2584 | }, |
2585 | { |
		.desc = "Protected KVM",
2587 | .capability = ARM64_KVM_PROTECTED_MODE, |
2588 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2589 | .matches = is_kvm_protected_mode, |
2590 | }, |
2591 | { |
		.desc = "HCRX_EL2 register",
2593 | .capability = ARM64_HAS_HCX, |
2594 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2595 | .matches = has_cpuid_feature, |
2596 | ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP) |
2597 | }, |
2598 | #endif |
2599 | { |
		.desc = "Kernel page table isolation (KPTI)",
2601 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, |
2602 | .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, |
2603 | .cpu_enable = cpu_enable_kpti, |
2604 | .matches = unmap_kernel_at_el0, |
2605 | /* |
2606 | * The ID feature fields below are used to indicate that |
2607 | * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for |
2608 | * more details. |
2609 | */ |
2610 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP) |
2611 | }, |
2612 | { |
2613 | .capability = ARM64_HAS_FPSIMD, |
2614 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2615 | .matches = has_cpuid_feature, |
2616 | .cpu_enable = cpu_enable_fpsimd, |
2617 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, FP, IMP) |
2618 | }, |
2619 | #ifdef CONFIG_ARM64_PMEM |
2620 | { |
2621 | .desc = "Data cache clean to Point of Persistence" , |
2622 | .capability = ARM64_HAS_DCPOP, |
2623 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2624 | .matches = has_cpuid_feature, |
2625 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, IMP) |
2626 | }, |
2627 | { |
2628 | .desc = "Data cache clean to Point of Deep Persistence" , |
2629 | .capability = ARM64_HAS_DCPODP, |
2630 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2631 | .matches = has_cpuid_feature, |
2632 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, DPB2) |
2633 | }, |
2634 | #endif |
2635 | #ifdef CONFIG_ARM64_SVE |
2636 | { |
2637 | .desc = "Scalable Vector Extension" , |
2638 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2639 | .capability = ARM64_SVE, |
2640 | .cpu_enable = cpu_enable_sve, |
2641 | .matches = has_cpuid_feature, |
2642 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP) |
2643 | }, |
2644 | #endif /* CONFIG_ARM64_SVE */ |
2645 | #ifdef CONFIG_ARM64_RAS_EXTN |
2646 | { |
2647 | .desc = "RAS Extension Support" , |
2648 | .capability = ARM64_HAS_RAS_EXTN, |
2649 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2650 | .matches = has_cpuid_feature, |
2651 | .cpu_enable = cpu_clear_disr, |
2652 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP) |
2653 | }, |
2654 | #endif /* CONFIG_ARM64_RAS_EXTN */ |
2655 | #ifdef CONFIG_ARM64_AMU_EXTN |
2656 | { |
2657 | .desc = "Activity Monitors Unit (AMU)" , |
2658 | .capability = ARM64_HAS_AMU_EXTN, |
2659 | .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, |
2660 | .matches = has_amu, |
2661 | .cpu_enable = cpu_amu_enable, |
2662 | .cpus = &amu_cpus, |
2663 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP) |
2664 | }, |
2665 | #endif /* CONFIG_ARM64_AMU_EXTN */ |
2666 | { |
2667 | .desc = "Data cache clean to the PoU not required for I/D coherence" , |
2668 | .capability = ARM64_HAS_CACHE_IDC, |
2669 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2670 | .matches = has_cache_idc, |
2671 | .cpu_enable = cpu_emulate_effective_ctr, |
2672 | }, |
2673 | { |
2674 | .desc = "Instruction cache invalidation not required for I/D coherence" , |
2675 | .capability = ARM64_HAS_CACHE_DIC, |
2676 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2677 | .matches = has_cache_dic, |
2678 | }, |
2679 | { |
2680 | .desc = "Stage-2 Force Write-Back" , |
2681 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2682 | .capability = ARM64_HAS_STAGE2_FWB, |
2683 | .matches = has_cpuid_feature, |
2684 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, FWB, IMP) |
2685 | }, |
2686 | { |
2687 | .desc = "ARMv8.4 Translation Table Level" , |
2688 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2689 | .capability = ARM64_HAS_ARMv8_4_TTL, |
2690 | .matches = has_cpuid_feature, |
2691 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, TTL, IMP) |
2692 | }, |
2693 | { |
2694 | .desc = "TLB range maintenance instructions" , |
2695 | .capability = ARM64_HAS_TLB_RANGE, |
2696 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2697 | .matches = has_cpuid_feature, |
2698 | ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, TLB, RANGE) |
2699 | }, |
2700 | #ifdef CONFIG_ARM64_HW_AFDBM |
2701 | { |
2702 | .desc = "Hardware dirty bit management" , |
2703 | .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, |
2704 | .capability = ARM64_HW_DBM, |
2705 | .matches = has_hw_dbm, |
2706 | .cpu_enable = cpu_enable_hw_dbm, |
2707 | .cpus = &dbm_cpus, |
2708 | ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM) |
2709 | }, |
2710 | #endif |
2711 | #ifdef CONFIG_ARM64_HAFT |
2712 | { |
2713 | .desc = "Hardware managed Access Flag for Table Descriptors" , |
2714 | /* |
2715 | * Contrary to the page/block access flag, the table access flag |
2716 | * cannot be emulated in software (no access fault will occur). |
2717 | * Therefore this should be used only if it's supported system |
2718 | * wide. |
2719 | */ |
2720 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2721 | .capability = ARM64_HAFT, |
2722 | .matches = has_cpuid_feature, |
2723 | ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, HAFT) |
2724 | }, |
2725 | #endif |
2726 | { |
2727 | .desc = "CRC32 instructions" , |
2728 | .capability = ARM64_HAS_CRC32, |
2729 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2730 | .matches = has_cpuid_feature, |
2731 | ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, CRC32, IMP) |
2732 | }, |
2733 | { |
2734 | .desc = "Speculative Store Bypassing Safe (SSBS)" , |
2735 | .capability = ARM64_SSBS, |
2736 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2737 | .matches = has_cpuid_feature, |
2738 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SSBS, IMP) |
2739 | }, |
2740 | #ifdef CONFIG_ARM64_CNP |
2741 | { |
2742 | .desc = "Common not Private translations" , |
2743 | .capability = ARM64_HAS_CNP, |
2744 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2745 | .matches = has_useable_cnp, |
2746 | .cpu_enable = cpu_enable_cnp, |
2747 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, CnP, IMP) |
2748 | }, |
2749 | #endif |
2750 | { |
2751 | .desc = "Speculation barrier (SB)" , |
2752 | .capability = ARM64_HAS_SB, |
2753 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2754 | .matches = has_cpuid_feature, |
2755 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, SB, IMP) |
2756 | }, |
2757 | #ifdef CONFIG_ARM64_PTR_AUTH |
2758 | { |
2759 | .desc = "Address authentication (architected QARMA5 algorithm)" , |
2760 | .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5, |
2761 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2762 | .matches = has_address_auth_cpucap, |
2763 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth) |
2764 | }, |
2765 | { |
2766 | .desc = "Address authentication (architected QARMA3 algorithm)" , |
2767 | .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3, |
2768 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2769 | .matches = has_address_auth_cpucap, |
2770 | ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth) |
2771 | }, |
2772 | { |
2773 | .desc = "Address authentication (IMP DEF algorithm)" , |
2774 | .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, |
2775 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2776 | .matches = has_address_auth_cpucap, |
2777 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth) |
2778 | }, |
2779 | { |
2780 | .capability = ARM64_HAS_ADDRESS_AUTH, |
2781 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2782 | .matches = has_address_auth_metacap, |
2783 | }, |
2784 | { |
2785 | .desc = "Generic authentication (architected QARMA5 algorithm)" , |
2786 | .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5, |
2787 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2788 | .matches = has_cpuid_feature, |
2789 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP) |
2790 | }, |
2791 | { |
2792 | .desc = "Generic authentication (architected QARMA3 algorithm)" , |
2793 | .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3, |
2794 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2795 | .matches = has_cpuid_feature, |
2796 | ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP) |
2797 | }, |
2798 | { |
2799 | .desc = "Generic authentication (IMP DEF algorithm)" , |
2800 | .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF, |
2801 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2802 | .matches = has_cpuid_feature, |
2803 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP) |
2804 | }, |
2805 | { |
2806 | .capability = ARM64_HAS_GENERIC_AUTH, |
2807 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2808 | .matches = has_generic_auth, |
2809 | }, |
2810 | #endif /* CONFIG_ARM64_PTR_AUTH */ |
2811 | #ifdef CONFIG_ARM64_PSEUDO_NMI |
2812 | { |
2813 | /* |
2814 | * Depends on having GICv3 |
2815 | */ |
2816 | .desc = "IRQ priority masking" , |
2817 | .capability = ARM64_HAS_GIC_PRIO_MASKING, |
2818 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2819 | .matches = can_use_gic_priorities, |
2820 | }, |
2821 | { |
2822 | /* |
2823 | * Depends on ARM64_HAS_GIC_PRIO_MASKING |
2824 | */ |
2825 | .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC, |
2826 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2827 | .matches = has_gic_prio_relaxed_sync, |
2828 | }, |
2829 | #endif |
2830 | #ifdef CONFIG_ARM64_E0PD |
2831 | { |
2832 | .desc = "E0PD" , |
2833 | .capability = ARM64_HAS_E0PD, |
2834 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2835 | .cpu_enable = cpu_enable_e0pd, |
2836 | .matches = has_cpuid_feature, |
2837 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, E0PD, IMP) |
2838 | }, |
2839 | #endif |
2840 | { |
2841 | .desc = "Random Number Generator" , |
2842 | .capability = ARM64_HAS_RNG, |
2843 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2844 | .matches = has_cpuid_feature, |
2845 | ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, RNDR, IMP) |
2846 | }, |
2847 | #ifdef CONFIG_ARM64_BTI |
2848 | { |
2849 | .desc = "Branch Target Identification" , |
2850 | .capability = ARM64_BTI, |
2851 | #ifdef CONFIG_ARM64_BTI_KERNEL |
2852 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2853 | #else |
2854 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2855 | #endif |
2856 | .matches = has_cpuid_feature, |
2857 | .cpu_enable = bti_enable, |
2858 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, BT, IMP) |
2859 | }, |
2860 | #endif |
2861 | #ifdef CONFIG_ARM64_MTE |
2862 | { |
2863 | .desc = "Memory Tagging Extension" , |
2864 | .capability = ARM64_MTE, |
2865 | .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
2866 | .matches = has_cpuid_feature, |
2867 | .cpu_enable = cpu_enable_mte, |
2868 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE2) |
2869 | }, |
2870 | { |
2871 | .desc = "Asymmetric MTE Tag Check Fault" , |
2872 | .capability = ARM64_MTE_ASYMM, |
2873 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2874 | .matches = has_cpuid_feature, |
2875 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3) |
2876 | }, |
2877 | #endif /* CONFIG_ARM64_MTE */ |
2878 | { |
2879 | .desc = "RCpc load-acquire (LDAPR)" , |
2880 | .capability = ARM64_HAS_LDAPR, |
2881 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2882 | .matches = has_cpuid_feature, |
2883 | ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP) |
2884 | }, |
2885 | { |
2886 | .desc = "Fine Grained Traps" , |
2887 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2888 | .capability = ARM64_HAS_FGT, |
2889 | .matches = has_cpuid_feature, |
2890 | ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP) |
2891 | }, |
2892 | { |
2893 | .desc = "Fine Grained Traps 2" , |
2894 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2895 | .capability = ARM64_HAS_FGT2, |
2896 | .matches = has_cpuid_feature, |
2897 | ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, FGT2) |
2898 | }, |
2899 | #ifdef CONFIG_ARM64_SME |
2900 | { |
2901 | .desc = "Scalable Matrix Extension" , |
2902 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2903 | .capability = ARM64_SME, |
2904 | .matches = has_cpuid_feature, |
2905 | .cpu_enable = cpu_enable_sme, |
2906 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP) |
2907 | }, |
2908 | /* FA64 should be sorted after the base SME capability */ |
2909 | { |
2910 | .desc = "FA64" , |
2911 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2912 | .capability = ARM64_SME_FA64, |
2913 | .matches = has_cpuid_feature, |
2914 | .cpu_enable = cpu_enable_fa64, |
2915 | ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP) |
2916 | }, |
2917 | { |
2918 | .desc = "SME2" , |
2919 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2920 | .capability = ARM64_SME2, |
2921 | .matches = has_cpuid_feature, |
2922 | .cpu_enable = cpu_enable_sme2, |
2923 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2) |
2924 | }, |
2925 | #endif /* CONFIG_ARM64_SME */ |
2926 | { |
2927 | .desc = "WFx with timeout" , |
2928 | .capability = ARM64_HAS_WFXT, |
2929 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2930 | .matches = has_cpuid_feature, |
2931 | ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, WFxT, IMP) |
2932 | }, |
2933 | { |
2934 | .desc = "Trap EL0 IMPLEMENTATION DEFINED functionality" , |
2935 | .capability = ARM64_HAS_TIDCP1, |
2936 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2937 | .matches = has_cpuid_feature, |
2938 | .cpu_enable = cpu_trap_el0_impdef, |
2939 | ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TIDCP1, IMP) |
2940 | }, |
2941 | { |
2942 | .desc = "Data independent timing control (DIT)" , |
2943 | .capability = ARM64_HAS_DIT, |
2944 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2945 | .matches = has_cpuid_feature, |
2946 | .cpu_enable = cpu_enable_dit, |
2947 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) |
2948 | }, |
2949 | { |
2950 | .desc = "Memory Copy and Memory Set instructions" , |
2951 | .capability = ARM64_HAS_MOPS, |
2952 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2953 | .matches = has_cpuid_feature, |
2954 | .cpu_enable = cpu_enable_mops, |
2955 | ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, MOPS, IMP) |
2956 | }, |
2957 | { |
2958 | .capability = ARM64_HAS_TCR2, |
2959 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2960 | .matches = has_cpuid_feature, |
2961 | ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, TCRX, IMP) |
2962 | }, |
2963 | { |
2964 | .desc = "Stage-1 Permission Indirection Extension (S1PIE)" , |
2965 | .capability = ARM64_HAS_S1PIE, |
2966 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
2967 | .matches = has_cpuid_feature, |
2968 | ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1PIE, IMP) |
2969 | }, |
2970 | { |
2971 | .desc = "VHE for hypervisor only" , |
2972 | .capability = ARM64_KVM_HVHE, |
2973 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2974 | .matches = hvhe_possible, |
2975 | }, |
2976 | { |
2977 | .desc = "Enhanced Virtualization Traps" , |
2978 | .capability = ARM64_HAS_EVT, |
2979 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2980 | .matches = has_cpuid_feature, |
2981 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) |
2982 | }, |
2983 | { |
2984 | .desc = "52-bit Virtual Addressing for KVM (LPA2)" , |
2985 | .capability = ARM64_HAS_LPA2, |
2986 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2987 | .matches = has_lpa2, |
2988 | }, |
2989 | { |
2990 | .desc = "FPMR" , |
2991 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
2992 | .capability = ARM64_HAS_FPMR, |
2993 | .matches = has_cpuid_feature, |
2994 | .cpu_enable = cpu_enable_fpmr, |
2995 | ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, FPMR, IMP) |
2996 | }, |
2997 | #ifdef CONFIG_ARM64_VA_BITS_52 |
2998 | { |
2999 | .capability = ARM64_HAS_VA52, |
3000 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
3001 | .matches = has_cpuid_feature, |
3002 | #ifdef CONFIG_ARM64_64K_PAGES |
3003 | .desc = "52-bit Virtual Addressing (LVA)" , |
3004 | ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, VARange, 52) |
3005 | #else |
3006 | .desc = "52-bit Virtual Addressing (LPA2)" , |
3007 | #ifdef CONFIG_ARM64_4K_PAGES |
3008 | ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN4, 52_BIT) |
3009 | #else |
3010 | ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, TGRAN16, 52_BIT) |
3011 | #endif |
3012 | #endif |
3013 | }, |
3014 | #endif |
3015 | { |
3016 | .desc = "Memory Partitioning And Monitoring" , |
3017 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
3018 | .capability = ARM64_MPAM, |
3019 | .matches = test_has_mpam, |
3020 | .cpu_enable = cpu_enable_mpam, |
3021 | ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) |
3022 | }, |
3023 | { |
3024 | .desc = "Memory Partitioning And Monitoring Virtualisation" , |
3025 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
3026 | .capability = ARM64_MPAM_HCR, |
3027 | .matches = test_has_mpam_hcr, |
3028 | }, |
3029 | { |
3030 | .desc = "NV1" , |
3031 | .capability = ARM64_HAS_HCR_NV1, |
3032 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
3033 | .matches = has_nv1, |
3034 | ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1) |
3035 | }, |
3036 | #ifdef CONFIG_ARM64_POE |
3037 | { |
3038 | .desc = "Stage-1 Permission Overlay Extension (S1POE)" , |
3039 | .capability = ARM64_HAS_S1POE, |
3040 | .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
3041 | .matches = has_cpuid_feature, |
3042 | .cpu_enable = cpu_enable_poe, |
3043 | ARM64_CPUID_FIELDS(ID_AA64MMFR3_EL1, S1POE, IMP) |
3044 | }, |
3045 | #endif |
3046 | #ifdef CONFIG_ARM64_GCS |
3047 | { |
3048 | .desc = "Guarded Control Stack (GCS)" , |
3049 | .capability = ARM64_HAS_GCS, |
3050 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
3051 | .cpu_enable = cpu_enable_gcs, |
3052 | .matches = has_cpuid_feature, |
3053 | ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, GCS, IMP) |
3054 | }, |
3055 | #endif |
3056 | #ifdef CONFIG_HW_PERF_EVENTS |
3057 | { |
3058 | .desc = "PMUv3" , |
3059 | .capability = ARM64_HAS_PMUV3, |
3060 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
3061 | .matches = has_pmuv3, |
3062 | }, |
3063 | #endif |
3064 | {}, |
3065 | }; |
3066 | |
3067 | #define HWCAP_CPUID_MATCH(reg, field, min_value) \ |
3068 | .matches = has_user_cpuid_feature, \ |
3069 | ARM64_CPUID_FIELDS(reg, field, min_value) |
3070 | |
3071 | #define __HWCAP_CAP(name, cap_type, cap) \ |
3072 | .desc = name, \ |
3073 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ |
3074 | .hwcap_type = cap_type, \ |
3075 | .hwcap = cap, \ |
3076 | |
3077 | #define HWCAP_CAP(reg, field, min_value, cap_type, cap) \ |
3078 | { \ |
3079 | __HWCAP_CAP(#cap, cap_type, cap) \ |
3080 | HWCAP_CPUID_MATCH(reg, field, min_value) \ |
3081 | } |
3082 | |
3083 | #define HWCAP_MULTI_CAP(list, cap_type, cap) \ |
3084 | { \ |
3085 | __HWCAP_CAP(#cap, cap_type, cap) \ |
3086 | .matches = cpucap_multi_entry_cap_matches, \ |
3087 | .match_list = list, \ |
3088 | } |
3089 | |
3090 | #define HWCAP_CAP_MATCH(match, cap_type, cap) \ |
3091 | { \ |
3092 | __HWCAP_CAP(#cap, cap_type, cap) \ |
3093 | .matches = match, \ |
3094 | } |
3095 | |
3096 | #define HWCAP_CAP_MATCH_ID(match, reg, field, min_value, cap_type, cap) \ |
3097 | { \ |
3098 | __HWCAP_CAP(#cap, cap_type, cap) \ |
3099 | HWCAP_CPUID_MATCH(reg, field, min_value) \ |
3100 | .matches = match, \ |
3101 | } |
3102 | |
3103 | #ifdef CONFIG_ARM64_PTR_AUTH |
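/*
 * The lists below are consumed via HWCAP_MULTI_CAP(), whose
 * cpucap_multi_entry_cap_matches() treats the entries as alternatives: the
 * PACA/PACG hwcaps are advertised if any one of the listed algorithms
 * (QARMA5, QARMA3 or IMP DEF) is implemented.
 */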
3104 | static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { |
3105 | { |
3106 | HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth) |
3107 | }, |
3108 | { |
3109 | HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth) |
3110 | }, |
3111 | { |
3112 | HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth) |
3113 | }, |
3114 | {}, |
3115 | }; |
3116 | |
3117 | static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { |
3118 | { |
3119 | HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP) |
3120 | }, |
3121 | { |
3122 | HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP) |
3123 | }, |
3124 | { |
3125 | HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP) |
3126 | }, |
3127 | {}, |
3128 | }; |
3129 | #endif |
3130 | |
3131 | #ifdef CONFIG_ARM64_SVE |
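/*
 * The ID_AA64ZFR0_EL1-based SVE hwcaps further down use has_sve_feature() so
 * that, in addition to the individual field checks, they are only advertised
 * when the kernel has actually enabled SVE support.
 */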
3132 | static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope) |
3133 | { |
3134 | return system_supports_sve() && has_user_cpuid_feature(cap, scope); |
3135 | } |
3136 | #endif |
3137 | |
3138 | static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { |
3139 | HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), |
3140 | HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES), |
3141 | HWCAP_CAP(ID_AA64ISAR0_EL1, SHA1, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA1), |
3142 | HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA256, CAP_HWCAP, KERNEL_HWCAP_SHA2), |
3143 | HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512), |
3144 | HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32), |
3145 | HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), |
3146 | HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, FEAT_LSE128, CAP_HWCAP, KERNEL_HWCAP_LSE128), |
3147 | HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), |
3148 | HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3), |
3149 | HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3), |
3150 | HWCAP_CAP(ID_AA64ISAR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SM4), |
3151 | HWCAP_CAP(ID_AA64ISAR0_EL1, DP, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), |
3152 | HWCAP_CAP(ID_AA64ISAR0_EL1, FHM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), |
3153 | HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM), |
3154 | HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), |
3155 | HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG), |
3156 | HWCAP_CAP(ID_AA64ISAR3_EL1, FPRCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_FPRCVT), |
3157 | HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP), |
3158 | HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP), |
3159 | HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD), |
3160 | HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, FP16, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), |
3161 | HWCAP_CAP(ID_AA64PFR0_EL1, DIT, IMP, CAP_HWCAP, KERNEL_HWCAP_DIT), |
3162 | HWCAP_CAP(ID_AA64PFR2_EL1, FPMR, IMP, CAP_HWCAP, KERNEL_HWCAP_FPMR), |
3163 | HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, IMP, CAP_HWCAP, KERNEL_HWCAP_DCPOP), |
3164 | HWCAP_CAP(ID_AA64ISAR1_EL1, DPB, DPB2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), |
3165 | HWCAP_CAP(ID_AA64ISAR1_EL1, JSCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_JSCVT), |
3166 | HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA), |
3167 | HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC), |
3168 | HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), |
3169 | HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC3, CAP_HWCAP, KERNEL_HWCAP_LRCPC3), |
3170 | HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT), |
3171 | HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB), |
3172 | HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16), |
3173 | HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16), |
3174 | HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH), |
3175 | HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM), |
3176 | HWCAP_CAP(ID_AA64ISAR2_EL1, LUT, IMP, CAP_HWCAP, KERNEL_HWCAP_LUT), |
3177 | HWCAP_CAP(ID_AA64ISAR3_EL1, FAMINMAX, IMP, CAP_HWCAP, KERNEL_HWCAP_FAMINMAX), |
3178 | HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT), |
3179 | #ifdef CONFIG_ARM64_SVE |
3180 | HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE), |
3181 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p2, CAP_HWCAP, KERNEL_HWCAP_SVE2P2), |
3182 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1), |
3183 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), |
3184 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES), |
3185 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), |
3186 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, AES2, CAP_HWCAP, KERNEL_HWCAP_SVE_AES2), |
3187 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), |
3188 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16), |
3189 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, B16B16, BFSCALE, CAP_HWCAP, KERNEL_HWCAP_SVE_BFSCALE), |
3190 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), |
3191 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16), |
3192 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), |
3193 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4), |
3194 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), |
3195 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), |
3196 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), |
3197 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F16MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_F16MM), |
3198 | HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, EltPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_ELTPERM), |
3199 | #endif |
3200 | #ifdef CONFIG_ARM64_GCS |
3201 | HWCAP_CAP(ID_AA64PFR1_EL1, GCS, IMP, CAP_HWCAP, KERNEL_HWCAP_GCS), |
3202 | #endif |
3203 | HWCAP_CAP(ID_AA64PFR1_EL1, SSBS, SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS), |
3204 | #ifdef CONFIG_ARM64_BTI |
3205 | HWCAP_CAP(ID_AA64PFR1_EL1, BT, IMP, CAP_HWCAP, KERNEL_HWCAP_BTI), |
3206 | #endif |
3207 | #ifdef CONFIG_ARM64_PTR_AUTH |
3208 | HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), |
3209 | HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), |
3210 | #endif |
3211 | #ifdef CONFIG_ARM64_MTE |
3212 | HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE), |
3213 | HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3), |
3214 | #endif /* CONFIG_ARM64_MTE */ |
3215 | HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV), |
3216 | HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP), |
3217 | HWCAP_CAP(ID_AA64ISAR2_EL1, CSSC, IMP, CAP_HWCAP, KERNEL_HWCAP_CSSC), |
3218 | HWCAP_CAP(ID_AA64ISAR2_EL1, CSSC, CMPBR, CAP_HWCAP, KERNEL_HWCAP_CMPBR), |
3219 | HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM), |
3220 | HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES), |
3221 | HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), |
3222 | HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS), |
3223 | HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC), |
3224 | #ifdef CONFIG_ARM64_SME |
3225 | HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), |
3226 | HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), |
3227 | HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2), |
3228 | HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2), |
3229 | HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), |
3230 | HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), |
3231 | HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), |
3232 | HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), |
3233 | HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), |
3234 | HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), |
3235 | HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), |
3236 | HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16), |
3237 | HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32), |
3238 | HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), |
3239 | HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), |
3240 | HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), |
3241 | HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), |
3242 | HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), |
3243 | HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA), |
3244 | HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4), |
3245 | HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2), |
3246 | HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM), |
3247 | HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES), |
3248 | HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA), |
3249 | HWCAP_CAP(ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP), |
3250 | HWCAP_CAP(ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4), |
3251 | #endif /* CONFIG_ARM64_SME */ |
3252 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT), |
3253 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA), |
3254 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP4), |
3255 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2), |
3256 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8MM8, IMP, CAP_HWCAP, KERNEL_HWCAP_F8MM8), |
3257 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8MM4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8MM4), |
3258 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3), |
3259 | HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2), |
3260 | #ifdef CONFIG_ARM64_POE |
3261 | HWCAP_CAP(ID_AA64MMFR3_EL1, S1POE, IMP, CAP_HWCAP, KERNEL_HWCAP_POE), |
3262 | #endif |
3263 | {}, |
3264 | }; |
3265 | |
3266 | #ifdef CONFIG_COMPAT |
3267 | static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope) |
3268 | { |
3269 | /* |
3270 | * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available, |
3271 | * in line with that of arm32 as in vfp_init(). We make sure that the |
* check is future-proof by making sure the value is non-zero.
3273 | */ |
3274 | u32 mvfr1; |
3275 | |
3276 | WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible()); |
3277 | if (scope == SCOPE_SYSTEM) |
3278 | mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1); |
3279 | else |
3280 | mvfr1 = read_sysreg_s(SYS_MVFR1_EL1); |
3281 | |
3282 | return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDSP_SHIFT) && |
3283 | cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDInt_SHIFT) && |
3284 | cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_EL1_SIMDLS_SHIFT); |
3285 | } |
3286 | #endif |
3287 | |
3288 | static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { |
3289 | #ifdef CONFIG_COMPAT |
3290 | HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON), |
3291 | HWCAP_CAP(MVFR1_EL1, SIMDFMAC, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4), |
3292 | /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */ |
3293 | HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP), |
3294 | HWCAP_CAP(MVFR0_EL1, FPDP, VFPv3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3), |
3295 | HWCAP_CAP(MVFR1_EL1, FPHP, FP16, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP), |
3296 | HWCAP_CAP(MVFR1_EL1, SIMDHP, SIMDHP_FLOAT, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP), |
3297 | HWCAP_CAP(ID_ISAR5_EL1, AES, VMULL, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), |
3298 | HWCAP_CAP(ID_ISAR5_EL1, AES, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), |
3299 | HWCAP_CAP(ID_ISAR5_EL1, SHA1, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), |
3300 | HWCAP_CAP(ID_ISAR5_EL1, SHA2, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2), |
3301 | HWCAP_CAP(ID_ISAR5_EL1, CRC32, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32), |
3302 | HWCAP_CAP(ID_ISAR6_EL1, DP, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP), |
3303 | HWCAP_CAP(ID_ISAR6_EL1, FHM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM), |
3304 | HWCAP_CAP(ID_ISAR6_EL1, SB, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB), |
3305 | HWCAP_CAP(ID_ISAR6_EL1, BF16, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16), |
3306 | HWCAP_CAP(ID_ISAR6_EL1, I8MM, IMP, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM), |
3307 | HWCAP_CAP(ID_PFR2_EL1, SSBS, IMP, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS), |
3308 | #endif |
3309 | {}, |
3310 | }; |
3311 | |
3312 | static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) |
3313 | { |
3314 | switch (cap->hwcap_type) { |
3315 | case CAP_HWCAP: |
3316 | cpu_set_feature(cap->hwcap); |
3317 | break; |
3318 | #ifdef CONFIG_COMPAT |
3319 | case CAP_COMPAT_HWCAP: |
3320 | compat_elf_hwcap |= (u32)cap->hwcap; |
3321 | break; |
3322 | case CAP_COMPAT_HWCAP2: |
3323 | compat_elf_hwcap2 |= (u32)cap->hwcap; |
3324 | break; |
3325 | #endif |
3326 | default: |
3327 | WARN_ON(1); |
3328 | break; |
3329 | } |
3330 | } |
3331 | |
3332 | /* Check if we have a particular HWCAP enabled */ |
3333 | static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap) |
3334 | { |
3335 | bool rc; |
3336 | |
3337 | switch (cap->hwcap_type) { |
3338 | case CAP_HWCAP: |
3339 | rc = cpu_have_feature(cap->hwcap); |
3340 | break; |
3341 | #ifdef CONFIG_COMPAT |
3342 | case CAP_COMPAT_HWCAP: |
3343 | rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0; |
3344 | break; |
3345 | case CAP_COMPAT_HWCAP2: |
3346 | rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0; |
3347 | break; |
3348 | #endif |
3349 | default: |
3350 | WARN_ON(1); |
3351 | rc = false; |
3352 | } |
3353 | |
3354 | return rc; |
3355 | } |
3356 | |
3357 | static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) |
3358 | { |
3359 | /* We support emulation of accesses to CPU ID feature registers */ |
3360 | cpu_set_named_feature(CPUID); |
3361 | for (; hwcaps->matches; hwcaps++) |
3362 | if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) |
cap_set_elf_hwcap(hwcaps);
3364 | } |
3365 | |
3366 | static void update_cpu_capabilities(u16 scope_mask) |
3367 | { |
3368 | int i; |
3369 | const struct arm64_cpu_capabilities *caps; |
3370 | |
3371 | scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
3372 | for (i = 0; i < ARM64_NCAPS; i++) { |
3373 | caps = cpucap_ptrs[i]; |
3374 | if (!caps || !(caps->type & scope_mask) || |
3375 | cpus_have_cap(caps->capability) || |
3376 | !caps->matches(caps, cpucap_default_scope(caps))) |
3377 | continue; |
3378 | |
3379 | if (caps->desc && !caps->cpus) |
3380 | pr_info("detected: %s\n" , caps->desc); |
3381 | |
3382 | __set_bit(caps->capability, system_cpucaps); |
3383 | |
3384 | if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) |
3385 | set_bit(caps->capability, boot_cpucaps); |
3386 | } |
3387 | } |
3388 | |
3389 | /* |
3390 | * Enable all the available capabilities on this CPU. The capabilities |
3391 | * with BOOT_CPU scope are handled separately and hence skipped here. |
3392 | */ |
3393 | static int cpu_enable_non_boot_scope_capabilities(void *__unused) |
3394 | { |
3395 | int i; |
3396 | u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU; |
3397 | |
3398 | for_each_available_cap(i) { |
3399 | const struct arm64_cpu_capabilities *cap = cpucap_ptrs[i]; |
3400 | |
3401 | if (WARN_ON(!cap)) |
3402 | continue; |
3403 | |
3404 | if (!(cap->type & non_boot_scope)) |
3405 | continue; |
3406 | |
3407 | if (cap->cpu_enable) |
3408 | cap->cpu_enable(cap); |
3409 | } |
3410 | return 0; |
3411 | } |
3412 | |
3413 | /* |
* Run through the enabled capabilities and call cpu_enable() for each of
* them on all active CPUs.
3416 | */ |
3417 | static void __init enable_cpu_capabilities(u16 scope_mask) |
3418 | { |
3419 | int i; |
3420 | const struct arm64_cpu_capabilities *caps; |
3421 | bool boot_scope; |
3422 | |
3423 | scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
3424 | boot_scope = !!(scope_mask & SCOPE_BOOT_CPU); |
3425 | |
3426 | for (i = 0; i < ARM64_NCAPS; i++) { |
3427 | caps = cpucap_ptrs[i]; |
3428 | if (!caps || !(caps->type & scope_mask) || |
3429 | !cpus_have_cap(caps->capability)) |
3430 | continue; |
3431 | |
3432 | if (boot_scope && caps->cpu_enable) |
3433 | /* |
3434 | * Capabilities with SCOPE_BOOT_CPU scope are finalised |
3435 | * before any secondary CPU boots. Thus, each secondary |
3436 | * will enable the capability as appropriate via |
3437 | * check_local_cpu_capabilities(). The only exception is |
3438 | * the boot CPU, for which the capability must be |
3439 | * enabled here. This approach avoids costly |
3440 | * stop_machine() calls for this case. |
3441 | */ |
3442 | caps->cpu_enable(caps); |
3443 | } |
3444 | |
3445 | /* |
3446 | * For all non-boot scope capabilities, use stop_machine() |
3447 | * as it schedules the work allowing us to modify PSTATE, |
3448 | * instead of on_each_cpu() which uses an IPI, giving us a |
3449 | * PSTATE that disappears when we return. |
3450 | */ |
3451 | if (!boot_scope) |
stop_machine(cpu_enable_non_boot_scope_capabilities,
3453 | NULL, cpu_online_mask); |
3454 | } |
3455 | |
3456 | /* |
3457 | * Run through the list of capabilities to check for conflicts. |
3458 | * If the system has already detected a capability, take necessary |
3459 | * action on this CPU. |
3460 | */ |
3461 | static void verify_local_cpu_caps(u16 scope_mask) |
3462 | { |
3463 | int i; |
3464 | bool cpu_has_cap, system_has_cap; |
3465 | const struct arm64_cpu_capabilities *caps; |
3466 | |
3467 | scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
3468 | |
3469 | for (i = 0; i < ARM64_NCAPS; i++) { |
3470 | caps = cpucap_ptrs[i]; |
3471 | if (!caps || !(caps->type & scope_mask)) |
3472 | continue; |
3473 | |
3474 | cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); |
3475 | system_has_cap = cpus_have_cap(caps->capability); |
3476 | |
3477 | if (system_has_cap) { |
3478 | /* |
3479 | * Check if the new CPU misses an advertised feature, |
3480 | * which is not safe to miss. |
3481 | */ |
3482 | if (!cpu_has_cap && !cpucap_late_cpu_optional(caps)) |
3483 | break; |
3484 | /* |
3485 | * We have to issue cpu_enable() irrespective of |
* whether the CPU has it or not, as it is enabled
* system wide. It is up to the callback to take
* appropriate action on this CPU.
3489 | */ |
3490 | if (caps->cpu_enable) |
3491 | caps->cpu_enable(caps); |
3492 | } else { |
3493 | /* |
* The system doesn't have this capability. Check whether
* it is safe for this CPU to have it anyway.
3496 | */ |
3497 | if (cpu_has_cap && !cpucap_late_cpu_permitted(caps)) |
3498 | break; |
3499 | } |
3500 | } |
3501 | |
3502 | if (i < ARM64_NCAPS) { |
3503 | pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n" , |
3504 | smp_processor_id(), caps->capability, |
3505 | caps->desc, system_has_cap, cpu_has_cap); |
3506 | |
if (cpucap_panic_on_conflict(caps))
3508 | cpu_panic_kernel(); |
3509 | else |
3510 | cpu_die_early(); |
3511 | } |
3512 | } |
3513 | |
3514 | /* |
3515 | * Check for CPU features that are used in early boot |
3516 | * based on the Boot CPU value. |
3517 | */ |
3518 | static void check_early_cpu_features(void) |
3519 | { |
3520 | verify_cpu_asid_bits(); |
3521 | |
3522 | verify_local_cpu_caps(SCOPE_BOOT_CPU); |
3523 | } |
3524 | |
3525 | static void |
3526 | __verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) |
3527 | { |
3528 | |
3529 | for (; caps->matches; caps++) |
3530 | if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) { |
3531 | pr_crit("CPU%d: missing HWCAP: %s\n" , |
3532 | smp_processor_id(), caps->desc); |
3533 | cpu_die_early(); |
3534 | } |
3535 | } |
3536 | |
3537 | static void verify_local_elf_hwcaps(void) |
3538 | { |
__verify_local_elf_hwcaps(arm64_elf_hwcaps);
3540 | |
3541 | if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1))) |
__verify_local_elf_hwcaps(compat_elf_hwcaps);
3543 | } |
3544 | |
3545 | static void verify_sve_features(void) |
3546 | { |
3547 | unsigned long cpacr = cpacr_save_enable_kernel_sve(); |
3548 | |
3549 | if (vec_verify_vq_map(ARM64_VEC_SVE)) { |
3550 | pr_crit("CPU%d: SVE: vector length support mismatch\n" , |
3551 | smp_processor_id()); |
3552 | cpu_die_early(); |
3553 | } |
3554 | |
3555 | cpacr_restore(cpacr); |
3556 | } |
3557 | |
3558 | static void verify_sme_features(void) |
3559 | { |
3560 | unsigned long cpacr = cpacr_save_enable_kernel_sme(); |
3561 | |
3562 | if (vec_verify_vq_map(ARM64_VEC_SME)) { |
3563 | pr_crit("CPU%d: SME: vector length support mismatch\n" , |
3564 | smp_processor_id()); |
3565 | cpu_die_early(); |
3566 | } |
3567 | |
3568 | cpacr_restore(cpacr); |
3569 | } |
3570 | |
3571 | static void verify_hyp_capabilities(void) |
3572 | { |
3573 | u64 safe_mmfr1, mmfr0, mmfr1; |
3574 | int parange, ipa_max; |
3575 | unsigned int safe_vmid_bits, vmid_bits; |
3576 | |
3577 | if (!IS_ENABLED(CONFIG_KVM)) |
3578 | return; |
3579 | |
3580 | safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
3581 | mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
3582 | mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); |
3583 | |
3584 | /* Verify VMID bits */ |
3585 | safe_vmid_bits = get_vmid_bits(safe_mmfr1); |
3586 | vmid_bits = get_vmid_bits(mmfr1); |
3587 | if (vmid_bits < safe_vmid_bits) { |
3588 | pr_crit("CPU%d: VMID width mismatch\n" , smp_processor_id()); |
3589 | cpu_die_early(); |
3590 | } |
3591 | |
3592 | /* Verify IPA range */ |
3593 | parange = cpuid_feature_extract_unsigned_field(mmfr0, |
3594 | ID_AA64MMFR0_EL1_PARANGE_SHIFT); |
3595 | ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange); |
3596 | if (ipa_max < get_kvm_ipa_limit()) { |
3597 | pr_crit("CPU%d: IPA range mismatch\n" , smp_processor_id()); |
3598 | cpu_die_early(); |
3599 | } |
3600 | } |
3601 | |
3602 | static void verify_mpam_capabilities(void) |
3603 | { |
3604 | u64 cpu_idr = read_cpuid(ID_AA64PFR0_EL1); |
3605 | u64 sys_idr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
3606 | u16 cpu_partid_max, cpu_pmg_max, sys_partid_max, sys_pmg_max; |
3607 | |
3608 | if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, cpu_idr) != |
3609 | FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, sys_idr)) { |
3610 | pr_crit("CPU%d: MPAM version mismatch\n" , smp_processor_id()); |
3611 | cpu_die_early(); |
3612 | } |
3613 | |
3614 | cpu_idr = read_cpuid(MPAMIDR_EL1); |
3615 | sys_idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); |
3616 | if (FIELD_GET(MPAMIDR_EL1_HAS_HCR, cpu_idr) != |
3617 | FIELD_GET(MPAMIDR_EL1_HAS_HCR, sys_idr)) { |
3618 | pr_crit("CPU%d: Missing MPAM HCR\n" , smp_processor_id()); |
3619 | cpu_die_early(); |
3620 | } |
3621 | |
3622 | cpu_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, cpu_idr); |
3623 | cpu_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, cpu_idr); |
3624 | sys_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, sys_idr); |
3625 | sys_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, sys_idr); |
3626 | if (cpu_partid_max < sys_partid_max || cpu_pmg_max < sys_pmg_max) { |
3627 | pr_crit("CPU%d: MPAM PARTID/PMG max values are mismatched\n" , smp_processor_id()); |
3628 | cpu_die_early(); |
3629 | } |
3630 | } |
3631 | |
3632 | /* |
* Run through the enabled system capabilities and enable each of them on this
* CPU. The capabilities were decided based on the CPUs available at boot time.
* Any new CPU should match the system-wide status of each capability. If the
* new CPU doesn't have a capability which the system has already enabled, we
* cannot do anything to fix it up and unexpected failures could result. So
* we park the CPU.
3639 | */ |
3640 | static void verify_local_cpu_capabilities(void) |
3641 | { |
3642 | /* |
3643 | * The capabilities with SCOPE_BOOT_CPU are checked from |
3644 | * check_early_cpu_features(), as they need to be verified |
3645 | * on all secondary CPUs. |
3646 | */ |
3647 | verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU); |
3648 | verify_local_elf_hwcaps(); |
3649 | |
3650 | if (system_supports_sve()) |
3651 | verify_sve_features(); |
3652 | |
3653 | if (system_supports_sme()) |
3654 | verify_sme_features(); |
3655 | |
3656 | if (is_hyp_mode_available()) |
3657 | verify_hyp_capabilities(); |
3658 | |
3659 | if (system_supports_mpam()) |
3660 | verify_mpam_capabilities(); |
3661 | } |
3662 | |
3663 | void check_local_cpu_capabilities(void) |
3664 | { |
3665 | /* |
3666 | * All secondary CPUs should conform to the early CPU features |
3667 | * in use by the kernel based on boot CPU. |
3668 | */ |
3669 | check_early_cpu_features(); |
3670 | |
3671 | /* |
3672 | * If we haven't finalised the system capabilities, this CPU gets |
3673 | * a chance to update the errata work arounds and local features. |
3674 | * Otherwise, this CPU should verify that it has all the system |
3675 | * advertised capabilities. |
3676 | */ |
3677 | if (!system_capabilities_finalized()) |
3678 | update_cpu_capabilities(SCOPE_LOCAL_CPU); |
3679 | else |
3680 | verify_local_cpu_capabilities(); |
3681 | } |
3682 | |
3683 | bool this_cpu_has_cap(unsigned int n) |
3684 | { |
3685 | if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { |
3686 | const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n]; |
3687 | |
3688 | if (cap) |
3689 | return cap->matches(cap, SCOPE_LOCAL_CPU); |
3690 | } |
3691 | |
3692 | return false; |
3693 | } |
3694 | EXPORT_SYMBOL_GPL(this_cpu_has_cap); |
3695 | |
3696 | /* |
* This helper function is used in a narrow window when:
* - the system-wide safe registers have been set up for all the SMP CPUs, and
* - the SYSTEM_FEATURE system_cpucaps may not yet have been set.
3700 | */ |
3701 | static bool __maybe_unused __system_matches_cap(unsigned int n) |
3702 | { |
3703 | if (n < ARM64_NCAPS) { |
3704 | const struct arm64_cpu_capabilities *cap = cpucap_ptrs[n]; |
3705 | |
3706 | if (cap) |
3707 | return cap->matches(cap, SCOPE_SYSTEM); |
3708 | } |
3709 | return false; |
3710 | } |
3711 | |
3712 | void cpu_set_feature(unsigned int num) |
3713 | { |
set_bit(num, elf_hwcap);
3715 | } |
3716 | |
3717 | bool cpu_have_feature(unsigned int num) |
3718 | { |
3719 | return test_bit(num, elf_hwcap); |
3720 | } |
3721 | EXPORT_SYMBOL_GPL(cpu_have_feature); |
3722 | |
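/*
 * The masks returned below are exposed to userspace through the ELF auxiliary
 * vector (AT_HWCAP, AT_HWCAP2 and, where supported, AT_HWCAP3), typically
 * read with getauxval().
 */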
3723 | unsigned long cpu_get_elf_hwcap(void) |
3724 | { |
3725 | /* |
3726 | * We currently only populate the first 32 bits of AT_HWCAP. Please |
3727 | * note that for userspace compatibility we guarantee that bits 62 |
3728 | * and 63 will always be returned as 0. |
3729 | */ |
3730 | return elf_hwcap[0]; |
3731 | } |
3732 | |
3733 | unsigned long cpu_get_elf_hwcap2(void) |
3734 | { |
3735 | return elf_hwcap[1]; |
3736 | } |
3737 | |
3738 | unsigned long cpu_get_elf_hwcap3(void) |
3739 | { |
3740 | return elf_hwcap[2]; |
3741 | } |
3742 | |
3743 | static void __init setup_boot_cpu_capabilities(void) |
3744 | { |
3745 | kvm_arm_target_impl_cpu_init(); |
3746 | /* |
3747 | * The boot CPU's feature register values have been recorded. Detect |
3748 | * boot cpucaps and local cpucaps for the boot CPU, then enable and |
3749 | * patch alternatives for the available boot cpucaps. |
3750 | */ |
3751 | update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU); |
3752 | enable_cpu_capabilities(SCOPE_BOOT_CPU); |
3753 | apply_boot_alternatives(); |
3754 | } |
3755 | |
3756 | void __init setup_boot_cpu_features(void) |
3757 | { |
3758 | /* |
3759 | * Initialize the indirect array of CPU capabilities pointers before we |
3760 | * handle the boot CPU. |
3761 | */ |
3762 | init_cpucap_indirect_list(); |
3763 | |
3764 | /* |
3765 | * Detect broken pseudo-NMI. Must be called _before_ the call to |
3766 | * setup_boot_cpu_capabilities() since it interacts with |
3767 | * can_use_gic_priorities(). |
3768 | */ |
3769 | detect_system_supports_pseudo_nmi(); |
3770 | |
3771 | setup_boot_cpu_capabilities(); |
3772 | } |
3773 | |
3774 | static void __init setup_system_capabilities(void) |
3775 | { |
3776 | /* |
3777 | * The system-wide safe feature register values have been finalized. |
3778 | * Detect, enable, and patch alternatives for the available system |
3779 | * cpucaps. |
3780 | */ |
3781 | update_cpu_capabilities(SCOPE_SYSTEM); |
3782 | enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU); |
3783 | apply_alternatives_all(); |
3784 | |
3785 | /* |
3786 | * Log any cpucaps with a cpumask as these aren't logged by |
3787 | * update_cpu_capabilities(). |
3788 | */ |
3789 | for (int i = 0; i < ARM64_NCAPS; i++) { |
3790 | const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i]; |
3791 | |
3792 | if (caps && caps->cpus && caps->desc && |
3793 | cpumask_any(caps->cpus) < nr_cpu_ids) |
3794 | pr_info("detected: %s on CPU%*pbl\n" , |
3795 | caps->desc, cpumask_pr_args(caps->cpus)); |
3796 | } |
3797 | |
3798 | /* |
3799 | * TTBR0 PAN doesn't have its own cpucap, so log it manually. |
3800 | */ |
3801 | if (system_uses_ttbr0_pan()) |
3802 | pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n" ); |
3803 | } |
3804 | |
3805 | void __init setup_system_features(void) |
3806 | { |
3807 | setup_system_capabilities(); |
3808 | |
3809 | kpti_install_ng_mappings(); |
3810 | |
3811 | sve_setup(); |
3812 | sme_setup(); |
3813 | |
3814 | /* |
3815 | * Check for sane CTR_EL0.CWG value. |
3816 | */ |
3817 | if (!cache_type_cwg()) |
3818 | pr_warn("No Cache Writeback Granule information, assuming %d\n" , |
3819 | ARCH_DMA_MINALIGN); |
3820 | } |
3821 | |
3822 | void __init setup_user_features(void) |
3823 | { |
3824 | user_feature_fixup(); |
3825 | |
3826 | setup_elf_hwcaps(arm64_elf_hwcaps); |
3827 | |
3828 | if (system_supports_32bit_el0()) { |
3829 | setup_elf_hwcaps(compat_elf_hwcaps); |
3830 | elf_hwcap_fixup(); |
3831 | } |
3832 | |
3833 | minsigstksz_setup(); |
3834 | } |
3835 | |
3836 | static int enable_mismatched_32bit_el0(unsigned int cpu) |
3837 | { |
3838 | /* |
3839 | * The first 32-bit-capable CPU we detected and so can no longer |
3840 | * be offlined by userspace. -1 indicates we haven't yet onlined |
3841 | * a 32-bit-capable CPU. |
3842 | */ |
3843 | static int lucky_winner = -1; |
3844 | |
3845 | struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); |
3846 | bool cpu_32bit = false; |
3847 | |
3848 | if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { |
if (!housekeeping_cpu(cpu, HK_TYPE_TICK))
3850 | pr_info("Treating adaptive-ticks CPU %u as 64-bit only\n" , cpu); |
3851 | else |
3852 | cpu_32bit = true; |
3853 | } |
3854 | |
3855 | if (cpu_32bit) { |
cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
3857 | static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0); |
3858 | } |
3859 | |
if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
3861 | return 0; |
3862 | |
3863 | if (lucky_winner >= 0) |
3864 | return 0; |
3865 | |
3866 | /* |
3867 | * We've detected a mismatch. We need to keep one of our CPUs with |
3868 | * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting |
3869 | * every CPU in the system for a 32-bit task. |
3870 | */ |
3871 | lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask, |
3872 | cpu_active_mask); |
get_cpu_device(lucky_winner)->offline_disabled = true;
3874 | setup_elf_hwcaps(compat_elf_hwcaps); |
3875 | elf_hwcap_fixup(); |
3876 | pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n" , |
3877 | cpu, lucky_winner); |
3878 | return 0; |
3879 | } |
3880 | |
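/*
 * Registering with CPUHP_AP_ONLINE_DYN below invokes
 * enable_mismatched_32bit_el0() for each CPU that is already online at
 * registration time as well as for CPUs onlined later, so the 32-bit EL0
 * mask is populated at boot and kept up to date across hotplug.
 */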
3881 | static int __init init_32bit_el0_mask(void) |
3882 | { |
3883 | if (!allow_mismatched_32bit_el0) |
3884 | return 0; |
3885 | |
if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
3887 | return -ENOMEM; |
3888 | |
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"arm64/mismatched_32bit_el0:online",
enable_mismatched_32bit_el0, NULL);
3892 | } |
3893 | subsys_initcall_sync(init_32bit_el0_mask); |
3894 | |
3895 | static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) |
3896 | { |
3897 | cpu_enable_swapper_cnp(); |
3898 | } |
3899 | |
3900 | /* |
3901 | * We emulate only the following system register space. |
3902 | * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7] |
3903 | * See Table C5-6 System instruction encodings for System register accesses, |
* ARMv8 ARM (ARM DDI 0487A.f) for more details.
3905 | */ |
3906 | static inline bool __attribute_const__ is_emulated(u32 id) |
3907 | { |
3908 | return (sys_reg_Op0(id) == 0x3 && |
3909 | sys_reg_CRn(id) == 0x0 && |
3910 | sys_reg_Op1(id) == 0x0 && |
3911 | (sys_reg_CRm(id) == 0 || |
3912 | ((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7)))); |
3913 | } |
3914 | |
3915 | /* |
3916 | * With CRm == 0, reg should be one of : |
3917 | * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1. |
3918 | */ |
3919 | static inline int emulate_id_reg(u32 id, u64 *valp) |
3920 | { |
3921 | switch (id) { |
3922 | case SYS_MIDR_EL1: |
3923 | *valp = read_cpuid_id(); |
3924 | break; |
3925 | case SYS_MPIDR_EL1: |
3926 | *valp = SYS_MPIDR_SAFE_VAL; |
3927 | break; |
3928 | case SYS_REVIDR_EL1: |
3929 | /* IMPLEMENTATION DEFINED values are emulated with 0 */ |
3930 | *valp = 0; |
3931 | break; |
3932 | default: |
3933 | return -EINVAL; |
3934 | } |
3935 | |
3936 | return 0; |
3937 | } |
3938 | |
3939 | static int emulate_sys_reg(u32 id, u64 *valp) |
3940 | { |
3941 | struct arm64_ftr_reg *regp; |
3942 | |
3943 | if (!is_emulated(id)) |
3944 | return -EINVAL; |
3945 | |
3946 | if (sys_reg_CRm(id) == 0) |
3947 | return emulate_id_reg(id, valp); |
3948 | |
regp = get_arm64_ftr_reg_nowarn(id);
3950 | if (regp) |
3951 | *valp = arm64_ftr_reg_user_value(regp); |
3952 | else |
3953 | /* |
3954 | * The untracked registers are either IMPLEMENTATION DEFINED |
* (e.g., ID_AFR0_EL1) or reserved RAZ.
3956 | */ |
3957 | *valp = 0; |
3958 | return 0; |
3959 | } |
3960 | |
3961 | int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt) |
3962 | { |
3963 | int rc; |
3964 | u64 val; |
3965 | |
rc = emulate_sys_reg(sys_reg, &val);
3967 | if (!rc) { |
3968 | pt_regs_write_reg(regs, rt, val); |
3969 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
3970 | } |
3971 | return rc; |
3972 | } |
3973 | |
3974 | bool try_emulate_mrs(struct pt_regs *regs, u32 insn) |
3975 | { |
3976 | u32 sys_reg, rt; |
3977 | |
3978 | if (compat_user_mode(regs) || !aarch64_insn_is_mrs(insn)) |
3979 | return false; |
3980 | |
3981 | /* |
3982 | * sys_reg values are defined as used in mrs/msr instruction. |
3983 | * shift the imm value to get the encoding. |
3984 | */ |
3985 | sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5; |
3986 | rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); |
3987 | return do_emulate_mrs(regs, sys_reg, rt) == 0; |
3988 | } |
3989 | |
3990 | enum mitigation_state arm64_get_meltdown_state(void) |
3991 | { |
3992 | if (__meltdown_safe) |
3993 | return SPECTRE_UNAFFECTED; |
3994 | |
3995 | if (arm64_kernel_unmapped_at_el0()) |
3996 | return SPECTRE_MITIGATED; |
3997 | |
3998 | return SPECTRE_VULNERABLE; |
3999 | } |
4000 | |
4001 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, |
4002 | char *buf) |
4003 | { |
4004 | switch (arm64_get_meltdown_state()) { |
4005 | case SPECTRE_UNAFFECTED: |
4006 | return sprintf(buf, fmt: "Not affected\n" ); |
4007 | |
4008 | case SPECTRE_MITIGATED: |
4009 | return sprintf(buf, fmt: "Mitigation: PTI\n" ); |
4010 | |
4011 | default: |
4012 | return sprintf(buf, fmt: "Vulnerable\n" ); |
4013 | } |
4014 | } |
4015 | |