1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Contains CPU specific errata definitions |
4 | * |
5 | * Copyright (C) 2014 ARM Ltd. |
6 | */ |
7 | |
8 | #include <linux/arm-smccc.h> |
9 | #include <linux/types.h> |
10 | #include <linux/cpu.h> |
11 | #include <asm/cpu.h> |
12 | #include <asm/cputype.h> |
13 | #include <asm/cpufeature.h> |
14 | #include <asm/kvm_asm.h> |
15 | #include <asm/smp_plat.h> |
16 | |
17 | static bool __maybe_unused |
18 | is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) |
19 | { |
20 | const struct arm64_midr_revidr *fix; |
21 | u32 midr = read_cpuid_id(), revidr; |
22 | |
23 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
24 | if (!is_midr_in_range(midr, &entry->midr_range)) |
25 | return false; |
26 | |
27 | midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; |
28 | revidr = read_cpuid(REVIDR_EL1); |
29 | for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++) |
30 | if (midr == fix->midr_rv && (revidr & fix->revidr_mask)) |
31 | return false; |
32 | |
33 | return true; |
34 | } |
35 | |
/*
 * Match when the local CPU's MIDR appears in entry->midr_range_list.
 * Must run on the CPU being checked, with preemption disabled.
 */
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
43 | |
44 | static bool __maybe_unused |
45 | is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) |
46 | { |
47 | u32 model; |
48 | |
49 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
50 | |
51 | model = read_cpuid_id(); |
52 | model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | |
53 | MIDR_ARCHITECTURE_MASK; |
54 | |
55 | return model == entry->midr_range.model; |
56 | } |
57 | |
/*
 * Detect a CPU whose CTR_EL0 disagrees, in the strictly-checked fields,
 * with the sanitised system-wide value.
 */
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behaves
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
89 | |
90 | static void |
91 | cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) |
92 | { |
93 | u64 mask = arm64_ftr_reg_ctrel0.strict_mask; |
94 | bool enable_uct_trap = false; |
95 | |
96 | /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ |
97 | if ((read_cpuid_cachetype() & mask) != |
98 | (arm64_ftr_reg_ctrel0.sys_val & mask)) |
99 | enable_uct_trap = true; |
100 | |
101 | /* ... or if the system is affected by an erratum */ |
102 | if (cap->capability == ARM64_WORKAROUND_1542419) |
103 | enable_uct_trap = true; |
104 | |
105 | if (enable_uct_trap) |
106 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); |
107 | } |
108 | |
109 | #ifdef CONFIG_ARM64_ERRATUM_1463225 |
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	/* Only matches when the kernel itself is running in hyp mode (EL2). */
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
116 | #endif |
117 | |
/*
 * Clear SCTLR_EL1.UCI so that EL0 cache maintenance instructions trap
 * to the kernel on this CPU.
 */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
123 | |
/*
 * Helpers for building arm64_cpu_capabilities entries that match on MIDR.
 * The CAP_MIDR_* macros only fill in .matches and the range data; the
 * ERRATA_MIDR_* variants additionally tag the entry as a local-CPU erratum.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Single (variant/revision, REVIDR-bit) pair marking a fixed part */
#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
160 | |
/* MIDRs covering the ThunderX2 / Vulcan core family. */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
166 | |
/*
 * ThunderX2 erratum 219 (TVM trapping): only needed on affected parts
 * when EL2 is usable and some CPU reports a non-zero thread ID.
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	/*
	 * A non-zero MPIDR affinity level 0 on any possible CPU presumably
	 * indicates SMT is enabled — NOTE(review): confirm against the
	 * erratum notice.
	 */
	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
184 | |
/* Match any Neoverse-N1 revision whose CTR_EL0 reports the DIC feature. */
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
196 | |
197 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI |
/* CPUs that need TLB invalidations to be repeated to take effect. */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		/* Falkor V1 r0p0 */
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		/* Cortex-A76 r0p0 - r3p0 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
230 | #endif |
231 | |
232 | #ifdef CONFIG_CAVIUM_ERRATUM_23154 |
/* All ThunderX / OcteonTX2 parts affected by errata 23154 and 38545. */
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
245 | #endif |
246 | |
247 | #ifdef CONFIG_CAVIUM_ERRATUM_27456 |
/*
 * Deliberately non-static: presumably referenced outside this file via a
 * header declaration — NOTE(review): verify before changing linkage.
 */
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
255 | #endif |
256 | |
257 | #ifdef CONFIG_CAVIUM_ERRATUM_30115 |
/* ThunderX parts affected by Cavium erratum 30115. */
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
267 | #endif |
268 | |
269 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
/* Falkor v1 r0p0 plus all Kryo parts, for Qualcomm erratum 1003. */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
280 | #endif |
281 | |
282 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE |
/* Cortex-A53 revisions needing DC CVAC promoted to DC CIVAC (see .desc). */
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
296 | #endif |
297 | |
298 | #ifdef CONFIG_ARM64_ERRATUM_1418040 |
299 | /* |
300 | * - 1188873 affects r0p0 to r2p0 |
301 | * - 1418040 affects r0p0 to r3p1 |
302 | */ |
303 | static const struct midr_range erratum_1418040_list[] = { |
304 | /* Cortex-A76 r0p0 to r3p1 */ |
305 | MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), |
306 | /* Neoverse-N1 r0p0 to r3p1 */ |
307 | MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1), |
308 | /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */ |
309 | MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf), |
310 | {}, |
311 | }; |
312 | #endif |
313 | |
314 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
/* Parts affected by ARM erratum 845719. */
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
324 | #endif |
325 | |
326 | #ifdef CONFIG_ARM64_ERRATUM_843419 |
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 parts with REVIDR bit 8 set are not affected */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
341 | #endif |
342 | |
343 | #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT |
/* Parts affected by one of the speculative-AT errata (see .desc). */
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
361 | #endif |
362 | |
363 | #ifdef CONFIG_ARM64_ERRATUM_1463225 |
/* Parts affected by ARM erratum 1463225. */
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
371 | #endif |
372 | |
373 | #ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE |
/* Parts whose TRBE misbehaves in fill modes other than circular buffer. */
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
385 | #endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */ |
386 | |
387 | #ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE |
/* Parts affected by the TSB flush-failure errata (see .desc). */
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
398 | #endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ |
399 | |
400 | #ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE |
401 | static struct midr_range trbe_write_out_of_range_cpus[] = { |
402 | #ifdef CONFIG_ARM64_ERRATUM_2253138 |
403 | MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), |
404 | MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100), |
405 | #endif |
406 | #ifdef CONFIG_ARM64_ERRATUM_2224489 |
407 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), |
408 | MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0), |
409 | #endif |
410 | {}, |
411 | }; |
412 | #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */ |
413 | |
414 | #ifdef CONFIG_ARM64_ERRATUM_1742098 |
415 | static struct midr_range broken_aarch32_aes[] = { |
416 | MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf), |
417 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), |
418 | {}, |
419 | }; |
#endif /* CONFIG_ARM64_ERRATUM_1742098 */
421 | |
422 | #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD |
/* Parts affected by the speculative-unprivileged-load errata (see .desc). */
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
433 | #endif |
434 | |
/*
 * Master table of CPU erratum workaround capabilities. Each entry
 * matches on a MIDR range/list or a custom callback; where present,
 * .cpu_enable applies the workaround on each matching CPU.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472" ,
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075" ,
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220" ,
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419" ,
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719" ,
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545" ,
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456" ,
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115" ,
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		/* Unconditional entry: checked on every system */
		.desc = "Mismatched cache type (CTR_EL0)" ,
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003" ,
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009" ,
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921" ,
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2" ,
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a" ,
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4" ,
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB" ,
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040" ,
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923" ,
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225" ,
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)" ,
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)" ,
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)" ,
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)" ,
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum" ,
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208" ,
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223" ,
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489" ,
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198" ,
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057" ,
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142" ,
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168" ,
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923" ,
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691" ,
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098" ,
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417" ,
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		/* r1p1 parts with REVIDR bit 25 set are not affected */
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295" ,
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38" ,
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	},
#endif
	{
	/* Terminating empty entry */
	}
};
750 | |