// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/kfence.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

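/*
 * One fault_info entry ties a handler to the signal and si_code delivered
 * when that handler cannot resolve the fault.
 */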
struct fault_info {
	int (*fn)(unsigned long far, unsigned long esr,
		  struct pt_regs *regs);
	int sig;
	int code;
	const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

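/*
 * The fault status code (FSC, ESR_ELx bits [5:0]) indexes directly into
 * fault_info[]; e.g. an ESR with FSC 0x07 selects the "level 3 translation
 * fault" entry.
 */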
static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
	return fault_info + (esr & ESR_ELx_FSC);
}

static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
	return debug_fault_info + DBG_ESR_EVT(esr);
}

static void data_abort_decode(unsigned long esr)
{
	unsigned long iss2 = ESR_ELx_ISS2(esr);

	pr_alert("Data abort info:\n");

	if (esr & ESR_ELx_ISV) {
		pr_alert("  Access size = %u byte(s)\n",
			 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
		pr_alert("  SSE = %lu, SRT = %lu\n",
			 (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
			 (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
		pr_alert("  SF = %lu, AR = %lu\n",
			 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
			 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
	} else {
		pr_alert("  ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n",
			 esr & ESR_ELx_ISS_MASK, iss2);
	}

	pr_alert("  CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n",
		 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
		 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT,
		 (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT,
		 (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT);

	pr_alert("  GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n",
		 (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT,
		 (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT,
		 (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT,
		 (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT);
}

static void mem_abort_decode(unsigned long esr)
{
	pr_alert("Mem abort info:\n");

	pr_alert("  ESR = 0x%016lx\n", esr);
	pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
		 ESR_ELx_EC(esr), esr_get_class_string(esr),
		 (esr & ESR_ELx_IL) ? 32 : 16);
	pr_alert("  SET = %lu, FnV = %lu\n",
		 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
		 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
	pr_alert("  EA = %lu, S1PTW = %lu\n",
		 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
		 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
	pr_alert("  FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
		 esr_to_fault_info(esr)->name);

	if (esr_is_data_abort(esr))
		data_abort_decode(esr);
}

static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{
	/* Either init_pg_dir or swapper_pg_dir */
	if (mm == &init_mm)
		return __pa_symbol(mm->pgd);

	return (unsigned long)virt_to_phys(mm->pgd);
}

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	pgd_t pgd;

	if (is_ttbr0_addr(addr)) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
			pr_alert("[%016lx] user address but active_mm is swapper\n",
				 addr);
			return;
		}
	} else if (is_ttbr1_addr(addr)) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
		pr_alert("[%016lx] address between user and kernel address ranges\n",
			 addr);
		return;
	}

	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 vabits_actual, mm_to_pgd_phys(mm));
	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

	do {
		p4d_t *p4dp, p4d;
		pud_t *pudp, pud;
		pmd_t *pmdp, pmd;
		pte_t *ptep, pte;

		if (pgd_none(pgd) || pgd_bad(pgd))
			break;

		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		pr_cont(", p4d=%016llx", p4d_val(p4d));
		if (p4d_none(p4d) || p4d_bad(p4d))
			break;

		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		pr_cont(", pud=%016llx", pud_val(pud));
		if (pud_none(pud) || pud_bad(pud))
			break;

		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		pr_cont(", pmd=%016llx", pmd_val(pmd));
		if (pmd_none(pmd) || pmd_bad(pmd))
			break;

		ptep = pte_offset_map(pmdp, addr);
		if (!ptep)
			break;

		pte = __ptep_get(ptep);
		pr_cont(", pte=%016llx", pte_val(pte));
		pte_unmap(ptep);
	} while(0);

	pr_cont("\n");
}

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like __set_ptes(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int __ptep_set_access_flags(struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep,
			    pte_t entry, int dirty)
{
	pteval_t old_pteval, pteval;
	pte_t pte = __ptep_get(ptep);

	if (pte_same(pte, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state. The PTE_RDONLY bit must
	 * be set to the most permissive (lowest value) of *ptep and entry
	 * (calculated as: a & b == ~(~a | ~b)).
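	 *
	 * For example, if *ptep is read-only (PTE_RDONLY == 1) and entry is
	 * writable (PTE_RDONLY == 0), the XOR/OR/XOR sequence below computes
	 * ~(~1 | ~0) == 0 for the RDONLY bit, so the more permissive
	 * writable setting wins; all other flag bits combine with a plain OR.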
	 */
	pte_val(entry) ^= PTE_RDONLY;
	pteval = pte_val(pte);
	do {
		old_pteval = pteval;
		pteval ^= PTE_RDONLY;
		pteval |= pte_val(entry);
		pteval ^= PTE_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);

	/* Invalidate a stale read-only entry */
	if (dirty)
		flush_tlb_page(vma, address);
	return 1;
}

static bool is_el1_instruction_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static bool is_el1_data_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
					   struct pt_regs *regs)
{
	if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
		return false;

	if (esr_fsc_is_permission_fault(esr))
		return true;

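	/*
	 * With software TTBR0 PAN, a disallowed kernel access to user memory
	 * takes a translation fault rather than a permission fault, so
	 * report it as a permission fault when PSTATE.PAN was set.
	 */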
	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
		return esr_fsc_is_translation_fault(esr) &&
			(regs->pstate & PSR_PAN_BIT);

	return false;
}

static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
							unsigned long esr,
							struct pt_regs *regs)
{
	unsigned long flags;
	u64 par, dfsc;

	if (!is_el1_data_abort(esr) || !esr_fsc_is_translation_fault(esr))
		return false;

	local_irq_save(flags);
	asm volatile("at s1e1r, %0" :: "r" (addr));
	isb();
	par = read_sysreg_par();
	local_irq_restore(flags);

	/*
	 * If we now have a valid translation, treat the translation fault as
	 * spurious.
	 */
	if (!(par & SYS_PAR_EL1_F))
		return true;

	/*
	 * If we got a different type of fault from the AT instruction,
	 * treat the translation fault as spurious.
	 */
	dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
	return !esr_fsc_is_translation_fault(dfsc);
}

static void die_kernel_fault(const char *msg, unsigned long addr,
			     unsigned long esr, struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
		 addr);

	kasan_non_canonical_hook(addr);

	mem_abort_decode(esr);

	show_pte(addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	make_task_dead(SIGKILL);
}

321 | |
322 | #ifdef CONFIG_KASAN_HW_TAGS |
323 | static void report_tag_fault(unsigned long addr, unsigned long esr, |
324 | struct pt_regs *regs) |
325 | { |
326 | /* |
327 | * SAS bits aren't set for all faults reported in EL1, so we can't |
328 | * find out access size. |
329 | */ |
330 | bool is_write = !!(esr & ESR_ELx_WNR); |
331 | kasan_report((void *)addr, 0, is_write, regs->pc); |
332 | } |
333 | #else |
334 | /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */ |
335 | static inline void report_tag_fault(unsigned long addr, unsigned long esr, |
336 | struct pt_regs *regs) { } |
337 | #endif |

static void do_tag_recovery(unsigned long addr, unsigned long esr,
			    struct pt_regs *regs)
{
	report_tag_fault(addr, esr, regs);

	/*
	 * Disable MTE Tag Checking on the local CPU for the current EL.
	 * It will be done lazily on the other CPUs when they hit a
	 * tag fault.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
			 SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
	isb();
}

static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
	unsigned long fsc = esr & ESR_ELx_FSC;

	if (!is_el1_data_abort(esr))
		return false;

	if (fsc == ESR_ELx_FSC_MTE)
		return true;

	return false;
}

static void __do_kernel_fault(unsigned long addr, unsigned long esr,
			      struct pt_regs *regs)
{
	const char *msg;

	/*
	 * Are we prepared to handle this kernel fault?
	 * We are almost certainly not prepared to handle instruction faults.
	 */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
		return;

	if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
			   "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
		return;

	if (is_el1_mte_sync_tag_check_fault(esr)) {
		do_tag_recovery(addr, esr, regs);

		return;
	}

	if (is_el1_permission_fault(addr, esr, regs)) {
		if (esr & ESR_ELx_WNR)
			msg = "write to read-only memory";
		else if (is_el1_instruction_abort(esr))
			msg = "execute from non-executable memory";
		else
			msg = "read from unreadable memory";
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		if (esr_fsc_is_translation_fault(esr) &&
		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
			return;

		msg = "paging request";
	}

	if (efi_runtime_fixup_exception(regs, msg))
		return;

	die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned long esr)
{
	current->thread.fault_address = address;

	/*
	 * If the faulting address is in the kernel, we must sanitize the ESR.
	 * From userspace's point of view, kernel-only mappings don't exist
	 * at all, so we report them as level 0 translation faults.
	 * (This is not quite the way that "no mapping there at all" behaves:
	 * an alignment fault not caused by the memory type would take
	 * precedence over translation fault for a real access to empty
	 * space. Unfortunately we can't easily distinguish "alignment fault
	 * not caused by memory type" from "alignment fault caused by memory
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (!is_ttbr0_addr(current->thread.fault_address)) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
			 * These bits provide only information about the
			 * faulting instruction, which userspace knows already.
			 * We explicitly clear bits which are architecturally
			 * RES0 in case they are given meanings in future.
			 * We always report the ESR as if the fault was taken
			 * to EL1 and so ISV and the bits in ISS[23:14] are
			 * clear. (In fact it always will be a fault to EL1.)
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
				ESR_ELx_CM | ESR_ELx_WNR;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		case ESR_ELx_EC_IABT_LOW:
			/*
			 * Claim a level 0 translation fault.
			 * All other bits are architecturally RES0 for faults
			 * reported with that DFSC value, so we clear them.
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		default:
			/*
			 * This should never happen (entry.S only brings us
			 * into this code for insn and data aborts from a lower
			 * exception level). Fail safe by not providing an ESR
			 * context record at all.
			 */
			WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
			esr = 0;
			break;
		}
	}

	current->thread.fault_code = esr;
}

static void do_bad_area(unsigned long far, unsigned long esr,
			struct pt_regs *regs)
{
	unsigned long addr = untagged_addr(far);

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs)) {
		const struct fault_info *inf = esr_to_fault_info(esr);

		set_thread_esr(addr, esr);
		arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
	} else {
		__do_kernel_fault(addr, esr, regs);
	}
}

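/*
 * Fault results private to this file, used alongside the generic
 * VM_FAULT_* bits: no VMA covers the address (BADMAP), or the VMA's
 * permissions don't allow the access (BADACCESS).
 */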
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

static vm_fault_t __do_page_fault(struct mm_struct *mm,
				  struct vm_area_struct *vma, unsigned long addr,
				  unsigned int mm_flags, unsigned long vm_flags,
				  struct pt_regs *regs)
{
	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred.
	 */
	if (!(vma->vm_flags & vm_flags))
		return VM_FAULT_BADACCESS;
	return handle_mm_fault(vma, addr, mm_flags, regs);
}

static bool is_el0_instruction_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

/*
 * Note: not valid for EL1 DC IVAC, but we never use that such that it
 * should fault. EL0 cannot issue DC IVAC (undef).
 */
static bool is_write_abort(unsigned long esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}

static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
				   struct pt_regs *regs)
{
	const struct fault_info *inf;
	struct mm_struct *mm = current->mm;
	vm_fault_t fault;
	unsigned long vm_flags;
	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
	unsigned long addr = untagged_addr(far);
	struct vm_area_struct *vma;

	if (kprobe_page_fault(regs, esr))
		return 0;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	/*
	 * vm_flags tells us what bits we must have in vma->vm_flags
	 * for the fault to be benign; __do_page_fault() checks
	 * vma->vm_flags & vm_flags and returns an error if the
	 * intersection is empty.
	 */
	if (is_el0_instruction_abort(esr)) {
		/* It was exec fault */
		vm_flags = VM_EXEC;
		mm_flags |= FAULT_FLAG_INSTRUCTION;
	} else if (is_write_abort(esr)) {
		/* It was write fault */
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	} else {
		/* It was read fault */
		vm_flags = VM_READ;
		/* Write implies read */
		vm_flags |= VM_WRITE;
		/* If EPAN is absent then exec implies read */
		if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
			vm_flags |= VM_EXEC;
	}

	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
		if (is_el1_instruction_abort(esr))
			die_kernel_fault("execution of user memory",
					 addr, esr, regs);

		if (!search_exception_tables(regs->pc))
			die_kernel_fault("access to user memory outside uaccess routines",
					 addr, esr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (!(mm_flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		mm_flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = VM_FAULT_BADMAP;
		goto done;
	}

	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return 0;

	if (fault & VM_FAULT_RETRY) {
		mm_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	mmap_read_unlock(mm);

done:
	/*
	 * Handle the "normal" (no error) case first.
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	inf = esr_to_fault_info(esr);
	set_thread_esr(addr, esr);
	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
	} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
		unsigned int lsb;

		lsb = PAGE_SHIFT;
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

		arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		arm64_force_sig_fault(SIGSEGV,
				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
				      far, inf->name);
	}

	return 0;

no_context:
	__do_kernel_fault(addr, esr, regs);
	return 0;
}

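/*
 * Translation faults on user (TTBR0) addresses get the full page fault
 * handling above; translation faults on any other address cannot be
 * resolved by the mm and are reported as bad areas.
 */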
static int __kprobes do_translation_fault(unsigned long far,
					  unsigned long esr,
					  struct pt_regs *regs)
{
	unsigned long addr = untagged_addr(far);

	if (is_ttbr0_addr(addr))
		return do_page_fault(far, esr, regs);

	do_bad_area(far, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
	    compat_user_mode(regs))
		return do_compat_alignment_fixup(far, regs);
	do_bad_area(far, esr, regs);
	return 0;
}

static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	return 1; /* "fault" */
}

static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	const struct fault_info *inf;
	unsigned long siaddr;

	inf = esr_to_fault_info(esr);

	if (user_mode(regs) && apei_claim_sea(regs) == 0) {
		/*
		 * APEI claimed this as a firmware-first notification.
		 * Some processing deferred to task_work before ret_to_user().
		 */
		return 0;
	}

	if (esr & ESR_ELx_FnV) {
		siaddr = 0;
	} else {
		/*
		 * The architecture specifies that the tag bits of FAR_EL1 are
		 * UNKNOWN for synchronous external aborts. Mask them out now
		 * so that userspace doesn't see them.
		 */
		siaddr = untagged_addr(far);
	}
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

	return 0;
}

static int do_tag_check_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	/*
	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
	 * for tag check faults. Set them to corresponding bits in the untagged
	 * address.
	 */
	far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
	do_bad_area(far, esr, regs);
	return 0;
}

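/*
 * esr_to_fault_info() indexes this table with the fault status code
 * (ESR_ELx bits [5:0]), so the 64 entries must stay in FSC order.
 */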
static const struct fault_info fault_info[] = {
	{ do_bad,		SIGKILL, SI_KERNEL,	"ttbr address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 1 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 2 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 3 address size fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 0 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 0 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault" },
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous external abort" },
	{ do_tag_check_fault,	SIGSEGV, SEGV_MTESERR,	"synchronous tag check fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 18" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level -1 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 (translation table walk)" },
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous parity or ECC error" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 25" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 26" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level -1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 32" },
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 34" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 35" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 36" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 37" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 38" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 39" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 40" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level -1 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 42" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level -1 translation fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 44" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 45" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 46" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 47" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"TLB conflict abort" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"Unsupported atomic hardware update fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 50" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 51" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  BUS_OBJERR,	"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 54" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 55" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 56" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 57" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 58" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 59" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 60" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"section domain fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"page domain fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 63" },
};

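/*
 * Common entry point for data and instruction aborts: run the FSC-specific
 * handler, then fall back to a fatal signal (or a kernel die()) if the
 * handler could not resolve the fault.
 */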
void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_fault_info(esr);
	unsigned long addr = untagged_addr(far);

	if (!inf->fn(far, esr, regs))
		return;

	if (!user_mode(regs))
		die_kernel_fault(inf->name, addr, esr, regs);

	/*
	 * At this point we have an unrecognized fault type whose tag bits may
	 * have been defined as UNKNOWN. Therefore we only expose the untagged
	 * address to the signal handler.
	 */
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
}
NOKPROBE_SYMBOL(do_mem_abort);

void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
	arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
			 addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint" },
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step" },
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint" },
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 3" },
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT" },
	{ do_bad,	SIGKILL,	SI_KERNEL,	"aarch32 vector catch" },
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK" },
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 7" },
};

void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned long, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn = fn;
	debug_fault_info[nr].sig = sig;
	debug_fault_info[nr].code = code;
	debug_fault_info[nr].name = name;
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile. Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
			struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_debug_fault_info(esr);
	unsigned long pc = instruction_pointer(regs);

	debug_exception_enter(regs);

	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

	if (inf->fn(addr_if_watchpoint, esr, regs)) {
		arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
	}

	debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);

/*
 * Used during anonymous page fault handling.
 */
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

	/*
	 * If the page is mapped with PROT_MTE, initialise the tags at the
	 * point of allocation and page zeroing as this is usually faster than
	 * separate DC ZVA and STGM.
	 */
	if (vma->vm_flags & VM_MTE)
		flags |= __GFP_ZEROTAGS;

	return vma_alloc_folio(flags, 0, vma, vaddr, false);
}

void tag_clear_highpage(struct page *page)
{
	/* Newly allocated page, shouldn't have been tagged yet */
	WARN_ON_ONCE(!try_page_mte_tagging(page));
	mte_zero_clear_page_tags(page_address(page));
	set_page_mte_tagged(page);
}
957 | |