// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points. At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1
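
/*
 * FLT_LOAD and FLT_STORE mirror the read/write direction of the
 * access, while FLT_IFETCH is kept negative so an instruction fetch
 * can never be mistaken for either; do_page_fault() maps each value
 * to the matching VM_READ/VM_WRITE/VM_EXEC permission check.
 */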


/*
 * Canonical page fault handler
 */
static void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

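	/*
	 * Servicing the fault may sleep while paging in data, so it is
	 * safe (and necessary) to run with interrupts enabled again; the
	 * atomic-context cases were already routed to no_context above.
	 */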
	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
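	/*
	 * lock_mm_and_find_vma() takes the mmap read lock and returns the
	 * VMA covering (or, for stack growth, extendable to) the faulting
	 * address; on failure it returns NULL with the lock already
	 * dropped, which is why the error path below is "nosemaphore".
	 */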
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		goto bad_area_nosemaphore;

	/* Address space is OK. Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

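	/*
	 * Permissions are fine; ask the core MM to resolve the fault.
	 * The return value is a vm_fault_t bitmask describing how (or
	 * whether) the fault was handled.
	 */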
	fault = handle_mm_fault(vma, address, flags, regs);

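	/*
	 * If a signal arrived while the fault was being serviced, the
	 * core MM bailed out early and has already released the mmap
	 * lock, so just return and let the signal be delivered;
	 * kernel-mode faults still need the exception-table fixup.
	 */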
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_RETRY) {
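			/*
			 * handle_mm_fault() dropped the mmap lock before
			 * returning VM_FAULT_RETRY, so loop back and take
			 * it again; FAULT_FLAG_TRIED marks this as the
			 * second attempt.
			 */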
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

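	/*
	 * A fault in kernel mode is only survivable if the faulting
	 * instruction is listed in the exception table (e.g. the
	 * user-copy helpers); in that case execution is redirected to
	 * the registered fixup address.
	 */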
no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
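/*
 * HVM exception entry points: the hardware exception decode lands on
 * one of the handlers below based on the fault class. Each one reads
 * the faulting virtual address out of the saved register state
 * (pt_badva) and forwards it with the canonical cause code.
 */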
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}
