// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

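/*
 * Search the kernel exception tables for the faulting instruction and,
 * if a fixup entry exists, redirect the PC to the fixup handler.
 * Returns 1 when the fault was handled, 0 otherwise.
 */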
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}

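/*
 * Decide whether the fault was caused by a write: both the store-side
 * TLB-invalid vector and the TLB-modified vector (a store to a page whose
 * PTE is not yet marked writable/dirty) are raised on stores.
 */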
static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:
		return true;
	case VEC_TLBMODIFIED:
		return true;
	}

	return false;
}

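/*
 * CPUs without ldex/stex (load/store exclusive) emulate cmpxchg with a
 * plain ldw/stw pair. If a TLB-modified fault lands on the stw, restart
 * the sequence at the ldw so the emulated cmpxchg is re-executed as a
 * whole once the fault has been resolved.
 */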
#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
	return;
}
#endif

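/*
 * A fault in kernel mode with no exception-table fixup is fatal:
 * report the bad access and kill the task.
 */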
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

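/*
 * Translate a VM_FAULT_ERROR result from handle_mm_fault() into the
 * appropriate action: invoke the OOM machinery, raise SIGBUS, or die
 * if the fault happened in kernel mode.
 */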
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

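/*
 * The access hit an unmapped or forbidden area and mmap_lock is not
 * held: deliver SIGSEGV for user-mode faults, otherwise fall back to
 * the kernel-fault path.
 */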
static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

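/*
 * Faults in the vmalloc region are resolved by copying the relevant
 * entry from the reference page table (init_mm.pgd) into the current
 * page table. No locks may be taken here, as this can run in
 * interrupt context.
 */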
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

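	/*
	 * The pud and pmd levels are folded here (csky uses two-level
	 * page tables), so the pgd entry can be reinterpreted directly
	 * at the lower levels.
	 */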
	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	flush_tlb_one(addr);
}

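/*
 * Check whether the vma's permissions allow the attempted access:
 * a write needs VM_WRITE, anything else just needs the vma to be
 * accessible at all.
 */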
static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
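	/*
	 * The MMU's entryhi register holds the virtual page number of
	 * the faulting access; masking with PAGE_MASK yields the
	 * page-aligned fault address.
	 */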
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

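	/* Let kprobes handle the fault if it was raised while a kprobe
	 * was active. */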
	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/*
	 * Enable interrupts if they were enabled in the parent context
	 * (bit 6 of the PSR is the interrupt-enable flag).
	 */
	if (likely(regs->sr & BIT(6)))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
retry:
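	/*
	 * lock_mm_and_find_vma() takes mmap_lock for reading, looks up
	 * the vma covering addr (expanding the stack if needed), and
	 * returns NULL with the lock already dropped when no suitable
	 * vma exists.
	 */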
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}
