// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

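/* Log unhandled user faults via show_signal_msg() when non-zero. */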
int show_unhandled_signals = 1;

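/* Report an unrecoverable kernel fault and oops. */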
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

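/* Rate-limited log of a fatal user-space fault, gated by show_unhandled_signals. */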
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

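/* Deliver a fault signal to the current task, logging it first if enabled. */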
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr);
}

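/*
 * Work out the faulting address for siginfo: a text fault faults at
 * the PC itself, while for a data fault we fetch the instruction at
 * the PC (directly for kernel mode, via __get_user() for user mode)
 * and decode its effective address.
 */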
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

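/*
 * Main fault entry point, called from the low-level trap handlers with
 * the fault type (text/data, read/write) and the faulting address.
 */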
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	if (!from_user && address >= PAGE_OFFSET)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

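	/*
	 * lock_mm_and_find_vma() takes the mmap lock and finds (or
	 * expands) the VMA covering the address; on failure it has
	 * already dropped the lock, hence bad_area_nosemaphore below.
	 */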
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!from_user)
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	if (!from_user) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->pc);
		if (entry) {
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
			       regs->pc, entry->fixup);
#endif
			regs->pc = entry->fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	mmap_read_unlock(mm);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

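		/*
		 * The pgd entry is already present, so the missing
		 * translation must be at the pmd level; p4d and pud
		 * are folded on sparc32, walk through them in both
		 * page tables and copy the kernel pmd.
		 */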
		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	vma = lock_mm_and_find_vma(mm, address, NULL);
	if (!vma)
		goto bad_area_nosemaphore;
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags, NULL)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
bad_area_nosemaphore:
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

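/* The SPARC ABI requires the stack pointer to be doubleword (8-byte) aligned. */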
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

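/*
 * A window overflow trap could not spill a register window to the user
 * stack. The 64-byte window save area (last doubleword at sp + 0x38)
 * may straddle a page boundary, so fault in both ends for writing,
 * then make sure the stack pointer was legal at all.
 */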
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

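/*
 * A window underflow trap could not read a register window back from
 * the user stack; fault the save area in for reading.
 */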
void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

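/*
 * A fault occurred while restoring the user's register window on
 * return from the kernel; fault the window save area in for reading.
 */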
void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}