// SPDX-License-Identifier: GPL-2.0
/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */


#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}
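
/*
 * Worked example (a sketch; the bit positions follow the SPARC V9
 * format-3 memory encoding, where op3 occupies bits 24:19): for
 * "stx %g2, [%g1 + 8]" op3 is 0x0e, so bit 21 (op3 bit 2) is set and
 * decode_direction() returns store; for "ldx [%g1 + 8], %g2" op3 is
 * 0x0b, bit 21 is clear, and it returns load.
 */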

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */

		return 0;
	}
}
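
/*
 * Worked example (a sketch based on the V9 op3 values): "lduw" has
 * op3 0x00, so the low two bits are 0 and the size is 4; "lduh"/"sth"
 * have op3 0x02/0x06 (low bits 2) giving 2; "ldd"/"std" have op3
 * 0x03/0x07 (low bits 3) giving the special value 16; "ldx"/"stx"
 * (0x0b/0x0e) are caught by the first test and give 8.
 */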

static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);	/* imm_asi */
	} else
		return ASI_P;
}
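
/*
 * Example (a sketch): bit 23 marks the "alternate space" forms
 * (lda/sta and friends).  For "ldxa [%g1 + %g2] 0x88, %g3" the i bit
 * (bit 13) is clear, so the immediate ASI in bits 12:5 (0x88, ASI_PL
 * in this example) is used; with the i bit set the ASI comes from the
 * %asi field of %tstate instead.  Plain loads/stores use ASI_P.
 */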

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}
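
/*
 * Worked example: on a 64-bit long, shifting the 13-bit immediate
 * left by 51 puts its sign bit (bit 12) into bit 63, and the
 * arithmetic right shift then replicates it.  So 0x0008 stays 8,
 * while 0x1ff8 becomes -8.
 */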

static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
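
/*
 * Note (a sketch of the convention assumed above): register numbers
 * 0-15 are read straight out of pt_regs, with %g0 always reading as
 * zero.  Numbers 16-31 (%l0-%l7/%i0-%i7) are fetched from the
 * register window save area that the code locates at the saved frame
 * pointer, biased by STACK_BIAS for a 64-bit stack; a compat task
 * with a 32-bit stack uses the truncated frame pointer directly.
 */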

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}
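
/*
 * Worked example (a sketch): for "ldx [%g1 + 0x10], %g2" the i bit
 * (bit 13) is set, rs1 is 1 (%g1) and the sign-extended simm13 is
 * 0x10, so the effective address is %g1 + 16.  For the register form
 * "ldx [%g1 + %g3], %g2" the address is %g1 + %g3 instead.  A 32-bit
 * user task gets the result truncated to 32 bits.
 */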

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned int)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}
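
/*
 * Note (an interpretation of the op3 layout): the floating-point
 * load/store forms use op3 values 0x20-0x27 and 0x30-0x37, all of
 * which have op3 bit 5 (instruction bit 24) set, while the integer
 * forms handled by this file have it clear.
 */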

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}

static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}

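/*
 * Handle a misaligned kernel-mode load or store in software: decode
 * the direction, access size and ASI of the trapped instruction,
 * compute its effective address, perform the access (byte-swapping
 * the value for little-endian ASIs such as ASI_PL), and then step
 * %tpc/%tnpc past the instruction.
 */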
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

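/*
 * Emulate the POPC instruction, which many UltraSPARC implementations
 * do not execute in hardware: count the set bits of the source
 * operand (register or sign-extended immediate) and write the count
 * into %rd, either in pt_regs or in the window save area on the user
 * stack.  For example, "popc %o1, %o2" with %o1 holding 0xf0 (a
 * hypothetical value) stores 4 into %o2.
 */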
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

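/*
 * Emulate the trapped FP load (LDF/LDDF/LDQF) and quad FP store
 * forms, performing the access with aligned 32-bit user reads and
 * writes and byte-swapping for little-endian ASIs.  The FP register
 * number is rebuilt from the rd field per the V9 extended-register
 * encoding (an interpretation): instruction bits 29:26 select the
 * even register pair and bit 25 selects the upper bank (%f32-%f62),
 * which is what "((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20)"
 * computes below.
 */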
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg;
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (freg & 3) {
			current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
			do_fpother(regs);
			return 0;
		}
		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		if (size == 1)
			freg = (insn >> 25) & 0x1f;
		else
			freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;

		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

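/*
 * LDDF to an address that is only 4-byte aligned traps with a
 * mem-address-not-aligned exception; emulate it here by doing two
 * aligned 32-bit user reads, assembling the 64-bit value (swapped for
 * little-endian ASIs) and depositing it in the target %f register.
 */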
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}

void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}