/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
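	/*
	 * Overflow check: with CONFIG_VMAP_STACK, kernel stacks are
	 * THREAD_SIZE bytes but aligned to twice that (THREAD_ALIGN),
	 * so bit THREAD_SHIFT of every in-bounds stack address is
	 * clear. If allocating pt_regs flips that bit, the frame would
	 * land in the guard region below the stack. sp is clobbered by
	 * the test and reloaded from TASK_TI_KERNEL_SP afterwards.
	 */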
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31
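	/*
	 * x2 (sp) and x4 (tp) are not saved here: the interrupted sp
	 * was stashed in TASK_TI_USER_SP and the interrupted tp still
	 * sits in CSR_SCRATCH; both are written to pt_regs below.
	 */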

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

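	/*
	 * csrrc reads the old value and clears the requested bits in a
	 * single instruction, so s1 records the trap-time status while
	 * SUM and FS/VS are turned off.
	 */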
	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_start
#endif
	move a0, sp /* pt_regs */
	la ra, ret_from_exception

	/*
	 * The MSB of the cause register is set for interrupts and clear
	 * for synchronous exceptions, so a signed compare against zero
	 * separates the two.
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	tail do_irq
1:
	/* Handle other exceptions */
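	/*
	 * Index the handler table: entries are one pointer wide, so
	 * scale the exception code by RISCV_LGPTR (log2 of the pointer
	 * size) before adding the table base.
	 */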
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled. Callers:
 * - handle_exception
 * - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
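	/*
	 * SR_SPP (SR_MPP when running in M-mode) records the privilege
	 * level the trap was taken from; zero means we are returning
	 * to userspace and must refresh the saved kernel sp, the
	 * shadow call stack pointer, and CSR_SCRATCH first.
	 */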
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_end
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
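	/*
	 * The store-conditional above exists only for its side effect:
	 * succeed or fail, it invalidates any dangling reservation. If
	 * it succeeds it writes back the value just loaded, so PT_EPC
	 * is unchanged either way.
	 */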

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

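	/* Restore sp (x2) last: it is the base register for the loads above */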
	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* write zero back to CSR_SCRATCH and retrieve the original x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the trap context on the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

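/*
 * A new task starts here with s0 and s1 set up by copy_thread(): for a
 * kernel thread s0 holds the function to run and s1 its argument, while
 * for a user task s0 is zero and we fall through to the syscall exit
 * path.
 */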
SYM_CODE_START(ret_from_fork)
	call schedule_tail
	beqz s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	la ra, ret_from_exception
	tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S ra, STACKFRAME_RA(sp)
	REG_S s0, STACKFRAME_FP(sp)
	addi s0, sp, STACKFRAME_SIZE_ON_STACK

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li t1, IRQ_STACK_SIZE
	add sp, t0, t1
	jalr a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L ra, STACKFRAME_RA(sp)
	REG_L s0, STACKFRAME_FP(sp)
	addi sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
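	/*
	 * a3 and a4 now point at prev->thread.ra and next->thread.ra;
	 * the TASK_THREAD_*_RA offsets are defined relative to
	 * thread.ra so each fits in a 12-bit immediate even though
	 * thread_struct sits deep inside task_struct.
	 */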
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Save the kernel shadow call stack pointer */
	scs_save_current
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	/* Switch to the next shadow call stack */
	scs_load_current
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
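	/*
	 * One pointer per synchronous exception code (0-15); causes
	 * beyond the table fall back to do_trap_unknown in
	 * handle_exception.
	 */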
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
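/*
 * With no MMU there is no VDSO to return through, so this sequence is
 * copied out to userspace as the signal return trampoline.
 */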
SYM_DATA_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif
