1 | /* SPDX-License-Identifier: GPL-2.0-or-later |
2 | * -*- mode: asm -*- |
3 | * |
4 | * linux/arch/m68k/kernel/entry.S |
5 | * |
6 | * Copyright (C) 1991, 1992 Linus Torvalds |
7 | * |
8 | * Linux/m68k support by Hamish Macdonald |
9 | * |
10 | * 68060 fixes by Jesper Skov |
11 | * |
12 | */ |
13 | |
14 | /* |
15 | * entry.S contains the system-call and fault low-level handling routines. |
16 | * This also contains the timer-interrupt handler, as well as all interrupts |
17 | * and faults that can result in a task-switch. |
18 | * |
19 | * NOTE: This code handles signal-recognition, which happens every time |
20 | * after a timer-interrupt and after each system call. |
21 | * |
22 | */ |
23 | |
24 | /* |
25 | * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so |
26 | * all pointers that used to be 'current' are now entry |
27 | * number 0 in the 'current_set' list. |
28 | * |
 * 6/05/00 RZ: added writeback completion after return from sighandler
 *	       for 68040
31 | */ |
32 | |
33 | #include <linux/linkage.h> |
34 | #include <asm/errno.h> |
35 | #include <asm/setup.h> |
36 | #include <asm/traps.h> |
37 | #include <asm/unistd.h> |
38 | #include <asm/asm-offsets.h> |
39 | #include <asm/entry.h> |
40 | |
41 | .globl system_call, buserr, trap, resume |
42 | .globl sys_call_table |
43 | .globl __sys_fork, __sys_clone, __sys_vfork |
44 | .globl bad_interrupt |
45 | .globl auto_irqhandler_fixup |
46 | .globl user_irqvec_fixup |
47 | |
48 | .text |
| fork(2) entry stub: save the callee-saved register set (switch_stack)
| below pt_regs so the child inherits it, call the C handler, then pop
| the saved registers again before returning to the syscall exit path.
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| discard switch_stack (6 regs * 4 bytes;
					| assumes SWITCH_STACK_SIZE == 24 — TODO confirm)
	rts
54 | |
| clone(2) entry stub: save switch_stack, push a pointer to the pt_regs
| frame (which sits SWITCH_STACK_SIZE above %sp after the save) as the
| argument to the C helper, then pop both the argument and switch_stack.
ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pt_regs pointer
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| pop arg (4) + switch_stack (24)
	rts
61 | |
| vfork(2) entry stub: same shape as __sys_fork — preserve switch_stack
| across the C call, then drop it.
ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| discard switch_stack (6 regs * 4 bytes)
	rts
67 | |
| clone3(2) entry stub: identical layout to __sys_clone — pass the
| pt_regs pointer to the C helper, then pop argument + switch_stack.
ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pt_regs pointer
	jbsr	m68k_clone3
	lea	%sp@(28),%sp		| pop arg (4) + switch_stack (24)
	rts
74 | |
| sigreturn(2): rebuild user context from the signal frame.  A gap is
| left on the stack because do_sigreturn may need to construct a larger
| replacement exception frame than the one we entered with; the C code
| returns (in %d0) the address of the relocated switch_stack.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1			| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
	lea	%sp@(-84),%sp		| leave a gap (84 bytes — presumably max
					| frame growth; confirm vs signal.c)
	movel	%a1,%sp@-		| arg 2: switch_stack pointer
	movel	%a0,%sp@-		| arg 1: pt_regs pointer
	jbsr	do_sigreturn
	jra	1f			| shared with rt_sigreturn()
84 | |
| rt_sigreturn(2): same scheme as sys_sigreturn above; the common tail
| at label 1: is shared by both (sys_sigreturn jumps here).
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1			| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
	lea	%sp@(-84),%sp		| leave a gap
	movel	%a1,%sp@-		| arg 2: switch_stack pointer
	movel	%a0,%sp@-		| arg 1: pt_regs pointer
	| stack contents:
	|   [original pt_regs address] [original switch_stack address]
	|   [gap] [switch_stack] [pt_regs] [exception frame]
	jbsr	do_rt_sigreturn

1:
	| stack contents now:
	|   [original pt_regs address] [original switch_stack address]
	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
	|   [replacement exception frame]
	| return value of do_{rt_,}sigreturn() points to moved switch_stack.

	movel	%d0,%sp			| discard the leftover junk
	RESTORE_SWITCH_STACK
	| stack contents now is just [syscall return address] [pt_regs] [frame]
	| return pt_regs.d0 (the value the signal handler's context restores)
	movel	%sp@(PT_OFF_D0+4),%d0	| +4 skips the syscall return address
	rts
110 | |
| Bus-error exception entry: save full register state, then hand the
| pt_regs frame to the C handler buserr_c().
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp			| pop the argument
	jra	ret_from_exception
118 | |
| Generic trap/exception entry: save full register state, then hand the
| pt_regs frame to the C handler trap_c().
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp			| pop the argument
	jra	ret_from_exception
126 | |
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| arg: previous task (set up by resume)
	jsr	schedule_tail
	addql	#4,%sp			| pop the argument
	jra	ret_from_exception
135 | |
| First-schedule entry for a kernel thread: finish the context switch,
| then invoke the thread function with its argument.
ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| arg: previous task, for schedule_tail
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the slot as the payload's argument
	jsr	%a3@			| call the thread function
	addql	#4,%sp			| pop the argument
	jra	ret_from_exception	| if the payload returns, exit via the
					| normal exception-return path
144 | |
145 | #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) |
146 | |
#ifdef TRAP_DBG_INTERRUPT

| Debug-interrupt entry (only on platforms defining TRAP_DBG_INTERRUPT):
| save state and pass the pt_regs frame to the C handler.
.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@- 		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp			/* pop the argument */
	jra	ret_from_exception
#endif
158 | |
| Tail-call into the scheduler: record the exception frame address via
| set_esp0(), then enter schedule() with ret_from_exception pushed as
| its return address so we resume through the exception exit path.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@			| arg: current stack pointer
	jbsr	set_esp0
	addql	#4,%sp			| pop the argument
	pea	ret_from_exception	| fake return address for schedule()
	jmp	schedule
166 | |
| Userspace signal-return trampoline: issue the sigreturn syscall.
| Does not return (trap #0 re-enters the kernel).
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0	| syscall number fits in a moveq imm
	trap	#0
170 | |
| Userspace rt-signal-return trampoline: issue the rt_sigreturn syscall.
| movel (not moveq) because __NR_rt_sigreturn may exceed moveq's 8-bit range.
ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0
174 | |
175 | #else |
176 | |
| Syscall entry when tracing/seccomp is active: let the tracer see (and
| possibly rewrite or deny) the call, then dispatch or bail out.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return-address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop the dummy slot
	addql	#1,%d0			| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall	| tracer denied the syscall (-1)
	movel	%sp@(PT_OFF_ORIG_D0),%d0 | reload (possibly rewritten) nr
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| in range -> dispatch
	jra	ret_from_syscall	| out of range: -ENOSYS already stored
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall
193 | |
| Syscall exit when tracing is active: report the result to the tracer,
| then leave through the common exception-return path.
do_trace_exit:
	subql	#4,%sp			| dummy return-address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop the dummy slot
	jra	.Lret_from_exception
201 | |
| System-call entry point (reached via trap #0); %d0 carries the
| syscall number on entry.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1			| %a1 = current task

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)	| byte holding flag bits 15..8
	jmi	do_trace_entry		| top bit of that byte set -> trace
	| seccomp filter active?
	btst	#5,%a1@(TINFO_FLAGS+2)	| bit 13 of flags (NOTE(review):
					| presumably TIF_SECCOMP — confirm)
	bnes	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| number out of range -> -ENOSYS
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0) | indirect call via table
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| pending work flags (move sets CC)
	jne	syscall_exit_work
1:	RESTORE_ALL
228 | |
| Slow syscall-exit path: at least one work flag (in %d0, flags bits
| 15..8) is set.  Flags are tested by shifting them into the CC bits.
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| shift: C = old bit 15, N = old bit 14
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0			| expose the low flag byte in CC
	jne	do_signal_return
	pea	resume_userspace	| fake return address for schedule()
	jra	schedule
239 | |
240 | |
| Common exception-return path: if going back to user mode, re-enable
| interrupts and check for pending work (signals, reschedule).
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low byte of flags (bits 7..0)
	jne	exit_work
1:	RESTORE_ALL
255 | |
| Work pending before returning to user mode: %d0 holds the low flag
| byte; shifting left decides between signal delivery and reschedule.
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| test remaining flag bits via CC
	jne	do_signal_return
	pea	resume_userspace	| fake return address for schedule()
	jra	schedule
263 | |
264 | |
| Deliver pending signals / notify-resume work, then loop back to the
| userspace-return check (flags may have changed).
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pt_regs pointer
	bsrl	do_notify_resume
	addql	#4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop the dummy return address
	jbra	resume_userspace
275 | |
| Single-step trap deferred to syscall exit: clear the trace bit in the
| saved SR and send SIGTRAP to the current task via send_sig(sig,
| task, priv).
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP (priv = 1)
	movel	%curptr,%sp@-		| arg: receiving task
	pea	LSIGTRAP		| arg: signal number
	jbsr	send_sig
	addql	#8,%sp			| pop two arguments...
	addql	#4,%sp			| ...and the third (addq imm max is 8)
	jbra	resume_userspace
285 | |
286 | |
287 | /* This is the main interrupt handler for autovector interrupts */ |
288 | |
| Main handler for autovectored interrupts: extract the vector number
| from the exception frame's format/vector word and dispatch to do_IRQ.
ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0		| rebase so first autovector is IRQ 0

	movel	%sp,%sp@-		| arg: pt_regs pointer
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2		| patch point: address operand of jsr
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception
302 | |
303 | /* Handler for user defined interrupt vectors */ |
304 | |
| Handler for user-defined interrupt vectors: same shape as
| auto_inthandler, but rebased by VEC_USER (the subw immediate is
| runtime-patchable via user_irqvec_fixup).
ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		| patch point: immediate of subw
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-		| arg: pt_regs pointer
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception
318 | |
319 | /* Handler for uninitialized and spurious interrupts */ |
320 | |
| Handler for uninitialized and spurious interrupts: just report the
| frame to handle_badint and return.
ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-		| arg: pt_regs pointer
	jsr	handle_badint
	addql	#4,%sp			| pop the argument
	jra	ret_from_exception
329 | |
| Context switch: save the outgoing task's (%a0) CPU state into its
| thread struct, switch %curptr and stacks, and restore the incoming
| task's (%a1) state.  Returns the previous task in %d1 (consumed by
| ret_from_fork / ret_from_kernel_thread on a child's first schedule).
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1,so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (%sfc/%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context, unless the kernel only emulates FP */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> skip entirely
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: no FP state to move
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
	/* non-060: frame format byte is the first byte of the fsave area */
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: no FP state to move
#endif
	/* FP state is live: save data and control registers too */
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context (mirror of the save sequence) */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> skip entirely
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: only frestore needed
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: only frestore needed
#endif
	/* live FP state: reload data/control regs before frestore */
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (%sfc/%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts
436 | |
437 | #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */ |
438 | |