1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. |
5 | * |
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
8 | * Copyright (C) 2001 MIPS Technologies, Inc. |
9 | */ |
10 | |
11 | #include <asm/asm.h> |
12 | #include <asm/asmmacro.h> |
13 | #include <asm/compiler.h> |
14 | #include <asm/irqflags.h> |
15 | #include <asm/regdef.h> |
16 | #include <asm/mipsregs.h> |
17 | #include <asm/stackframe.h> |
18 | #include <asm/isadep.h> |
19 | #include <asm/thread_info.h> |
20 | |
/*
 * Without kernel preemption there is no separate resume_kernel path:
 * returning to kernel mode goes straight to the full-frame restore.
 * With preemption, the plain exception-return entry point is an alias
 * for the IRQ-return path defined below.
 */
#ifndef CONFIG_PREEMPTION
#define resume_kernel	restore_all
#else
#define __ret_from_irq	ret_from_exception
#endif
26 | |
	.text
	.align	5
#ifndef CONFIG_PREEMPTION
/*
 * Non-preemptible kernels: common exception return.  Mask interrupts,
 * then join the IRQ-return path.
 */
FEXPORT(ret_from_exception)
	local_irq_disable		# preempt stop
	b	__ret_from_irq
#endif
/*
 * Return path from interrupts and exceptions.  Register $28 (gp) holds
 * the current thread_info pointer (TI_* offsets below); sp points at
 * the saved pt_regs (PT_* offsets).
 */
FEXPORT(ret_from_irq)
	LONG_S	s0, TI_REGS($28)	# pop thread_info->regs back to the
					# value held in s0 (saved on irq
					# entry — confirm against handle_int)
FEXPORT(__ret_from_irq)
/*
 * We can be coming here from a syscall done in the kernel space,
 * e.g. a failed kernel_execve().
 */
resume_userspace_check:
	LONG_L	t0, PT_STATUS(sp)	# returning to kernel mode?
	andi	t0, t0, KU_USER		# KU bit of the saved CP0 Status
	beqz	t0, resume_kernel	# not user mode: kernel return path

resume_userspace:
	local_irq_disable		# make sure we dont miss an
					# interrupt setting need_resched
					# between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	andi	t0, a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
	bnez	t0, work_pending	# signals/resched/notify pending
	j	restore_all		# nothing to do: restore full frame
54 | |
#ifdef CONFIG_PREEMPTION
/*
 * Returning to kernel mode with preemption enabled: preempt only if
 * preempt_count is zero, TIF_NEED_RESCHED is set, and the interrupted
 * context had interrupts enabled in its saved Status word.
 */
resume_kernel:
	local_irq_disable
	lw	t0, TI_PRE_COUNT($28)	# non-zero preempt_count:
	bnez	t0, restore_all		#  preemption currently disabled
	LONG_L	t0, TI_FLAGS($28)
	andi	t1, t0, _TIF_NEED_RESCHED
	beqz	t1, restore_all		# no reschedule requested
	LONG_L	t0, PT_STATUS(sp)	# Interrupts off?
	andi	t0, 1			# IE bit of the saved Status word
	beqz	t0, restore_all		# irqs were off: must not preempt
	PTR_LA	ra, restore_all		# make preempt_schedule_irq()
	j	preempt_schedule_irq	#  "return" straight to restore_all
#endif
69 | |
/*
 * First-run entry for a new kernel thread.  s0/s1 appear to hold the
 * thread function and its argument (presumably set up at fork time by
 * copy_thread — confirm against the arch fork code).
 */
FEXPORT(ret_from_kernel_thread)
	jal	schedule_tail		# a0 = struct task_struct *prev
	move	a0, s1			# argument for the thread function
	jal	s0			# call the thread function
	j	syscall_exit		# if it returns, leave via the
					# common syscall exit path
75 | |
/*
 * Child side of fork: finish the scheduler bookkeeping for the context
 * switch, then fall through into the common syscall exit path.
 */
FEXPORT(ret_from_fork)
	jal	schedule_tail		# a0 = struct task_struct *prev

FEXPORT(syscall_exit)
#ifdef CONFIG_DEBUG_RSEQ
	move	a0, sp			# a0 = pt_regs for the rseq check
	jal	rseq_syscall
#endif
	local_irq_disable		# make sure need_resched and
					# signals dont change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2, t0
	bnez	t0, syscall_exit_work	# tracing/signals/resched pending
					# else fall through to restore_all
91 | |
restore_all:				# restore full frame
	.set	noat			# $at is restored by RESTORE_AT below
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
restore_partial:			# restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Tell the irq-tracing code the IRQ state we are returning with.
	 * The trace_hardirqs_* calls clobber registers, so re-save and
	 * re-restore everything around them.
	 */
	SAVE_STATIC
	SAVE_AT
	SAVE_TEMP
	LONG_L	v0, PT_STATUS(sp)
#if defined(CONFIG_CPU_R3000)
	and	v0, ST0_IEP		# R3000 keeps a previous-IE bit
#else
	and	v0, ST0_IE
#endif
	beqz	v0, 1f			# returning with irqs disabled?
	jal	trace_hardirqs_on
	b	2f
1:	jal	trace_hardirqs_off
2:
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
#endif
	RESTORE_SOME
	RESTORE_SP_AND_RET		# restore sp and leave the exception
	.set	at			# re-enable assembler use of $at
120 | |
/*
 * Pending work on the way back to user space: reschedule and/or deliver
 * signals / notify-resume requests.  On entry a2 holds TI_FLAGS and
 * interrupts are disabled.
 */
work_pending:
	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
	beqz	t0, work_notifysig	# no reschedule: handle signals only
work_resched:
	TRACE_IRQS_OFF
	jal	schedule

	local_irq_disable		# make sure need_resched and
					# signals dont change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)
	andi	t0, a2, _TIF_WORK_MASK	# is there any work to be done
					# other than syscall tracing?
	beqz	t0, restore_all		# all done: restore and return
	andi	t0, a2, _TIF_NEED_RESCHED
	bnez	t0, work_resched	# loop while reschedule still needed

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	move	a0, sp			# a0 = pt_regs
	li	a1, 0			# a1 = 0 (second argument; presumably
					# unused — confirm do_notify_resume
					# prototype)
	jal	do_notify_resume	# a2 already loaded
	j	resume_userspace_check	# re-check: handlers may queue more work
144 | |
/*
 * Exit path for syscalls that saved only a partial register frame.
 * Fast path restores the partial frame directly; if work is pending,
 * the static registers are saved so the common slow paths above can be
 * used.
 */
FEXPORT(syscall_exit_partial)
#ifdef CONFIG_DEBUG_RSEQ
	move	a0, sp			# a0 = pt_regs for the rseq check
	jal	rseq_syscall
#endif
	local_irq_disable		# make sure need_resched doesn't
					# change between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2
	beqz	t0, restore_partial	# fast path: nothing pending
	SAVE_STATIC			# slow paths expect a full frame
syscall_exit_work:
	LONG_L	t0, PT_STATUS(sp)	# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel	# kernel return: no user-mode work
	li	t0, _TIF_WORK_SYSCALL_EXIT
	and	t0, a2			# a2 is preloaded with TI_FLAGS
	beqz	t0, work_pending	# trace bit set?
	local_irq_enable		# could let syscall_trace_leave()
					# call schedule() instead
	TRACE_IRQS_ON
	move	a0, sp			# a0 = pt_regs
	jal	syscall_trace_leave
	b	resume_userspace
170 | |
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
    defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT)

/*
 * MIPS32R2 Instruction Hazard Barrier - must be called from assembler.
 * jr.hb returns to the caller while clearing instruction hazards.
 *
 * For C code use the inline version named instruction_hazard().
 */
LEAF(mips_ihb)
	.set	MIPS_ISA_LEVEL_RAW
	jr.hb	ra
	nop				# branch delay slot
	END(mips_ihb)

#endif /* CONFIG_CPU_MIPSR2 - CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
186 | |