/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>

_LPP_OFFSET	= __LC_LPP
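
/*
 * STBEAR / LBEAR store resp. load the breaking-event address, LPSWEY is the
 * long-displacement form of lpswe, and MBEAR copies the last breaking-event
 * address into pt_regs. All of them are patched in via the alternatives
 * mechanism only if facility 193 (BEAR enhancement) is installed; otherwise
 * a nop resp. a plain lpswe is used.
 */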

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

/*
 * The TSTMSK macro generates a test-under-mask instruction for an
 * arbitrary constant mask value. The macro shifts the mask value to
 * determine which byte of the operand contains the mask bits and emits
 * a single "tm" instruction with the corresponding memory offset.
 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
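
/*
 * For example (illustrative only):
 *	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
 * expands to a single "tm" on the one byte of the 8-byte field at
 * __LC_MCCK_CODE that contains the MCCK_CODE_CR_VALID bit.
 */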

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm
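
/*
 * The BPOFF/BPON macros above switch the CPU into resp. out of the limited
 * branch prediction mode (the encoded instruction appears to be PPA with
 * function codes 12 and 13), patched in only if facility 82 is available.
 * BPENTER and BPEXIT do the same conditionally, depending on a TIF flag,
 * when entering resp. leaving guest context (Spectre mitigation).
 */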

#if IS_ENABLED(CONFIG_KVM)
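/*
 * SIEEXIT is used by the interrupt and machine check handlers when the
 * event hit while this CPU was in SIE: it marks the SIE control block as
 * no longer in SIE, switches back to the kernel ASCE, clears _CIF_SIE and
 * leaves the address of sie_exit in %r9, which the handlers store into the
 * saved PSW so that the interrupted context resumes at sie_exit.
 */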
	.macro SIEEXIT sie_control
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_SIE
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to prevent the next symbol from
	 * starting at the beginning of the kprobes text section. Otherwise
	 * there would be several symbols at the same address, and e.g.
	 * objdump would pick an arbitrary one of them when disassembling
	 * the code. With the nop in between this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 * gpr2 = (task_struct *)prev
 * gpr3 = (task_struct *)next
 * Returns:
 * gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
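 * Returns: sie exit reason code from __SF_SIE_REASON in %r2 (zero, or
 *	    e.g. -EFAULT when a fault is handled via .Lsie_fault)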
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	oi	__LC_CPU_FLAGS+7,_CIF_SIE
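	# _CIF_SIE tells the interrupt and machine check handlers that this
	# CPU is executing (or about to execute) SIE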
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip			# exit if fp/vx regs changed
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_SIE
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between __sie64a and sie_exit should not
# cause program interrupts. So let's use 3 nops as a landing pad for all
# possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

EX_TABLE(.Lrewind_pad6,.Lsie_fault)
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
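	# %r14 is later passed as the second argument to __do_syscall():
	# zero for a normal svc, nonzero when entered via .Lsysc_per after
	# a PER single-step event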
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
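	# %r2 still holds the previous task from __switch_to_asm; %r11 is
	# assumed to have been set up by copy_thread() to point to the new
	# child's pt_regs; both are passed on to __ret_from_fork()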
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lgr	%r10,%r15
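	# keep the original stack pointer in %r10: if the program check hit
	# while in SIE, the SIE flags and control block are read from that
	# stack frame further below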
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001			# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f				# -> fault in user space
.Lpgm_skip_asce:
1:	tmhh	%r8,0x4000			# PER bit set in old PSW ?
	jnz	2f				# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80		# check for per exception
	jnz	.Lpgm_svcper			# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stctg	%c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
	ltg	%r12,__LC_GMAP
	jz	5f
	clc	__GMAP_ASCE(8,%r12), __PT_CR1(%r11)
	jne	5f
	BPENTER	__SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r10)
#endif
5:	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001			# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
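	# build a return PSW from the SVC new PSW mask and .Lsysc_per, so the
	# single stepped svc is handled like a normal system call entry, with
	# %r14 = 1 flagging the PER event for __do_syscall()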
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
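	# if the interrupt arrived while this CPU was in SIE, leave guest
	# context first; SIEEXIT leaves %r9 pointing at sie_exit, so the PSW
	# saved into pt_regs below makes the handler return there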
	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15)
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001			# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
SYM_FUNC_START(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
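	# __SF_EMPTY now holds a complete PSW: the mask passed in %r3 plus
	# psw_idle_exit as the address to continue at once an interrupt
	# wakes the CPU up again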
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
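	# SMT is enabled: sample the MT-diagnostic CPU counter set on idle
	# entry (the .insn below appears to encode stcctm)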
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
	BR_EX	%r14
SYM_FUNC_END(psw_idle)

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
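	# the CPU timer value in the machine check save area is not valid:
	# reload the CPU timer with the most recently stored value, i.e. the
	# lowest of the candidates below, since the CPU timer counts down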
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	TSTMSK	__LC_CPU_FLAGS,_CIF_SIE
	jz	.Lmcck_user
	# Need to compare the address instead of a CIF_SIE* flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14, 4f
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14, 4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15)
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
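	# %r14 now points to the slots for %r8-%r15 in the machine check
	# register save area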
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA), 193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
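	# entered via a SIGP restart order (e.g. CPU bring-up or kdump): run
	# the function from __LC_RESTART_FN with __LC_RESTART_DATA as argument
	# on the restart stack, then stop this CPU if the function returns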
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
SYM_CODE_END(stack_overflow)
#endif

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
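/*
 * The tables below are generated by expanding the SYSCALL() macro for every
 * entry in syscall_table.h: once with the 64-bit (__s390x_ prefixed) entry
 * points for sys_call_table and, with CONFIG_COMPAT, once more with the
 * 31-bit compat (__s390_ prefixed) entry points for sys_call_table_emu.
 */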
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif