/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Hartmut Penner (hp@de.ibm.com),
 *            Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
__PT_R2 = __PT_GPRS + 16
__PT_R3 = __PT_GPRS + 24
__PT_R4 = __PT_GPRS + 32
__PT_R5 = __PT_GPRS + 40
__PT_R6 = __PT_GPRS + 48
__PT_R7 = __PT_GPRS + 56
__PT_R8 = __PT_GPRS + 64
__PT_R9 = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
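# Worked example (illustrative, values are an assumption): with PAGE_SHIFT = 12
# and THREAD_SIZE_ORDER = 2, STACK_SHIFT is 14, STACK_SIZE is 16KB, and
# STACK_INIT is the highest stack offset that still leaves room for a struct
# pt_regs plus the standard stack frame overhead.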

_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
 _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET = __LC_LPP

#define BASED(name) name-cleanup_critical(%r13)
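# Example (illustrative): BASED(.Lcritical_start) expands to
# .Lcritical_start-cleanup_critical(%r13), a %r13-relative operand; this works
# because the interrupt handlers load %r13 via "larl %r13,cleanup_critical".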

 .macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
 basr %r2,%r0
 brasl %r14,trace_hardirqs_on_caller
#endif
 .endm

 .macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
 basr %r2,%r0
 brasl %r14,trace_hardirqs_off_caller
#endif
 .endm
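# Note: "basr %r2,%r0" above does not branch (register 0 means no branch
# target); it only stores the address of the next instruction in %r2, which
# acts as the caller IP argument for trace_hardirqs_{on,off}_caller.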

 .macro LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jz .+10
 brasl %r14,lockdep_sys_exit
#endif
 .endm

 .macro CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
 tml %r15,\stacksize - CONFIG_STACK_GUARD
 lghi %r14,\savearea
 jz stack_overflow
#endif
 .endm
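# Worked example (illustrative; the guard size is an assumption): with a 16KB
# \stacksize and CONFIG_STACK_GUARD = 256 the mask is 0x3f00, so the tml/jz
# pair branches to stack_overflow whenever %r15 points into the lowest 256
# bytes of a STACK_SIZE aligned stack.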

 .macro SWITCH_ASYNC savearea,timer
 tmhh %r8,0x0001 # interrupting from user ?
 jnz 1f
 lgr %r14,%r9
 slg %r14,BASED(.Lcritical_start)
 clg %r14,BASED(.Lcritical_length)
 jhe 0f
 lghi %r11,\savearea # inside critical section, do cleanup
 brasl %r14,cleanup_critical
 tmhh %r8,0x0001 # retest problem state after cleanup
 jnz 1f
0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack?
 slgr %r14,%r15
 srag %r14,%r14,STACK_SHIFT
 jnz 2f
 CHECK_STACK 1<<STACK_SHIFT,\savearea
 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 j 3f
1: UPDATE_VTIME %r14,%r15,\timer
 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
 .endm

 .macro UPDATE_VTIME w1,w2,enter_timer
 lg \w1,__LC_EXIT_TIMER
 lg \w2,__LC_LAST_UPDATE_TIMER
 slg \w1,\enter_timer
 slg \w2,__LC_EXIT_TIMER
 alg \w1,__LC_USER_TIMER
 alg \w2,__LC_SYSTEM_TIMER
 stg \w1,__LC_USER_TIMER
 stg \w2,__LC_SYSTEM_TIMER
 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
 .endm
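# In effect (the CPU timer counts down):
#   user timer   += exit_timer - enter_timer   (time spent in user space)
#   system timer += last_update - exit_timer   (kernel time since last update)
#   last_update_timer = enter_timer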

 .macro REENABLE_IRQS
 stg %r8,__LC_RETURN_PSW
 ni __LC_RETURN_PSW,0xbf
 ssm __LC_RETURN_PSW
 .endm
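# %r8 holds the first half of the interrupt's old PSW; "ni ...,0xbf" clears
# the PER mask (bit 1) so that ssm restores the interrupted context's
# interrupt mask without re-enabling PER.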

 .macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 .insn s,0xb27c0000,\savearea # store clock fast
#else
 .insn s,0xb2050000,\savearea # store clock
#endif
 .endm

 /*
  * The TSTMSK macro generates a single test-under-mask instruction
  * for an arbitrary constant mask: it shifts the mask value byte by
  * byte to locate the byte containing the set bits and derives the
  * memory offset for the tm instruction from that position.
  */
 .macro TSTMSK addr, mask, size=8, bytepos=0
 .if (\bytepos < \size) && (\mask >> 8)
 .if (\mask & 0xff)
 .error "Mask exceeds byte boundary"
 .endif
 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
 .exitm
 .endif
 .ifeq \mask
 .error "Mask must not be zero"
 .endif
 off = \size - \bytepos - 1
 tm off+\addr, \mask
 .endm
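# Illustrative expansion: with the default size of 8 bytes,
#   TSTMSK __LC_CPU_FLAGS,_CIF_FPU
# recurses until the set bits are in the low byte of the remaining mask and
# emits a single "tm 7+__LC_CPU_FLAGS,<mask byte>" - offset 7 because the
# least significant byte of a big-endian 8 byte field comes last (this
# assumes _CIF_FPU fits within a single byte).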

 .macro BPOFF
 ALTERNATIVE "", ".long 0xb2e8c000", 82
 .endm

 .macro BPON
 ALTERNATIVE "", ".long 0xb2e8d000", 82
 .endm
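# The raw opcodes above and in BPENTER/BPEXIT below are assumed to encode
# PPA (perform-processor-assist) variants that turn branch prediction off
# (0xb2e8c000) and back on (0xb2e8d000); they are patched in or out via
# alternative 82 depending on the active spectre mitigation.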

 .macro BPENTER tif_ptr,tif_mask
 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
 "", 82
 .endm

 .macro BPEXIT tif_ptr,tif_mask
 TSTMSK \tif_ptr,\tif_mask
 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
 "jnz .+8; .long 0xb2e8d000", 82
 .endm

 GEN_BR_THUNK %r9
 GEN_BR_THUNK %r14
 GEN_BR_THUNK %r14,%r11

 .section .kprobes.text, "ax"
.Ldummy:
 /*
 * This nop exists only to prevent __switch_to from starting at the
 * beginning of the kprobes text section. Otherwise several symbols
 * would share the same address, and e.g. objdump would pick an
 * arbitrary symbol name when disassembling this code. With the nop
 * in between, the __switch_to symbol is unique again.
 */
 nop 0

ENTRY(__bpon)
 .globl __bpon
 BPON
 BR_EX %r14

/*
 * Scheduler resume function, called by switch_to
 * gpr2 = (task_struct *) prev
 * gpr3 = (task_struct *) next
 * Returns:
 * gpr2 = prev
 */
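/*
 * Minimal sketch of the C-level view (an assumption, cf. <asm/switch_to.h>):
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 * invoked via the switch_to() macro with prev in %r2 and next in %r3.
 */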
ENTRY(__switch_to)
 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
 lghi %r4,__TASK_stack
 lghi %r1,__TASK_thread
 lg %r5,0(%r4,%r3) # start of kernel stack of next
 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
 lgr %r15,%r5
 aghi %r15,STACK_INIT # end of kernel stack of next
 stg %r3,__LC_CURRENT # store task struct of next
 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
 aghi %r3,__TASK_pid
 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
 BR_EX %r14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
 lg %r12,__LC_CURRENT
 stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer
 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area
 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
 jno .Lsie_load_guest_gprs
 brasl %r14,load_fpu_regs # load guest fp/vx regs
.Lsie_load_guest_gprs:
 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
 lg %r14,__LC_GMAP # get gmap pointer
 ltgr %r14,%r14
 jz .Lsie_gmap
 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
.Lsie_gmap:
 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
 tm __SIE_PROG20+3(%r14),3 # last exit...
 jnz .Lsie_skip
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
 jo .Lsie_skip # exit if fp/vx regs changed
 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
 sie 0(%r14)
.Lsie_exit:
 BPOFF
 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between sie64a and .Lsie_done should not
# cause program interrupts. So let's use three nops as a landing pad for all
# possible rewinds. See also .Lcleanup_sie.
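# Example (illustrative): if a suppressing program check leaves the old PSW
# pointing at sie_exit with an ILC of 6, the rewind lands on .Lrewind_pad6;
# the 2-byte nopr instructions then fall through to sie_exit, so rewinds of
# 2, 4 or 6 bytes all end up on a valid instruction.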
.Lrewind_pad6:
 nopr 7
.Lrewind_pad4:
 nopr 7
.Lrewind_pad2:
 nopr 7
 .globl sie_exit
sie_exit:
 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area
 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
 xgr %r0,%r0 # clear guest registers to
 xgr %r1,%r1 # prevent speculative use
 xgr %r2,%r2
 xgr %r3,%r3
 xgr %r4,%r4
 xgr %r5,%r5
 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
 BR_EX %r14
.Lsie_fault:
 lghi %r14,-EFAULT
 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code
 j sie_exit

 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
 EX_TABLE(.Lrewind_pad4,.Lsie_fault)
 EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
 stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
 BPOFF
 lg %r12,__LC_CURRENT
 lghi %r13,__TASK_thread
 lghi %r14,_PIF_SYSCALL
.Lsysc_per:
 lg %r15,__LC_KERNEL_STACK
 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
.Lsysc_vtime:
 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 stmg %r0,%r7,__PT_R0(%r11)
 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
 stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
 # clear user controlled register to prevent speculative use
 xgr %r0,%r0
 # load address of system call table
 lg %r10,__THREAD_sysc_table(%r13,%r12)
 llgh %r8,__PT_INT_CODE+2(%r11)
 slag %r8,%r8,2 # shift and test for svc 0
 jnz .Lsysc_nr_ok
 # svc 0: system call number in %r1
 llgfr %r1,%r1 # clear high word in r1
 cghi %r1,NR_syscalls
 jnl .Lsysc_nr_ok
 sth %r1,__PT_INT_CODE+2(%r11)
 slag %r8,%r1,2
.Lsysc_nr_ok:
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 stg %r2,__PT_ORIG_GPR2(%r11)
 stg %r7,STACK_FRAME_OVERHEAD(%r15)
 lgf %r9,0(%r8,%r10) # get system call address
 TSTMSK __TI_flags(%r12),_TIF_TRACE
 jnz .Lsysc_tracesys
 BASR_EX %r14,%r9 # call sys_xxxx
 stg %r2,__PT_R2(%r11) # store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
 lgr %r2,%r11
 brasl %r14,rseq_syscall
#endif
 LOCKDEP_SYS_EXIT
.Lsysc_tif:
 TSTMSK __PT_FLAGS(%r11),_PIF_WORK
 jnz .Lsysc_work
 TSTMSK __TI_flags(%r12),_TIF_WORK
 jnz .Lsysc_work # check for work
 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
 jnz .Lsysc_work
 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
 lg %r14,__LC_VDSO_PER_CPU
 lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 lmg %r11,%r15,__PT_R11(%r11)
 lpswe __LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
 jo .Lsysc_mcck_pending
 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
 jo .Lsysc_reschedule
 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
 jo .Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
 TSTMSK __TI_flags(%r12),_TIF_UPROBE
 jo .Lsysc_uprobe_notify
#endif
 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
 jo .Lsysc_guarded_storage
 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
 jo .Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
 jo .Lsysc_patch_pending # handle live patching just before
 # signals and possible syscall restart
#endif
 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
 jo .Lsysc_syscall_restart
 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
 jo .Lsysc_sigpending
 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
 jo .Lsysc_notify_resume
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
 jo .Lsysc_vxrs
 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
 jnz .Lsysc_asce
 j .Lsysc_return # beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
 larl %r14,.Lsysc_return
 jg schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
 larl %r14,.Lsysc_return
 jg s390_handle_mcck # TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
 lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
 jz .Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
 tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
 jnz .Lsysc_set_fs_fixup
 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
 j .Lsysc_return
.Lsysc_set_fs_fixup:
#endif
 larl %r14,.Lsysc_return
 jg set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
 larl %r14,.Lsysc_return
 jg load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_signal
 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
 jno .Lsysc_return
.Lsysc_do_syscall:
 lghi %r13,__TASK_thread
 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
 lghi %r1,0 # svc 0 returns -ENOSYS
 j .Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
 lgr %r2,%r11 # pass pointer to pt_regs
 larl %r14,.Lsysc_return
 jg do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
 lgr %r2,%r11 # pass pointer to pt_regs
 larl %r14,.Lsysc_return
 jg uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
 lgr %r2,%r11 # pass pointer to pt_regs
 larl %r14,.Lsysc_return
 jg gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
 lg %r2,__LC_CURRENT # pass pointer to task struct
 larl %r14,.Lsysc_return
 jg klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
 lgr %r2,%r11 # pass pointer to pt_regs
 larl %r14,.Lsysc_return
 jg do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
 ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
 lmg %r1,%r7,__PT_R1(%r11) # load svc arguments
 lg %r2,__PT_ORIG_GPR2(%r11)
 j .Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
 lgr %r2,%r11 # pass pointer to pt_regs
 la %r3,0
 llgh %r0,__PT_INT_CODE+2(%r11)
 stg %r0,__PT_R2(%r11)
 brasl %r14,do_syscall_trace_enter
 lghi %r0,NR_syscalls
 clgr %r0,%r2
 jnh .Lsysc_tracenogo
 sllg %r8,%r2,2
 lgf %r9,0(%r8,%r10)
.Lsysc_tracego:
 lmg %r3,%r7,__PT_R3(%r11)
 stg %r7,STACK_FRAME_OVERHEAD(%r15)
 lg %r2,__PT_ORIG_GPR2(%r11)
 BASR_EX %r14,%r9 # call sys_xxx
 stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
 TSTMSK __TI_flags(%r12),_TIF_TRACE
 jz .Lsysc_return
 lgr %r2,%r11 # pass pointer to pt_regs
 larl %r14,.Lsysc_return
 jg do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
 la %r11,STACK_FRAME_OVERHEAD(%r15)
 lg %r12,__LC_CURRENT
 brasl %r14,schedule_tail
 TRACE_IRQS_ON
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
 jne .Lsysc_tracenogo
 # it's a kernel thread
 lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
 la %r2,0(%r10)
 BASR_EX %r14,%r9
 j .Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
 stpt __LC_SYNC_ENTER_TIMER
 BPOFF
 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
 lg %r10,__LC_LAST_BREAK
 lg %r12,__LC_CURRENT
 lghi %r11,0
 larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_PGM_OLD_PSW
 tmhh %r8,0x0001 # test problem state bit
 jnz 2f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
 # cleanup critical section for program checks in sie64a
 lgr %r14,%r9
 slg %r14,BASED(.Lsie_critical_start)
 clg %r14,BASED(.Lsie_critical_length)
 jhe 0f
 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
 larl %r9,sie_exit # skip forward to sie_exit
 lghi %r11,_PIF_GUEST_FAULT
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
 jnz 1f # -> enabled, can't be a double fault
 tm __LC_PGM_ILC+3,0x80 # check for per exception
 jnz .Lpgm_svcper # -> single stepped svc
1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 j 4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 lg %r15,__LC_KERNEL_STACK
 lgr %r14,%r12
 aghi %r14,__TASK_thread # pointer to thread_struct
 lghi %r13,__LC_PGM_TDB
 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
 jz 3f
 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: stg %r10,__THREAD_last_break(%r14)
4: lgr %r13,%r11
 la %r11,STACK_FRAME_OVERHEAD(%r15)
 stmg %r0,%r7,__PT_R0(%r11)
 # clear user controlled registers to prevent speculative use
 xgr %r0,%r0
 xgr %r1,%r1
 xgr %r2,%r2
 xgr %r3,%r3
 xgr %r4,%r4
 xgr %r5,%r5
 xgr %r6,%r6
 xgr %r7,%r7
 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 stmg %r8,%r9,__PT_PSW(%r11)
 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
 mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
 stg %r13,__PT_FLAGS(%r11)
 stg %r10,__PT_ARGS(%r11)
 tm __LC_PGM_ILC+3,0x80 # check for per exception
 jz 5f
 tmhh %r8,0x0001 # kernel per event ?
 jz .Lpgm_kprobe
 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5: REENABLE_IRQS
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 larl %r1,pgm_check_table
 llgh %r10,__PT_INT_CODE+2(%r11)
 nill %r10,0x007f
 sll %r10,2
 je .Lpgm_return
 lgf %r9,0(%r10,%r1) # load address of handler routine
 lgr %r2,%r11 # pass pointer to pt_regs
 BASR_EX %r14,%r9 # branch to interrupt-handler
.Lpgm_return:
 LOCKDEP_SYS_EXIT
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jno .Lsysc_restore
 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
 jo .Lsysc_do_syscall
 j .Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
 REENABLE_IRQS
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_per_trap
 j .Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
 lghi %r13,__TASK_thread
 larl %r14,.Lsysc_per
 stg %r14,__LC_RETURN_PSW+8
 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
 lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
 STCK __LC_INT_CLOCK
 stpt __LC_ASYNC_ENTER_TIMER
 BPOFF
 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
 lg %r12,__LC_CURRENT
 larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_IO_OLD_PSW
 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 stmg %r0,%r7,__PT_R0(%r11)
 # clear user controlled registers to prevent speculative use
 xgr %r0,%r0
 xgr %r1,%r1
 xgr %r2,%r2
 xgr %r3,%r3
 xgr %r4,%r4
 xgr %r5,%r5
 xgr %r6,%r6
 xgr %r7,%r7
 xgr %r10,%r10
 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 stmg %r8,%r9,__PT_PSW(%r11)
 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 jo .Lio_restore
 TRACE_IRQS_OFF
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
 lgr %r2,%r11 # pass pointer to pt_regs
 lghi %r3,IO_INTERRUPT
 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
 jz .Lio_call
 lghi %r3,THIN_INTERRUPT
.Lio_call:
 brasl %r14,do_IRQ
 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
 jz .Lio_return
 tpi 0
 jz .Lio_return
 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 j .Lio_loop
.Lio_return:
 LOCKDEP_SYS_EXIT
 TRACE_IRQS_ON
.Lio_tif:
 TSTMSK __TI_flags(%r12),_TIF_WORK
 jnz .Lio_work # there is work to do (signals etc.)
 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
 jnz .Lio_work
.Lio_restore:
 lg %r14,__LC_VDSO_PER_CPU
 lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jno .Lio_exit_kernel
 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
 lmg %r11,%r15,__PT_R11(%r11)
 lpswe __LC_RETURN_PSW
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
 # check for preemptive scheduling
 icm %r0,15,__LC_PREEMPT_COUNT
 jnz .Lio_restore # preemption is disabled
 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
 jno .Lio_restore
 # switch to kernel stack
 lg %r1,__PT_R15(%r11)
 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 la %r11,STACK_FRAME_OVERHEAD(%r1)
 lgr %r15,%r1
 # TRACE_IRQS_ON already done at .Lio_return, call
 # TRACE_IRQS_OFF to keep things symmetrical
 TRACE_IRQS_OFF
 brasl %r14,preempt_schedule_irq
 j .Lio_return
#else
 j .Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
 lg %r1,__LC_KERNEL_STACK
 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 la %r11,STACK_FRAME_OVERHEAD(%r1)
 lgr %r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
 jo .Lio_mcck_pending
 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
 jo .Lio_reschedule
#ifdef CONFIG_LIVEPATCH
 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
 jo .Lio_patch_pending
#endif
 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
 jo .Lio_sigpending
 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
 jo .Lio_notify_resume
 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
 jo .Lio_guarded_storage
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
 jo .Lio_vxrs
 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
 jnz .Lio_asce
 j .Lio_return # beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
 # TRACE_IRQS_ON already done at .Lio_return
 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
 TRACE_IRQS_OFF
 j .Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
 lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
 jz .Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
 tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
 jnz .Lio_set_fs_fixup
 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
 j .Lio_return
.Lio_set_fs_fixup:
#endif
 larl %r14,.Lio_return
 jg set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
 larl %r14,.Lio_return
 jg load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
 # TRACE_IRQS_ON already done at .Lio_return
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,gs_load_bc_cb
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
 j .Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
 # TRACE_IRQS_ON already done at .Lio_return
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 brasl %r14,schedule # call scheduler
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
 j .Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
 lg %r2,__LC_CURRENT # pass pointer to task struct
 larl %r14,.Lio_return
 jg klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
 # TRACE_IRQS_ON already done at .Lio_return
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_signal
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
 j .Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
 # TRACE_IRQS_ON already done at .Lio_return
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,do_notify_resume
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
 j .Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
 STCK __LC_INT_CLOCK
 stpt __LC_ASYNC_ENTER_TIMER
 BPOFF
 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
 lg %r12,__LC_CURRENT
 larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_EXT_OLD_PSW
 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 stmg %r0,%r7,__PT_R0(%r11)
 # clear user controlled registers to prevent speculative use
 xgr %r0,%r0
 xgr %r1,%r1
 xgr %r2,%r2
 xgr %r3,%r3
 xgr %r4,%r4
 xgr %r5,%r5
 xgr %r6,%r6
 xgr %r7,%r7
 xgr %r10,%r10
 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 stmg %r8,%r9,__PT_PSW(%r11)
 lghi %r1,__LC_EXT_PARAMS2
 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
 jo .Lio_restore
 TRACE_IRQS_OFF
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 lghi %r3,EXT_INTERRUPT
 brasl %r14,do_IRQ
 j .Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
 stg %r3,__SF_EMPTY(%r15)
 larl %r1,.Lpsw_idle_lpsw+4
 stg %r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
 larl %r1,smp_cpu_mtid
 llgf %r1,0(%r1)
 ltgr %r1,%r1
 jz .Lpsw_idle_stcctm
 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) # store cpu counter multiple (stcctm, hand-encoded)
.Lpsw_idle_stcctm:
#endif
 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
 BPON
 STCK __CLOCK_IDLE_ENTER(%r2)
 stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
 lpswe __SF_EMPTY(%r15)
 BR_EX %r14
.Lpsw_idle_end:

/*
 * Store the floating-point controls and the floating-point or vector
 * registers, depending on whether the vector facility is available.
 * A critical section cleanup assures that the registers are stored even
 * if interrupted for some other work. The CIF_FPU flag is set to trigger
 * a lazy restore of the register contents at return from io or a system
 * call.
 */
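# Illustrative call flow (an assumption, not part of this file): in-kernel
# users of FP/VX state call save_fpu_regs() first; once _CIF_FPU is set, the
# sysc/io return paths above branch to load_fpu_regs to lazily restore the
# user register contents.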
ENTRY(save_fpu_regs)
 lg %r2,__LC_CURRENT
 aghi %r2,__TASK_thread
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
 jo .Lsave_fpu_regs_exit
 stfpc __THREAD_FPU_fpc(%r2)
 lg %r3,__THREAD_FPU_regs(%r2)
 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 jz .Lsave_fpu_regs_fp # no -> store FP regs
 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
 VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
 j .Lsave_fpu_regs_done # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
 std 0,0(%r3)
 std 1,8(%r3)
 std 2,16(%r3)
 std 3,24(%r3)
 std 4,32(%r3)
 std 5,40(%r3)
 std 6,48(%r3)
 std 7,56(%r3)
 std 8,64(%r3)
 std 9,72(%r3)
 std 10,80(%r3)
 std 11,88(%r3)
 std 12,96(%r3)
 std 13,104(%r3)
 std 14,112(%r3)
 std 15,120(%r3)
.Lsave_fpu_regs_done:
 oi __LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
 BR_EX %r14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load the floating-point controls and the floating-point or vector
 * registers. A critical section cleanup assures that the register
 * contents are loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4	(used as scratch register, clobbered)
 */
load_fpu_regs:
 lg %r4,__LC_CURRENT
 aghi %r4,__TASK_thread
 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
 jno .Lload_fpu_regs_exit
 lfpc __THREAD_FPU_fpc(%r4)
 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
 jz .Lload_fpu_regs_fp # -> no VX, load FP regs
 VLM %v0,%v15,0,%r4
 VLM %v16,%v31,256,%r4
 j .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
 ld 0,0(%r4)
 ld 1,8(%r4)
 ld 2,16(%r4)
 ld 3,24(%r4)
 ld 4,32(%r4)
 ld 5,40(%r4)
 ld 6,48(%r4)
 ld 7,56(%r4)
 ld 8,64(%r4)
 ld 9,72(%r4)
 ld 10,80(%r4)
 ld 11,88(%r4)
 ld 12,96(%r4)
 ld 13,104(%r4)
 ld 14,112(%r4)
 ld 15,120(%r4)
.Lload_fpu_regs_done:
 ni __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
 BR_EX %r14
.Lload_fpu_regs_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
 STCK __LC_MCCK_CLOCK
 BPOFF
 la %r1,4095 # validate r1
 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
 sckc __LC_CLOCK_COMPARATOR # validate comparator
 lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # validate gprs
 lg %r12,__LC_CURRENT
 larl %r13,cleanup_critical
 lmg %r8,%r9,__LC_MCK_OLD_PSW
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
 jo .Lmcck_panic # yes -> rest of mcck code invalid
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
 jno .Lmcck_panic # control registers invalid -> panic
 la %r14,4095
 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
 ptlb
 lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
 nill %r11,0xfc00 # MCESA_ORIGIN_MASK
 TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
 jno 0f
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID
 jno 0f
 .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID
 jo 0f
 sr %r14,%r14
0: sfpc %r14
 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 jo 0f
 lghi %r14,__LC_FPREGS_SAVE_AREA
 ld %f0,0(%r14)
 ld %f1,8(%r14)
 ld %f2,16(%r14)
 ld %f3,24(%r14)
 ld %f4,32(%r14)
 ld %f5,40(%r14)
 ld %f6,48(%r14)
 ld %f7,56(%r14)
 ld %f8,64(%r14)
 ld %f9,72(%r14)
 ld %f10,80(%r14)
 ld %f11,88(%r14)
 ld %f12,96(%r14)
 ld %f13,104(%r14)
 ld %f14,112(%r14)
 ld %f15,120(%r14)
 j 1f
0: VLM %v0,%v15,0,%r11
 VLM %v16,%v31,256,%r11
1: lghi %r14,__LC_CPU_TIMER_SAVE_AREA
 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
 jo 3f
 la %r14,__LC_SYNC_ENTER_TIMER
 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
 jl 0f
 la %r14,__LC_ASYNC_ENTER_TIMER
0: clc 0(8,%r14),__LC_EXIT_TIMER
 jl 1f
 la %r14,__LC_EXIT_TIMER
1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
 jl 2f
 la %r14,__LC_LAST_UPDATE_TIMER
2: spt 0(%r14)
 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
 jno .Lmcck_panic
 tmhh %r8,0x0001 # interrupting from user ?
 jnz 4f
 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
 jno .Lmcck_panic
4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
 lghi %r14,__LC_GPREGS_SAVE_AREA+64
 stmg %r0,%r7,__PT_R0(%r11)
 # clear user controlled registers to prevent speculative use
 xgr %r0,%r0
 xgr %r1,%r1
 xgr %r2,%r2
 xgr %r3,%r3
 xgr %r4,%r4
 xgr %r5,%r5
 xgr %r6,%r6
 xgr %r7,%r7
 xgr %r10,%r10
 mvc __PT_R8(64,%r11),0(%r14)
 stmg %r8,%r9,__PT_PSW(%r11)
 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 brasl %r14,s390_do_machine_check
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jno .Lmcck_return
 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
 la %r11,STACK_FRAME_OVERHEAD(%r1)
 lgr %r15,%r1
 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
 jno .Lmcck_return
 TRACE_IRQS_OFF
 brasl %r14,s390_handle_mcck
 TRACE_IRQS_ON
.Lmcck_return:
 lg %r14,__LC_VDSO_PER_CPU
 lmg %r0,%r10,__PT_R0(%r11)
 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 jno 0f
 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
 stpt __LC_EXIT_TIMER
 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
 lpswe __LC_RETURN_MCCK_PSW

.Lmcck_panic:
 lg %r15,__LC_PANIC_STACK
 la %r11,STACK_FRAME_OVERHEAD(%r15)
 j .Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
 stg %r15,__LC_SAVE_AREA_RESTART
 lg %r15,__LC_RESTART_STACK
 aghi %r15,-__PT_SIZE # create pt_regs on stack
 xc 0(__PT_SIZE,%r15),0(%r15)
 stmg %r0,%r14,__PT_R0(%r15)
 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
 aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
 lg %r2,__LC_RESTART_DATA
 lg %r3,__LC_RESTART_SOURCE
 ltgr %r3,%r3 # test source cpu address
 jm 1f # negative -> skip source stop
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
 brc 10,0b # wait for status stored
1: basr %r14,%r1 # call function
 stap __SF_EMPTY(%r15) # store cpu address
 llgh %r3,__SF_EMPTY(%r15)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
 brc 2,2b
3: j 3b

 .section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
 lg %r15,__LC_PANIC_STACK # change to panic stack
 la %r11,STACK_FRAME_OVERHEAD(%r15)
 stmg %r0,%r7,__PT_R0(%r11)
 stmg %r8,%r9,__PT_PSW(%r11)
 mvc __PT_R8(64,%r11),0(%r14)
 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 lgr %r2,%r11 # pass pointer to pt_regs
 jg kernel_stack_overflow
#endif

cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
 clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
 jl 0f
 clg %r9,BASED(.Lcleanup_table_sie+8) # .Lsie_done
 jl .Lcleanup_sie
#endif
 clg %r9,BASED(.Lcleanup_table) # system_call
 jl 0f
 clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
 jl .Lcleanup_system_call
 clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
 jl 0f
 clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
 jl .Lcleanup_sysc_tif
 clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
 jl .Lcleanup_sysc_restore
 clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
 jl 0f
 clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
 jl .Lcleanup_io_tif
 clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
 jl .Lcleanup_io_restore
 clg %r9,BASED(.Lcleanup_table+64) # psw_idle
 jl 0f
 clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
 jl .Lcleanup_idle
 clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs
 jl 0f
 clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end
 jl .Lcleanup_save_fpu_regs
 clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs
 jl 0f
 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
 jl .Lcleanup_load_fpu_regs
0: BR_EX %r14,%r11

 .align 8
.Lcleanup_table:
 .quad system_call
 .quad .Lsysc_do_svc
 .quad .Lsysc_tif
 .quad .Lsysc_restore
 .quad .Lsysc_done
 .quad .Lio_tif
 .quad .Lio_restore
 .quad .Lio_done
 .quad psw_idle
 .quad .Lpsw_idle_end
 .quad save_fpu_regs
 .quad .Lsave_fpu_regs_end
 .quad load_fpu_regs
 .quad .Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
 .quad .Lsie_gmap
 .quad .Lsie_done

.Lcleanup_sie:
 cghi %r11,__LC_SAVE_AREA_ASYNC # is this a normal interrupt?
 je 1f
 slg %r9,BASED(.Lsie_crit_mcck_start)
 clg %r9,BASED(.Lsie_crit_mcck_length)
 jh 1f
 oi __LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
 larl %r9,sie_exit # skip forward to sie_exit
 BR_EX %r14,%r11
#endif

.Lcleanup_system_call:
 # check if stpt has been executed
 clg %r9,BASED(.Lcleanup_system_call_insn)
 jh 0f
 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
 cghi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0: # check if stmg has been executed
 clg %r9,BASED(.Lcleanup_system_call_insn+8)
 jh 0f
 mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
0: # check if base register setup + TIF bit load has been done
 clg %r9,BASED(.Lcleanup_system_call_insn+16)
 jhe 0f
 # set up saved register r12 task struct pointer
 stg %r12,32(%r11)
 # set up saved register r13 __TASK_thread offset
 mvc 40(8,%r11),BASED(.Lcleanup_system_call_const)
0: # check if the user time update has been done
 clg %r9,BASED(.Lcleanup_system_call_insn+24)
 jh 0f
 lg %r15,__LC_EXIT_TIMER
 slg %r15,__LC_SYNC_ENTER_TIMER
 alg %r15,__LC_USER_TIMER
 stg %r15,__LC_USER_TIMER
0: # check if the system time update has been done
 clg %r9,BASED(.Lcleanup_system_call_insn+32)
 jh 0f
 lg %r15,__LC_LAST_UPDATE_TIMER
 slg %r15,__LC_EXIT_TIMER
 alg %r15,__LC_SYSTEM_TIMER
 stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 # set up saved register r11
 lg %r15,__LC_KERNEL_STACK
 la %r9,STACK_FRAME_OVERHEAD(%r15)
 stg %r9,24(%r11) # r11 pt_regs pointer
 # fill pt_regs
 mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
 stmg %r0,%r7,__PT_R0(%r9)
 mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
 xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
 mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
 # setup saved register r15
 stg %r15,56(%r11) # r15 stack pointer
 # set new psw address and exit
 larl %r9,.Lsysc_do_svc
 BR_EX %r14,%r11
.Lcleanup_system_call_insn:
 .quad system_call
 .quad .Lsysc_stmg
 .quad .Lsysc_per
 .quad .Lsysc_vtime+36
 .quad .Lsysc_vtime+42
.Lcleanup_system_call_const:
 .quad __TASK_thread

.Lcleanup_sysc_tif:
 larl %r9,.Lsysc_tif
 BR_EX %r14,%r11

.Lcleanup_sysc_restore:
 # check if stpt has been executed
 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
 jh 0f
 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
 cghi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
 je 1f
 lg %r9,24(%r11) # get saved pointer to pt_regs
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
 mvc 0(64,%r11),__PT_R8(%r9)
 lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
 BR_EX %r14,%r11
.Lcleanup_sysc_restore_insn:
 .quad .Lsysc_exit_timer
 .quad .Lsysc_done - 4

.Lcleanup_io_tif:
 larl %r9,.Lio_tif
 BR_EX %r14,%r11

.Lcleanup_io_restore:
 # check if stpt has been executed
 clg %r9,BASED(.Lcleanup_io_restore_insn)
 jh 0f
 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
 je 1f
 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
 mvc 0(64,%r11),__PT_R8(%r9)
 lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
 BR_EX %r14,%r11
.Lcleanup_io_restore_insn:
 .quad .Lio_exit_timer
 .quad .Lio_done - 4

.Lcleanup_idle:
 ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
 # copy interrupt clock & cpu timer
 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 cghi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck & stpt have been executed
 clg %r9,BASED(.Lcleanup_idle_insn)
 jhe 1f
 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1: # calculate idle cycles
#ifdef CONFIG_SMP
 clg %r9,BASED(.Lcleanup_idle_insn)
 jl 3f
 larl %r1,smp_cpu_mtid
 llgf %r1,0(%r1)
 ltgr %r1,%r1
 jz 3f
 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) # store cpu counter multiple (stcctm, hand-encoded)
 larl %r3,mt_cycles
 ag %r3,__LC_PERCPU_OFFSET
 la %r4,__SF_EMPTY+16(%r15)
2: lg %r0,0(%r3)
 slg %r0,0(%r4)
 alg %r0,64(%r4)
 stg %r0,0(%r3)
 la %r3,8(%r3)
 la %r4,8(%r4)
 brct %r1,2b
#endif
3: # account system time going idle
 lg %r9,__LC_STEAL_TIMER
 alg %r9,__CLOCK_IDLE_ENTER(%r2)
 slg %r9,__LC_LAST_UPDATE_CLOCK
 stg %r9,__LC_STEAL_TIMER
 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 lg %r9,__LC_SYSTEM_TIMER
 alg %r9,__LC_LAST_UPDATE_TIMER
 slg %r9,__TIMER_IDLE_ENTER(%r2)
 stg %r9,__LC_SYSTEM_TIMER
 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 # prepare return psw
 nihh %r8,0xfcfd # clear irq & wait state bits
 lg %r9,48(%r11) # return from psw_idle
 BR_EX %r14,%r11
.Lcleanup_idle_insn:
 .quad .Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
 larl %r9,save_fpu_regs
 BR_EX %r14,%r11

.Lcleanup_load_fpu_regs:
 larl %r9,load_fpu_regs
 BR_EX %r14,%r11

/*
 * Integer constants
 */
 .align 8
.Lcritical_start:
 .quad .L__critical_start
.Lcritical_length:
 .quad .L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
 .quad .Lsie_gmap
.Lsie_critical_length:
 .quad .Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
 .quad .Lsie_entry
.Lsie_crit_mcck_length:
 .quad .Lsie_skip - .Lsie_entry
#endif
 .section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
 .globl sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL
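# Illustrative expansion (the entry format of asm/syscall_table.h is an
# assumption): an entry like SYSCALL(sys_write,compat_sys_write) becomes
# ".long sys_write" here and ".long compat_sys_write" in sys_call_table_emu
# below; entries are 4 bytes wide to match the "lgf" load and the
# "slag ...,2" index shift in the system call path.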

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu) .long emu
 .globl sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif