/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      |  extra caller-saved | return
 [callee-clobbered]   |                    |  [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 |  r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
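
/*
 * Illustrative sketch only (the callee name below is made up): a
 * three-argument call under the 64-bit convention above.
 *
 *        movq    %r12, %rdi              # arg1
 *        movq    %r13, %rsi              # arg2
 *        movl    $64, %edx               # arg3
 *        call    copy_block_example      # result comes back in %rax;
 *                                        # rbx/rbp/r12-r15 survive the call,
 *                                        # rcx/rdx/rsi/rdi/r8-r11 may be clobbered
 */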

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
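
/*
 * Illustrative sketch (not used by the entry code itself): once the full
 * frame is in place, the saved user registers can be addressed relative
 * to %rsp with the offsets above, e.g.
 *
 *        movq    ORIG_RAX(%rsp), %rsi    # syscall nr / error code / IRQ nr
 *        movq    RSP(%rsp), %rdx         # interrupted context's stack pointer
 *        testb   $3, CS(%rsp)            # low CS bits != 0 => came from user mode
 */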

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif

	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm
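
/*
 * Sketched use on an entry path (assumes the hardware frame SS..RIP and
 * orig_ax have already been pushed, as the callers in the entry code do):
 *
 *        PUSH_AND_CLEAR_REGS             # complete pt_regs, zero the GPRs
 *        ...                             # C code can now run without seeing
 *                                        # attacker-controlled register values
 */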

.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
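
/*
 * Sketched counterpart on the exit path.  pop_rdi=0 is for callers that
 * still need a scratch register (e.g. for a user CR3 switch) and restore
 * %rdi themselves afterwards:
 *
 *        POP_REGS pop_rdi=0
 *        ...                             # e.g. switch CR3 using the scratch reg
 *        popq    %rdi
 */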

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
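
/*
 * For example (sketch): with a kernel CR3 value in a register, the
 * matching user CR3 is reached by setting the PGD bit (and the user PCID
 * bit when PCIDs are in use), which is what the macros below do:
 *
 *        orq     $(PTI_USER_PGTABLE_AND_PCID_MASK), %rdi
 *        movq    %rdi, %cr3
 */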

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
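
/*
 * Sketched pairing on a paranoid-style path, where the interrupted
 * context's CR3 must be preserved because the entry may have come from
 * either the user or the kernel page tables (register choice is
 * illustrative):
 *
 *        SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *        ...                             # kernel work
 *        RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */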

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
 *
 * The optional save_reg argument is used to save/restore the current
 * SPEC_CTRL value, which is needed on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Similar to IBRS_ENTER: requires the kernel GS and CR3 to be established
 * and clobbers the regs it uses (AX, CX, DX). Must be called after the
 * last RET instruction.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
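
/*
 * Sketched pairing on a paranoid-style path, preserving the previous MSR
 * value (the register choice is illustrative):
 *
 *        IBRS_ENTER save_reg=%r15
 *        ...                             # kernel work
 *        IBRS_EXIT save_reg=%r15
 *
 * Ordinary syscall/interrupt paths use the plain IBRS_ENTER / IBRS_EXIT
 * forms without a save_reg.
 */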

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
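
/*
 * Sketched use around a conditional SWAPGS on an exception entry (labels
 * are illustrative):
 *
 *        testb   $3, CS(%rsp)            # did we come from user space?
 *        jz      .Lfrom_kernel
 *        swapgs
 *        FENCE_SWAPGS_USER_ENTRY         # fence the user (swapgs) path
 *        jmp     .Lgs_ready
 * .Lfrom_kernel:
 *        FENCE_SWAPGS_KERNEL_ENTRY       # fence the kernel (non-swapgs) path
 * .Lgs_ready:
 */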

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm
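
/*
 * Sketched use on an entry path that cannot trust the interrupted GSBASE
 * (FSGSBASE-capable CPUs); the register choice is illustrative:
 *
 *        SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *        ...                             # %gs-based per-CPU accesses are now safe
 *        wrgsbase %rbx                   # restore the interrupted context's GSBASE
 */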

#else /* CONFIG_X86_64 */
# undef UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads the guest's TSC_AUX on VM-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
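
/*
 * Worked example with hypothetical values: on CPU 5 of node 1 the segment
 * limit reads back as (1 << VDSO_CPUNODE_BITS) | 5, masking with
 * VDSO_CPUNODE_MASK leaves 5, and __per_cpu_offset[5] ends up in \reg.
 */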

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */