/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

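/*
 * Note: "push ; popf" rewrites the whole EFLAGS register, IF included,
 * so @flags should be a value previously returned by native_save_fl().
 * Illustrative pairing (a sketch, not a new API):
 *
 *	unsigned long flags = native_save_fl();
 *	...
 *	native_restore_fl(flags);
 */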
extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

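/*
 * "sti; hlt" is wakeup-safe: sti enables interrupt recognition only
 * after the instruction that follows it, so a pending interrupt cannot
 * be delivered between the two and leave the CPU halted with nothing
 * left to wake it.  mds_idle_clear_cpu_buffers() flushes CPU buffers
 * before idling as part of the MDS mitigation.
 */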
static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

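/*
 * Unlike native_safe_halt(), this does not touch IF: with interrupts
 * disabled the CPU stays halted until an NMI, SMI or reset.
 */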
static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti enables interrupts only after the
 * instruction following it, so the subsequent hlt cannot miss a
 * wakeup interrupt:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
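/*
 * Illustrative caller pattern (a sketch, not part of this header):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section, must not sleep ...
 *	arch_local_irq_restore(flags);
 *
 * Restoring the saved IF state, rather than unconditionally
 * re-enabling, keeps nested save/restore pairs correct.
 */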
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif

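/*
 * Native user-return paths: an iret via native_iret, or swapgs to
 * restore the user GS base followed by sysretq/sysretl:
 */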
#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
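/*
 * IRQs count as disabled when the IF bit (X86_EFLAGS_IF, bit 9 of
 * EFLAGS) is clear in a saved flags word:
 */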
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
#else
#ifdef CONFIG_X86_64
#ifdef CONFIG_XEN_PV
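/*
 * Patch swapgs out at runtime on Xen PV, where the hypervisor manages
 * the kernel GS base and the privileged swapgs instruction is
 * unavailable to the guest:
 */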
#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
#else
#define SWAPGS	swapgs
#endif
#endif
#endif /* !__ASSEMBLY__ */

#endif