/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/*
 * Interrupt control:
 */

/*
 * Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes.
 *
 * "extern inline" keeps this definition inline-only: when the compiler
 * does not inline a call, or when the function's address is taken (as
 * paravirt code does), it refers to the out-of-line copy provided in
 * arch/x86/kernel/irqflags.S.
 */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
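
/*
 * Illustrative sketch (not part of this header): the value returned by
 * native_save_fl() is an EFLAGS image, so a caller can test the IF bit
 * against X86_EFLAGS_IF directly, e.g.:
 *
 *	unsigned long flags = native_save_fl();
 *
 *	if (flags & X86_EFLAGS_IF)
 *		(interrupts were enabled when the snapshot was taken)
 *
 * This is exactly what arch_irqs_disabled_flags() below does.
 */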

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli" : : : "memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti" : : : "memory");
}

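/*
 * "sti; hlt" below relies on the documented STI interrupt shadow: STI
 * enables interrupts only after the *next* instruction has executed, so
 * no interrupt can be delivered (and lost) in the window between
 * enabling interrupts and halting.
 *
 * mds_idle_clear_cpu_buffers() flushes CPU-internal buffers before
 * idling, as part of the MDS (Microarchitectural Data Sampling)
 * mitigation.
 */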
static __always_inline void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt" : : : "memory");
}

static __always_inline void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt" : : : "memory");
}

#endif /* !__ASSEMBLY__ */

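/*
 * Under CONFIG_PARAVIRT_XXL (e.g. Xen PV guests), the arch_local_*()
 * helpers are provided by <asm/paravirt.h> as paravirt operations
 * rather than mapping directly to the native_*() primitives above.
 */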
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" takes effect only after the next
 * instruction (see the STI interrupt shadow note above
 * native_safe_halt()):
 */
static __always_inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static __always_inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	arch_local_irq_disable();

	return flags;
}
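
/*
 * Illustrative sketch (not part of this header): generic code reaches
 * these helpers through the <linux/irqflags.h> wrappers, typically:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section, interrupts off on this CPU ...
 *	local_irq_restore(flags);
 */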
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
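/*
 * Leaves the current EFLAGS value in %rax; any live value in %rax is
 * clobbered.
 */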
#define SAVE_FLAGS	pushfq; popq %rax
#endif

#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
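/*
 * Interrupts count as disabled when the IF bit is clear in an EFLAGS
 * image:
 */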
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

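/*
 * Note that this only ever *enables* interrupts: @flags comes from
 * arch_local_irq_save(), which ran with IF either set (re-enable here)
 * or already clear (interrupts stay disabled, nothing to do).
 */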
static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */