/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}
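
/*
 * Illustrative sketch, not part of this header: native_save_fl() and
 * native_restore_fl() round-trip the EFLAGS register, so the saved
 * value can be tested against X86_EFLAGS_IF before being restored.
 * The helper name below is hypothetical.
 */
#if 0
static void native_flags_roundtrip_example(void)
{
	unsigned long flags = native_save_fl();	/* pushf ; pop */

	if (flags & X86_EFLAGS_IF) {
		/* interrupts were enabled at the time of the save */
	}

	native_restore_fl(flags);		/* push ; popf */
}
#endif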

static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}
/*
 * Used in the idle loop; "sti" only takes effect after the instruction
 * that follows it has executed, so "sti; hlt" halts with no window in
 * which an interrupt could be taken between the enable and the hlt:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
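
/*
 * Illustrative sketch, not part of this header: a minimal idle-style
 * loop relying on the property above.  The wakeup condition is checked
 * with interrupts off; arch_safe_halt() then re-enables them and halts
 * atomically, so a wakeup interrupt cannot be lost between the check
 * and the hlt.  idle_loop_example() and should_wake() are hypothetical
 * names.
 */
#if 0
static void idle_loop_example(void)
{
	for (;;) {
		arch_local_irq_disable();
		if (should_wake()) {
			arch_local_irq_enable();
			break;
		}
		arch_safe_halt();	/* sti; hlt -- returns with IRQs on */
	}
}
#endif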

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc.:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
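
/*
 * Illustrative sketch, not part of this header: the classic
 * save/disable ... restore pattern these helpers exist for.  Restoring
 * the saved flags rather than unconditionally enabling keeps nesting
 * safe: if the caller already had interrupts off, they stay off.
 * critical_section_example() is a hypothetical name.
 */
#if 0
static void critical_section_example(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* IRQs now off, old IF saved */
	/* ... touch data that an interrupt handler also touches ... */
	arch_local_irq_restore(flags);	/* back to the caller's state */
}
#endif
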
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif

#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack.  x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
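
/*
 * Illustrative sketch, not part of this header: a debug-style sanity
 * check built on the helpers above.  EFLAGS.IF clear means interrupts
 * are off on the current CPU.  assert_irqs_disabled_example() is a
 * hypothetical name.
 */
#if 0
static void assert_irqs_disabled_example(void)
{
	if (!arch_irqs_disabled())
		/* complain: caller expected interrupts to be off */;
}
#endif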
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
#  define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
#  define LOCKDEP_SYS_EXIT_IRQ			\
	TRACE_IRQS_ON;				\
	sti;					\
	call lockdep_sys_exit_thunk;		\
	cli;					\
	TRACE_IRQS_OFF;
# else
#  define LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */