/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")
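/*
 * Code tagged __cpuidle is collected into the .cpuidle.text section,
 * which lets the kernel recognize from an instruction pointer alone
 * that a CPU is idling there (see cpu_in_idle()).
 */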

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
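
/*
 * Example (illustrative sketch only): the value returned above is the
 * raw EFLAGS image, so callers can test the interrupt flag against
 * X86_EFLAGS_IF from <asm/processor-flags.h>:
 *
 *	unsigned long flags = native_save_fl();
 *
 *	if (flags & X86_EFLAGS_IF)
 *		;	/* interrupts were enabled at the pushf */
 */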

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
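	/*
	 * "sti" keeps interrupts masked for one more instruction, so
	 * the CPU cannot take an interrupt between enabling and the
	 * "hlt" below: a wakeup interrupt always finds the CPU halted.
	 */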
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" only takes effect after the next
 * instruction, so "sti; hlt" enables interrupts and halts atomically:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to shut down the
 * processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
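
/*
 * Example (illustrative sketch only): the canonical pairing with
 * arch_local_irq_restore() below:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section runs with IRQs off ...
 *	arch_local_irq_restore(flags);
 *
 * Generic code normally reaches this pair through local_irq_save()
 * and local_irq_restore() rather than calling the arch_ helpers
 * directly.
 */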
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
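/*
 * The saved flags end up in %rax, so assembly users must treat %rax
 * as clobbered.
 */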
#define SAVE_FLAGS		pushfq; popq %rax
#endif /* CONFIG_DEBUG_ENTRY */

#define INTERRUPT_RETURN	jmp native_iret

#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
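
/*
 * Example (illustrative sketch only): restore is not a raw EFLAGS
 * write; it re-enables interrupts only when @flags says they were
 * enabled, and it never disables them. Hence
 *
 *	arch_local_irq_restore(arch_local_irq_save());
 *
 * leaves the interrupt flag exactly as it found it.
 */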
#else
#ifdef CONFIG_X86_64
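/*
 * A Xen PV guest manages the kernel GS base through the hypervisor,
 * so SWAPGS must patch to a no-op there; ALTERNATIVE rewrites the
 * instruction at boot based on X86_FEATURE_XENPV.
 */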
#ifdef CONFIG_XEN_PV
#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
#else
#define SWAPGS	swapgs
#endif /* CONFIG_XEN_PV */
#endif /* CONFIG_X86_64 */
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */