/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
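
/*
 * Worked example of the inverted-bit scheme (illustration only):
 *
 *   PREEMPT_ENABLED                        0x80000000  bit set, count 0
 *   ... after preempt_disable()            0x80000001
 *   ... after set_preempt_need_resched()   0x00000001  bit cleared
 *   ... after preempt_enable()'s decrement 0x00000000  -> reschedule
 */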

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

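/*
 * Install a new preempt count while preserving the current CPU's
 * PREEMPT_NEED_RESCHED state; the cmpxchg loop retries if the per-CPU
 * word changes underneath us.
 */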
static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = raw_cpu_read_4(__preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)
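
/*
 * Note: on x86 the preempt count lives in per-CPU data rather than in the
 * task, so there is nothing to initialize per task; only the idle task's
 * CPU needs its counter forced to PREEMPT_DISABLED.
 */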

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}
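
/*
 * Because the bit is inverted, "set" clears it in the per-CPU word and
 * "clear" sets it; all three helpers only touch the current CPU's counter.
 */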

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
	raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
}
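
/*
 * GEN_UNARY_RMWcc emits a single "decl" on the per-CPU counter and returns
 * true when the result is zero (the "e" condition), giving the
 * one-instruction decrement-and-test described above.
 */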

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}

#ifdef CONFIG_PREEMPTION
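/*
 * The *_thunk variants (assumption: provided by the x86 entry assembly) save
 * and restore the caller-clobbered registers around the real call, so the
 * inline asm below only needs ASM_CALL_CONSTRAINT rather than a full clobber
 * list.
 */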
extern asmlinkage void preempt_schedule_thunk(void);
# define __preempt_schedule() \
	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)

extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_notrace_thunk(void);
# define __preempt_schedule_notrace() \
	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)

extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */