/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However, the queued spinlock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
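
/*
 * Illustrative sketch only; the helper below is hypothetical and not part of
 * this header. A lock slowpath would typically sample the yield count before
 * re-checking the lock word, then pass that sample to yield_to_preempted():
 * if the target vCPU has been dispatched again in the meantime, the count no
 * longer matches and the hypervisor can return from H_CONFER early instead of
 * donating cycles to a vCPU that is already running.
 */
#if 0
static inline void example_yield_to_owner(int owner_cpu, bool still_locked)
{
	u32 yield_count = yield_count_of(owner_cpu);	/* sample first */

	/* Odd count: the owner is currently preempted, so donate our cycles. */
	if ((yield_count & 1) && still_locked)
		yield_to_preempted(owner_cpu, yield_count);
}
#endif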

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
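
/*
 * Sketch of a hypothetical caller (not part of this header): when the owner
 * of the contended resource is not known, a waiter on a shared processor can
 * still give up its timeslice in favour of other vCPUs in the partition by
 * conferring with a target of -1, rather than spinning uselessly.
 */
#if 0
static inline void example_relax(void)
{
	if (is_shared_processor())
		yield_to_any();		/* confer to any other vCPU */
	else
		cpu_relax();
}
#endif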
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
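
/*
 * Usage sketch with a hypothetical caller (not part of this header): generic
 * spin-wait loops can poll vcpu_is_preempted() and stop busy-waiting once the
 * CPU they are spinning on is no longer running its vCPU.
 */
#if 0
static inline bool example_spin_on_owner(int owner_cpu, int *done)
{
	while (!READ_ONCE(*done)) {
		/* Owner's vCPU is off the physical CPU: spinning is wasted work. */
		if (vcpu_is_preempted(owner_cpu))
			return false;
		cpu_relax();
	}
	return true;
}
#endif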

static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */