/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_PARAVIRT_H
#define _ASM_LOONGARCH_PARAVIRT_H
#include <asm/kvm_para.h>

#ifdef CONFIG_PARAVIRT
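/*
 * With CONFIG_PARAVIRT enabled, the KVM para-API is assumed to be
 * reachable; individual features are still probed at run time via
 * pv_feature_support() below.
 */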
static inline bool kvm_para_available(void)
{
	return true;
}
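/*
 * Static keys checked by the scheduler's steal-time accounting paths;
 * presumably flipped on during pv_time_init() once the hypervisor
 * advertises steal-time support.
 */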
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

struct pv_time_ops {
	unsigned long long (*steal_clock)(int cpu);
};
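/*
 * Per-vCPU record shared with the host, mirroring the kvm_steal_time
 * layout used by other KVM ports: @steal accumulates stolen time
 * (nanoseconds), @version is bumped around host updates (odd while an
 * update is in flight), and @preempted is set while the vCPU is
 * descheduled.  @pad rounds the structure up to 64 bytes.
 */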
struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u8  preempted;
	__u8  pad[47];
};
extern struct pv_time_ops pv_time_ops;

bool pv_is_native_spin_unlock(void);

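/* CPU time stolen from @cpu by the hypervisor, in nanoseconds. */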
static inline u64 paravirt_steal_clock(int cpu)
{
	return pv_time_ops.steal_clock(cpu);
}

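/*
 * Hypercall helpers: ask the host whether paravirt @feature is
 * implemented, and notify it of a guest-side event for @feature.
 */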
static inline bool pv_feature_support(int feature)
{
	return kvm_hypercall1(KVM_HC_FUNC_FEATURE, feature) == KVM_RET_SUC;
}
static inline void pv_notify_host(int feature, unsigned long data)
{
	kvm_hypercall2(KVM_HC_FUNC_NOTIFY, feature, data);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
struct qspinlock;

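/*
 * Hooks for the queued-spinlock slowpath.  Native entries spin;
 * KVM-backed entries can use wait/kick hypercalls so a vCPU blocked
 * on a lock sleeps instead of burning host CPU time.
 */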
struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	void (*queued_spin_unlock)(struct qspinlock *lock);
	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);
	bool (*vcpu_is_preempted)(long cpu);
};

extern struct pv_lock_ops pv_lock_ops;

void __init kvm_spinlock_init(void);

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
		u32 val)
{
	pv_lock_ops.queued_spin_lock_slowpath(lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	pv_lock_ops.queued_spin_unlock(lock);
}

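/*
 * pv_wait() parks the caller until *ptr no longer reads @val (spurious
 * wakeups are possible); pv_kick() wakes the vCPU parked on @cpu.
 */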
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	pv_lock_ops.wait(ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	pv_lock_ops.kick(cpu);
}

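/*
 * Lets lock/scheduler code avoid optimistic spinning when the vCPU
 * holding the contended resource has been preempted by the host.
 */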
static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return pv_lock_ops.vcpu_is_preempted(cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

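/* Boot-time setup for paravirt steal-time accounting and paravirt IPIs. */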
int __init pv_time_init(void);
int __init pv_ipi_init(void);
#else
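/* !CONFIG_PARAVIRT: provide inert stubs. */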
static inline bool kvm_para_available(void)
{
	return false;
}

#define pv_time_init() do {} while (0)
#define pv_ipi_init() do {} while (0)
#endif
#endif /* _ASM_LOONGARCH_PARAVIRT_H */