/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone rwlock implementation for use by the non-VHE KVM
 * hypervisor code running at EL2. This is *not* a fair lock and is
 * likely to scale very badly under contention.
 *
 * Copyright (C) 2022 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by 087133ac9076 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_RWLOCK_H__
#define __ARM64_KVM_NVHE_RWLOCK_H__

#include <linux/bits.h>

typedef struct {
	u32	__val;
} hyp_rwlock_t;

#define __HYP_RWLOCK_INITIALIZER \
	{ .__val = 0 }

#define __HYP_RWLOCK_UNLOCKED \
	((hyp_rwlock_t) __HYP_RWLOCK_INITIALIZER)

#define DEFINE_HYP_RWLOCK(x)	hyp_rwlock_t x = __HYP_RWLOCK_UNLOCKED

#define hyp_rwlock_init(l)						\
do {									\
	*(l) = __HYP_RWLOCK_UNLOCKED;					\
} while (0)

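/*
 * Illustrative usage sketch; the lock name below is made up and is not
 * taken from an in-tree caller:
 *
 *	static DEFINE_HYP_RWLOCK(hyp_vm_table_lock);
 *
 *	hyp_read_lock(&hyp_vm_table_lock);
 *	... read the shared data ...
 *	hyp_read_unlock(&hyp_vm_table_lock);
 *
 *	hyp_write_lock(&hyp_vm_table_lock);
 *	... modify the shared data ...
 *	hyp_write_unlock(&hyp_vm_table_lock);
 */
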
#define __HYP_RWLOCK_WRITER_BIT	31

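/*
 * The lock word is zero when the lock is free: bit 31 is set while a
 * writer holds the lock and bits 0-30 count the readers inside the
 * critical section.
 *
 * hyp_write_lock() spins (sleeping via WFE) until the whole word reads
 * as zero and then claims the lock by installing the writer bit.
 */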
static inline void hyp_write_lock(hyp_rwlock_t *lock)
{
	u32 tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (lock->__val)
	: "r" (BIT(__HYP_RWLOCK_WRITER_BIT))
	: "memory");
}

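/*
 * hyp_write_unlock() clears the whole word (dropping the writer bit)
 * with release semantics.
 */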
static inline void hyp_write_unlock(hyp_rwlock_t *lock)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (lock->__val) :: "memory");
}

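/*
 * hyp_read_lock() increments the reader count, backing off (via WFE)
 * and retrying while the writer bit is set.
 */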
static inline void hyp_read_lock(hyp_rwlock_t *lock)
{
	u32 tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, %3, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, %3, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (lock->__val)
	: "i" (__HYP_RWLOCK_WRITER_BIT)
	: "cc", "memory");
}

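/*
 * hyp_read_unlock() decrements the reader count with release
 * semantics.
 */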
static inline void hyp_read_unlock(hyp_rwlock_t *lock)
{
	u32 tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (lock->__val)
	:
	: "memory");
}

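/*
 * When CONFIG_PKVM_STRICT_CHECKS is enabled, callers can assert that
 * the lock is held for writing; otherwise the check compiles to
 * nothing.
 */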
#ifdef CONFIG_PKVM_STRICT_CHECKS
static inline void hyp_assert_write_lock_held(hyp_rwlock_t *lock)
{
	BUG_ON(!(READ_ONCE(lock->__val) & BIT(__HYP_RWLOCK_WRITER_BIT)));
}
#else
static inline void hyp_assert_write_lock_held(hyp_rwlock_t *lock) { }
#endif

#endif	/* __ARM64_KVM_NVHE_RWLOCK_H__ */