/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomics-based checking refcount implementation.
 * Copyright (C) 2023 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */
#ifndef __ARM64_KVM_NVHE_REFCOUNT_H__
#define __ARM64_KVM_NVHE_REFCOUNT_H__

#include <asm/lse.h>

/*
 * LL/SC (exclusive load/store) fallback for the 16-bit refcount
 * fetch-add, used when LSE atomics are not available.
 *
 * Atomically adds @addend to *@refcount. Note that, unlike a classic
 * fetch-add, this returns the *new* value (post-add), since callers
 * want to inspect the updated count.
 */
static inline s16 __ll_sc_refcount_fetch_add_16(u16 *refcount, s16 addend)
{
	u16 new;
	u32 flag;

	asm volatile(
	/* Prefetch the line for store to cut down on LL/SC retries. */
	"	prfm	pstl1strm, %[refcount]\n"
	"1:	ldxrh	%w[new], %[refcount]\n"		/* exclusive load */
	"	add	%w[new], %w[new], %w[addend]\n"
	"	stxrh	%w[flag], %w[new], %[refcount]\n"	/* try exclusive store */
	"	cbnz	%w[flag], 1b"			/* store failed: retry */
	: [refcount] "+Q" (*refcount),
	  [new] "=&r" (new),
	  [flag] "=&r" (flag)
	: [addend] "Ir" (addend));

	return new;
}

#ifdef CONFIG_ARM64_LSE_ATOMICS

/*
 * LSE (ARMv8.1 atomics) implementation of the 16-bit refcount
 * fetch-add.
 *
 * LDADDH returns the *old* value of the memory location, so @addend is
 * re-applied before returning in order to yield the post-add value,
 * matching the LL/SC variant above.
 */
static inline s16 __lse_refcount_fetch_add_16(u16 *refcount, s16 addend)
{
	s16 old;

	asm volatile(__LSE_PREAMBLE
	"	ldaddh	%w[addend], %w[old], %[refcount]"
	: [refcount] "+Q" (*refcount),
	  [old] "=r" (old)
	: [addend] "r" (addend));

	return old + addend;
}

#endif /* CONFIG_ARM64_LSE_ATOMICS */

/*
 * Atomically add @addend to the refcount at @refcount and return the
 * new value.
 *
 * @size must be a compile-time constant naming a supported refcount
 * width (currently only 2 bytes); any other size fails the build via
 * BUILD_BUG_ON_MSG(). Dispatch between the LSE and LL/SC helpers above
 * is done by __lse_ll_sc_body().
 *
 * BUGs if the resulting count is negative, i.e. on underflow (or a
 * 16-bit count overflowing into the sign bit) — hence the "checking"
 * refcount.
 */
static inline u64 __hyp_refcount_fetch_add(void *refcount, const size_t size,
					   const s64 addend)
{
	s64 new;

	switch (size) {
	case 2:
		new = __lse_ll_sc_body(refcount_fetch_add_16, refcount, addend);
		break;
	default:
		/* Reject unsupported refcount widths at compile time. */
		BUILD_BUG_ON_MSG(1, "Unsupported refcount size");
		unreachable();
	}

	BUG_ON(new < 0);
	return new;
}


/* Atomically increment @r; returns the new count (BUGs if it goes negative). */
#define hyp_refcount_inc(r)	__hyp_refcount_fetch_add(&(r), sizeof(r), 1)
/* Atomically decrement @r; returns the new count (BUGs on underflow). */
#define hyp_refcount_dec(r)	__hyp_refcount_fetch_add(&(r), sizeof(r), -1)
/* Plain (tear-free, unordered) read of the current count. */
#define hyp_refcount_get(r)	READ_ONCE(r)
/*
 * (Re)initialize @r to @v. Warns if the current count is non-zero,
 * i.e. the object still appears to be in use.
 */
#define hyp_refcount_set(r, v)	do {			\
	typeof(r) *__rp = &(r);				\
	WARN_ON(hyp_refcount_get(*__rp));		\
	WRITE_ONCE(*__rp, v);				\
} while (0)

#endif /* __ARM64_KVM_NVHE_REFCOUNT_H__ */