/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */
12
13 #ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
14 #define __ARM64_KVM_NVHE_SPINLOCK_H__
15
16 #include <asm/alternative.h>
17 #include <asm/lse.h>
18
/*
 * Classic ticket lock packed into one 32-bit word:
 *
 *   owner - ticket number currently holding the lock
 *   next  - next ticket number to be handed out
 *
 * The lock is free when owner == next; __val == 0 is the initial
 * unlocked state (see hyp_spin_lock_init()).
 *
 * The halfword order is flipped for big-endian builds so that 'next'
 * always lives in the upper 16 bits of __val — the lock code relies on
 * this when it takes a ticket with a single 32-bit add of (1 << 16).
 */
typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;
29
/* Reset *(l) to the unlocked state: owner == next == 0. */
#define hyp_spin_lock_init(l)	do { *(l) = (hyp_spinlock_t){ .__val = 0 }; } while (0)
34
/*
 * Acquire @lock.
 *
 * Take a ticket by atomically adding (1 << 16) to the lock word (bumping
 * the 'next' halfword); the pre-increment value read back holds our
 * ticket in its upper half. If owner != ticket, spin with wfe on the
 * owner halfword until it reaches our ticket number.
 */
static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" prfm pstl1strm, %3\n"
	"1: ldaxr %w0, %3\n"
	" add %w1, %w0, #(1 << 16)\n"
	" stxr %w2, %w1, %3\n"
	" cbnz %w2, 1b\n",
	/* LSE atomics: single atomic add-with-acquire; the nops pad this
	 * alternative to the same size as the LL/SC sequence above. */
	" mov %w2, #(1 << 16)\n"
	" ldadda %w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? (owner == next in the snapshot we read) */
	" eor %w1, %w0, %w0, ror #16\n"
	" cbz %w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
	" sevl\n"
	"2: wfe\n"
	" ldaxrh %w2, %4\n"
	/* Compare current owner against our ticket (upper half of %w0). */
	" eor %w1, %w2, %w0, lsr #16\n"
	" cbnz %w1, 2b\n"
	/* We got the lock. Critical section starts here. */
	"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}
72
/*
 * Release @lock by incrementing the owner halfword, handing the lock to
 * the holder of the next ticket. Both variants use a store-release
 * (stlrh / staddlh) so writes inside the critical section cannot be
 * reordered past the unlock.
 */
static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC: plain load is fine — only the lock holder writes owner. */
	" ldrh %w1, %0\n"
	" add %w1, %w1, #1\n"
	" stlrh %w1, %0",
	/* LSE atomics: one atomic add-with-release; nop pads to match. */
	" mov %w1, #1\n"
	" staddlh %w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
91
92 #endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */
93