/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#ifndef ARCH_AARCH64_ARCH_SYNC_H
#define ARCH_AARCH64_ARCH_SYNC_H

#include <common/types.h>
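/* Compiler-only barrier: prevents the compiler from reordering memory
 * accesses across it, but emits no CPU barrier instruction. */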
#define COMPILER_BARRIER() asm volatile("" ::: "memory")
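/* Event and wait hints: sev signals an event to other cores, wfe/wfi put the
 * core into a low-power state until an event or interrupt arrives. */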
#define sev() asm volatile("sev" : : : "memory")
#define wfe() asm volatile("wfe" : : : "memory")
#define wfi() asm volatile("wfi" : : : "memory")
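/* Raw AArch64 barriers: isb flushes the instruction pipeline, dmb orders
 * memory accesses, dsb additionally waits for outstanding accesses to
 * complete. The opt argument selects the domain and access type
 * (e.g. sy, ish, ld, st). */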
#define isb()    asm volatile("isb" : : : "memory")
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
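/* Full-system barriers (dsb): order accesses against all observers,
 * including devices, and wait for completion. */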
#define mb()  dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
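/* Multiprocessor barriers restricted to the inner shareable domain, which
 * covers ordering between CPUs. */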
#define smp_mb()  dmb(ish)
#define smp_rmb() dmb(ishld)
#define smp_wmb() dmb(ishst)
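/* Barriers over the outer shareable domain, for ordering accesses that are
 * also observed by DMA-capable devices. */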
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
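/* Load-acquire / store-release accessors built on the ldar and stlr
 * instructions; %w selects the 32-bit register view, %x the 64-bit one. */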
#define ldar_32(ptr, value) \
    asm volatile("ldar %w0, [%1]" : "=r"(value) : "r"(ptr))
#define stlr_32(ptr, value) \
    asm volatile("stlr %w0, [%1]" : : "rZ"(value), "r"(ptr))

#define ldar_64(ptr, value) \
    asm volatile("ldar %x0, [%1]" : "=r"(value) : "r"(ptr))
#define stlr_64(ptr, value) \
    asm volatile("stlr %x0, [%1]" : : "rZ"(value), "r"(ptr))
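/* Compare-and-swap built from an exclusive load/store (ldaxr/stlxr) loop:
 * load *ptr with acquire semantics, compare it with `compare`, and if equal
 * attempt a store-release of `exchange`, retrying while the exclusive store
 * fails. Evaluates to the value observed in *ptr before the operation. */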
// clang-format off
#define __atomic_compare_exchange(ptr, compare, exchange, len, width)	\
({									\
        u##len oldval;							\
        u32 ret;							\
        asm volatile (  "1: ldaxr   %"#width"0, %2\n"			\
                        "   cmp     %"#width"0, %"#width"3\n"		\
                        "   b.ne    2f\n"				\
                        "   stlxr   %w1, %"#width"4, %2\n"		\
                        "   cbnz    %w1, 1b\n"				\
                        "2:":"=&r" (oldval), "=&r"(ret), "+Q"(*(ptr))	\
                        :"r"(compare), "r"(exchange)			\
                        );						\
        oldval;								\
})
// clang-format on
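/* Width-specific wrappers. Each evaluates to the value observed before the
 * operation, so a (hypothetical) caller can test for success with, e.g.,
 * atomic_compare_exchange_64(&x, old, new) == old. */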
#define atomic_compare_exchange_64(ptr, compare, exchange) \
    __atomic_compare_exchange(ptr, compare, exchange, 64, x)
#define atomic_compare_exchange_32(ptr, compare, exchange) \
    __atomic_compare_exchange(ptr, compare, exchange, 32, w)

#define atomic_cmpxchg_32 atomic_compare_exchange_32
#define atomic_cmpxchg_64 atomic_compare_exchange_64
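/* Unconditionally swaps in `exchange` with an ldaxr/stlxr loop and returns
 * the previous value of *ptr. */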
// clang-format off
static inline s64 atomic_exchange_64(s64 * ptr, s64 exchange)
{
        s64 oldval;
        s32 ret;
        asm volatile (  "1: ldaxr   %x0, %2\n"
                        "   stlxr   %w1, %x3, %2\n"
                        "   cbnz    %w1, 1b\n"
                        "2:":"=&r" (oldval), "=&r"(ret), "+Q"(*ptr)
                        :"r"(exchange)
                     );
        return oldval;
}
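/* Generic fetch-and-op: load *ptr exclusively, apply the given ALU
 * instruction (`op`) with `val`, and store-release the result, retrying
 * until the exclusive store succeeds. Evaluates to the old value. */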
#define __atomic_fetch_op(ptr, val, len, width, op)			\
({									\
        u##len oldval, newval;						\
        u32 ret;							\
         asm volatile ( "1: ldaxr   %"#width"0, %3\n"			\
                        "   "#op"   %"#width"1, %"#width"0, %"#width"4\n"\
                        "   stlxr   %w2, %"#width"1, %3\n"		\
                        "   cbnz    %w2, 1b\n"				\
                        "2:":"=&r" (oldval), "=&r"(newval),		\
                        "=&r"(ret), "+Q"(*(ptr))			\
                        :"r"(val)					\
                      );						\
        oldval;								\
})
// clang-format on
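/* Common instantiations of __atomic_fetch_op for add, sub and bit set. */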
#define atomic_fetch_sub_32(ptr, val) __atomic_fetch_op(ptr, val, 32, w, sub)
#define atomic_fetch_sub_64(ptr, val) __atomic_fetch_op(ptr, val, 64, x, sub)
#define atomic_fetch_add_32(ptr, val) __atomic_fetch_op(ptr, val, 32, w, add)
#define atomic_fetch_add_64(ptr, val) __atomic_fetch_op(ptr, val, 64, x, add)
#define atomic_set_bit_32(ptr, val) \
    __atomic_fetch_op(ptr, 1 << (val), 32, w, or)
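/* Adds `add` to *ptr unless the current value equals `not_expect`; loops on
 * CAS failure and returns the value of *ptr observed before the update, or
 * `not_expect` if the add was skipped. */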
static inline u64 atomic_fetch_add_64_unless(u64* ptr, int add, int not_expect)
{
    u64 val = *ptr, oldval;
    smp_rmb();

    while (true) {
        if (val == not_expect)
            break;
        oldval = atomic_compare_exchange_64(ptr, val, val + add);
        if (oldval == val)
            break;
        val = *ptr;
    }

    return val;
}

#endif /* ARCH_AARCH64_ARCH_SYNC_H */