1 /*
2 * Copyright (C) 2023 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef BERBERIS_INTERPRETER_RISCV64_ATOMICS_H_
18 #define BERBERIS_INTERPRETER_RISCV64_ATOMICS_H_
19
20 #include <cstdint>
21 #include <type_traits>
22
23 namespace berberis {
24
25 namespace {
26
// We are not using std::atomic here because using reinterpret_cast to process normal integers via
// a pointer to std::atomic is undefined behavior in C++. There was a proposal (N4013) to make that
// behavior defined: https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4013.html.
// Unfortunately it wasn't accepted, so we would need to rely on a check of the compiler version
// (both clang and gcc use a layout that makes it safe to use std::atomic in that fashion). But a
// final complication makes even that impractical: while the clang builtins support the fetch_min
// and fetch_max operations that we need, std::atomic doesn't expose them. This means we would need
// to mix both styles in this file. At that point it's simpler to just use the clang/gcc builtins
// throughout.
36
// Maps the RISC-V acquire (aq) and release (rl) instruction bits to the
// corresponding GCC/Clang __ATOMIC_* memory-order constant, suitable for
// passing to the __atomic_* builtins used below.
//
// aq && rl  -> __ATOMIC_ACQ_REL
// aq only   -> __ATOMIC_ACQUIRE
// rl only   -> __ATOMIC_RELEASE
// neither   -> __ATOMIC_RELAXED
//
// Marked constexpr: it is a pure mapping, and the file already relies on
// C++17 features, so compile-time evaluation comes for free.
constexpr int AqRlToMemoryOrder(bool aq, bool rl) {
  if (aq && rl) {
    return __ATOMIC_ACQ_REL;
  }
  if (aq) {
    return __ATOMIC_ACQUIRE;
  }
  if (rl) {
    return __ATOMIC_RELEASE;
  }
  return __ATOMIC_RELAXED;
}
52
53 template <typename IntType>
AtomicExchange(uint64_t arg1,uint64_t arg2,bool aq,bool rl)54 uint64_t AtomicExchange(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
55 static_assert(std::is_integral_v<IntType>, "AtomicExchange: IntType must be integral");
56 static_assert(std::is_signed_v<IntType>, "AtomicExchange: IntType must be signed");
57 auto ptr = ToHostAddr<IntType>(arg1);
58 return __atomic_exchange_n(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
59 }
60
61 template <typename IntType>
AtomicAdd(uint64_t arg1,uint64_t arg2,bool aq,bool rl)62 uint64_t AtomicAdd(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
63 static_assert(std::is_integral_v<IntType>, "AtomicAdd: IntType must be integral");
64 static_assert(std::is_signed_v<IntType>, "AtomicAdd: IntType must be signed");
65 auto ptr = ToHostAddr<IntType>(arg1);
66 return __atomic_fetch_add(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
67 }
68
69 template <typename IntType>
AtomicXor(uint64_t arg1,uint64_t arg2,bool aq,bool rl)70 uint64_t AtomicXor(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
71 static_assert(std::is_integral_v<IntType>, "AtomicXor: IntType must be integral");
72 static_assert(std::is_signed_v<IntType>, "AtomicXor: IntType must be signed");
73 auto ptr = ToHostAddr<IntType>(arg1);
74 return __atomic_fetch_xor(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
75 }
76
77 template <typename IntType>
AtomicAnd(uint64_t arg1,uint64_t arg2,bool aq,bool rl)78 uint64_t AtomicAnd(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
79 static_assert(std::is_integral_v<IntType>, "AtomicAnd: IntType must be integral");
80 static_assert(std::is_signed_v<IntType>, "AtomicAnd: IntType must be signed");
81 auto ptr = ToHostAddr<IntType>(arg1);
82 return __atomic_fetch_and(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
83 }
84
85 template <typename IntType>
AtomicOr(uint64_t arg1,uint64_t arg2,bool aq,bool rl)86 uint64_t AtomicOr(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
87 static_assert(std::is_integral_v<IntType>, "AtomicOr: IntType must be integral");
88 static_assert(std::is_signed_v<IntType>, "AtomicOr: IntType must be signed");
89 auto ptr = ToHostAddr<IntType>(arg1);
90 return __atomic_fetch_or(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
91 }
92
93 template <typename IntType>
AtomicMin(uint64_t arg1,uint64_t arg2,bool aq,bool rl)94 uint64_t AtomicMin(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
95 static_assert(std::is_integral_v<IntType>, "AtomicMin: IntType must be integral");
96 static_assert(std::is_signed_v<IntType>, "AtomicMin: IntType must be signed");
97 auto ptr = ToHostAddr<IntType>(arg1);
98 return __atomic_fetch_min(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
99 }
100
101 template <typename IntType>
AtomicMax(uint64_t arg1,uint64_t arg2,bool aq,bool rl)102 uint64_t AtomicMax(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
103 static_assert(std::is_integral_v<IntType>, "AtomicMax: IntType must be integral");
104 static_assert(std::is_signed_v<IntType>, "AtomicMax: IntType must be signed");
105 auto ptr = ToHostAddr<IntType>(arg1);
106 return __atomic_fetch_max(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
107 }
108
109 template <typename IntType>
AtomicMinu(uint64_t arg1,uint64_t arg2,bool aq,bool rl)110 uint64_t AtomicMinu(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
111 static_assert(std::is_integral_v<IntType>, "AtomicMinu: IntType must be integral");
112 static_assert(!std::is_signed_v<IntType>, "AtomicMinu: IntType must be unsigned");
113 auto ptr = ToHostAddr<IntType>(arg1);
114 return std::make_signed_t<IntType>(
115 __atomic_fetch_min(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl)));
116 }
117
118 template <typename IntType>
AtomicMaxu(uint64_t arg1,uint64_t arg2,bool aq,bool rl)119 uint64_t AtomicMaxu(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
120 static_assert(std::is_integral_v<IntType>, "AtomicMaxu: IntType must be integral");
121 static_assert(!std::is_signed_v<IntType>, "AtomicMaxu: IntType must be unsigned");
122 auto ptr = ToHostAddr<IntType>(arg1);
123 return std::make_signed_t<IntType>(
124 __atomic_fetch_max(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl)));
125 }
126
127 } // namespace
128
129 } // namespace berberis
130
131 #endif // BERBERIS_INTERPRETER_RISCV64_ATOMICS_H_
132