/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef BERBERIS_INTERPRETER_RISCV64_ATOMICS_H_
#define BERBERIS_INTERPRETER_RISCV64_ATOMICS_H_

#include <cstdint>
#include <type_traits>

namespace berberis {

namespace {

// We are not using std::atomic here because using reinterpret_cast to process normal integers
// through a pointer to std::atomic is undefined behavior in C++. There was a proposal (N4013) to
// make that behavior defined: https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4013.html.
// Unfortunately it wasn't accepted, so we would have to rely on a compiler-version check instead
// (both clang and gcc use a layout that makes it safe to use std::atomic in that fashion). A final
// complication makes even that impractical: the clang builtins support the fetch_min and fetch_max
// operations that we need, but std::atomic doesn't expose them. This means we would have to mix
// both styles in this file, at which point it is simply easier to go with the clang/gcc builtins.

int AqRlToMemoryOrder(bool aq, bool rl) {
  if (aq) {
    if (rl) {
      return __ATOMIC_ACQ_REL;
    } else {
      return __ATOMIC_ACQUIRE;
    }
  } else {
    if (rl) {
      return __ATOMIC_RELEASE;
    } else {
      return __ATOMIC_RELAXED;
    }
  }
}

template <typename IntType>
uint64_t AtomicExchange(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicExchange: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicExchange: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_exchange_n(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}

template <typename IntType>
uint64_t AtomicAdd(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicAdd: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicAdd: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_fetch_add(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}

template <typename IntType>
uint64_t AtomicXor(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicXor: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicXor: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_fetch_xor(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}

template <typename IntType>
uint64_t AtomicAnd(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicAnd: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicAnd: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_fetch_and(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}

template <typename IntType>
uint64_t AtomicOr(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicOr: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicOr: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_fetch_or(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}
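// The remaining helpers back the RISC-V AMOMIN/AMOMAX family. They rely on the clang/gcc
// __atomic_fetch_min/__atomic_fetch_max builtins mentioned above, which std::atomic does not
// expose. The unsigned variants convert the result back through std::make_signed_t so that a
// narrower result is sign-extended when widened to the returned uint64_t, matching how RV64
// sign-extends 32-bit AMO results into the destination register. A minimal usage sketch (the
// caller and the insn.aq/insn.rl fields below are hypothetical, not part of this header):
//
//   uint64_t old_value = AtomicMaxu<uint32_t>(guest_addr, rs2_value, insn.aq, insn.rl);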
template <typename IntType>
uint64_t AtomicMin(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicMin: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicMin: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_fetch_min(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}

template <typename IntType>
uint64_t AtomicMax(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicMax: IntType must be integral");
  static_assert(std::is_signed_v<IntType>, "AtomicMax: IntType must be signed");
  auto ptr = ToHostAddr<IntType>(arg1);
  return __atomic_fetch_max(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl));
}

template <typename IntType>
uint64_t AtomicMinu(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicMinu: IntType must be integral");
  static_assert(!std::is_signed_v<IntType>, "AtomicMinu: IntType must be unsigned");
  auto ptr = ToHostAddr<IntType>(arg1);
  return std::make_signed_t<IntType>(
      __atomic_fetch_min(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl)));
}

template <typename IntType>
uint64_t AtomicMaxu(uint64_t arg1, uint64_t arg2, bool aq, bool rl) {
  static_assert(std::is_integral_v<IntType>, "AtomicMaxu: IntType must be integral");
  static_assert(!std::is_signed_v<IntType>, "AtomicMaxu: IntType must be unsigned");
  auto ptr = ToHostAddr<IntType>(arg1);
  return std::make_signed_t<IntType>(
      __atomic_fetch_max(ptr, IntType(arg2), AqRlToMemoryOrder(aq, rl)));
}

}  // namespace

}  // namespace berberis

#endif  // BERBERIS_INTERPRETER_RISCV64_ATOMICS_H_