/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_QUASI_ATOMIC_H_
#define ART_RUNTIME_BASE_QUASI_ATOMIC_H_

#include <stdint.h>
#include <atomic>
#include <limits>
#include <vector>

#include <android-base/logging.h>

#include "arch/instruction_set.h"
#include "base/macros.h"

namespace art {

class Mutex;

// QuasiAtomic encapsulates two separate facilities that we are
// trying to move away from: "quasiatomic" 64 bit operations
// and custom memory fences. For the time being, they remain
// exposed. Clients should be converted to use the Atomic class
// whenever possible, and should eventually use C++11 atomics.
// The two facilities that do not have a good C++11 analog are
// ThreadFenceForConstructor and Atomic::*JavaData.
//
// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
  static constexpr bool NeedSwapMutexes(InstructionSet isa) {
    // TODO: does mips64 still need this for Cas64?
    return (isa == InstructionSet::kMips) || (isa == InstructionSet::kMips64);
  }

 public:
  static void Startup();

  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!NeedSwapMutexes(kRuntimeISA)) {
      int64_t value;
#if defined(__LP64__)
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as on Cortex-A15), ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrd %0, %H0, %1"
                           : "=r" (value)
                           : "m" (*addr));
#else
      // Exclusive loads are defined not to tear; clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrexd %0, %H0, %1"
                           : "=r" (value)
                           : "Q" (*addr));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }
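
  // Usage sketch (illustrative only; "field", "new_bits" and "snapshot" are
  // hypothetical names, not part of this API): Read64() pairs with Write64()
  // below so that a reader running concurrently with a writer never observes
  // a torn 64-bit value on 32-bit targets, e.g.
  //
  //   volatile int64_t field = 0;
  //   // Writer thread:
  //   QuasiAtomic::Write64(&field, new_bits);
  //   // Reader thread:
  //   int64_t snapshot = QuasiAtomic::Read64(&field);
  //
  // Only freedom from tearing is documented for these two operations; callers
  // that also need ordering would have to use Cas64() below (documented as
  // fully ordered), explicit fences, or std::atomic.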

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!NeedSwapMutexes(kRuntimeISA)) {
#if defined(__LP64__)
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as on Cortex-A15), strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                           "strd %1, %H1, %0"
                           : "=m"(*addr)
                           : "r" (value));
#else
      // The write is done as a swap so that the cache line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                             "ldrexd %0, %H0, %2\n"
                             "strexd %1, %3, %H3, %2"
                             : "=&r" (prev), "=&r" (status), "+Q"(*addr)
                             : "r" (value)
                             : "cc");
      } while (UNLIKELY(status != 0));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }

  // Atomically compare the value at "addr" to "old_value"; if they are equal,
  // replace it with "new_value" and return true. Otherwise, don't swap, and return false.
  // This is fully ordered, i.e. it has C++11 memory_order_seq_cst
  // semantics (assuming all other accesses use a mutex if this one does).
  // This has "strong" semantics; if it fails then it is guaranteed that
  // at some point during the execution of Cas64, *addr was not equal to
  // old_value.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!NeedSwapMutexes(kRuntimeISA)) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes(InstructionSet isa) {
    return NeedSwapMutexes(isa);
  }

  static void ThreadFenceForConstructor() {
#if defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
#else
    std::atomic_thread_fence(std::memory_order_release);
#endif
  }

 private:
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_QUASI_ATOMIC_H_
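
// Usage note for Cas64() (illustrative sketch; "counter" is a hypothetical
// shared field, not part of this API). As with any strong compare-and-swap,
// callers typically retry in a loop, re-reading the current value until the
// swap succeeds:
//
//   volatile int64_t counter = 0;
//   int64_t old_value;
//   do {
//     old_value = QuasiAtomic::Read64(&counter);
//   } while (!QuasiAtomic::Cas64(old_value, old_value + 1, &counter));
//
// Because Cas64() has strong semantics, a false return means *addr really
// differed from old_value at some point during the call; there are no
// spurious failures, so each retry reflects a genuine concurrent update.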