/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_ATOMIC_H_
#define ART_LIBARTBASE_BASE_ATOMIC_H_

#include <stdint.h>
#include <atomic>
#include <limits>
#include <vector>

#include <android-base/logging.h>

#include "macros.h"

namespace art {

enum class CASMode {
  kStrong,
  kWeak,
};

template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
 public:
  Atomic<T>() : std::atomic<T>(T()) { }

  explicit Atomic<T>(T value) : std::atomic<T>(value) { }

  // Load data from an atomic variable with Java data memory order semantics.
  //
  // Promises memory access semantics of ordinary Java data.
  // Does not order other memory accesses.
  // Long and double accesses may be performed 32 bits at a time.
  // There are no "cache coherence" guarantees; e.g. loads from the same location may be reordered.
  // In contrast to normal C++ accesses, racing accesses are allowed.
  T LoadJavaData() const {
    return this->load(std::memory_order_relaxed);
  }

  // Store data in an atomic variable with Java data memory ordering semantics.
  //
  // Promises memory access semantics of ordinary Java data.
  // Does not order other memory accesses.
  // Long and double accesses may be performed 32 bits at a time.
  // There are no "cache coherence" guarantees; e.g. loads from the same location may be reordered.
  // In contrast to normal C++ accesses, racing accesses are allowed.
  void StoreJavaData(T desired_value) {
    this->store(desired_value, std::memory_order_relaxed);
  }
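
  // Illustrative sketch (not part of the original header) of how a plain, non-volatile Java int
  // field might be accessed through these helpers; the field name is hypothetical:
  //
  //   Atomic<int32_t> java_field;              // backing store for a plain (non-volatile) field
  //   int32_t v = java_field.LoadJavaData();   // racing accesses are well-defined here,
  //   java_field.StoreJavaData(v + 1);         // unlike ordinary non-atomic C++ accesses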

  // Atomically replace the value with desired_value if it matches the expected_value.
  // Participates in total ordering of atomic operations.
  bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // The same, except it may fail spuriously.
  bool CompareAndSetWeakSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst);
  }
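
  // Illustrative sketch (not part of the original header): the weak variant may fail spuriously,
  // so it belongs in a retry loop, while the strong variant can stand alone. The counter below is
  // hypothetical:
  //
  //   Atomic<int32_t> counter;
  //   int32_t old_value;
  //   do {
  //     old_value = counter.load(std::memory_order_relaxed);
  //   } while (!counter.CompareAndSetWeakSequentiallyConsistent(old_value, old_value + 1));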

  // Atomically replace the value with desired_value if it matches the expected_value. Doesn't
  // imply ordering or synchronization constraints.
  bool CompareAndSetStrongRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired_value if it matches the expected_value. Prior writes
  // to other memory locations become visible to the threads that do a consume or an acquire on the
  // same location.
  bool CompareAndSetStrongRelease(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
  }

  // The same as CompareAndSetStrongRelaxed, except it may fail spuriously.
  bool CompareAndSetWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired_value if it matches the expected_value. Prior writes
  // made to other memory locations by the thread that did the release become visible in this
  // thread.
  bool CompareAndSetWeakAcquire(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with desired_value if it matches the expected_value. Prior writes
  // to other memory locations become visible to the threads that do a consume or an acquire on the
  // same location.
  bool CompareAndSetWeakRelease(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
  }
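
  // Illustrative sketch (not part of the original header): pairing a release CAS in a publishing
  // thread with an acquire CAS in a consuming thread makes the publisher's prior writes visible
  // to the consumer. The flag and payload names are hypothetical:
  //
  //   int32_t payload;          // plain data, published under the flag
  //   Atomic<int32_t> flag;     // 0 = empty, 1 = published
  //
  //   // Publisher:
  //   payload = 42;
  //   flag.CompareAndSetStrongRelease(0, 1);   // payload write ordered before flag update
  //
  //   // Consumer:
  //   if (flag.CompareAndSetWeakAcquire(1, 0)) {
  //     int32_t v = payload;                   // guaranteed to observe 42
  //   }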

  // Atomically replace the value with desired_value if it matches the expected_value, with the
  // CAS strength and memory ordering chosen by the caller.
  bool CompareAndSet(T expected_value,
                     T desired_value,
                     CASMode mode,
                     std::memory_order memory_order) {
    return mode == CASMode::kStrong
        ? this->compare_exchange_strong(expected_value, desired_value, memory_order)
        : this->compare_exchange_weak(expected_value, desired_value, memory_order);
  }
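
  // Illustrative sketch (not part of the original header): callers that choose the CAS mode and
  // ordering at runtime can funnel through this single entry point. The use_weak flag is
  // hypothetical:
  //
  //   bool TryUpdate(Atomic<int32_t>* slot, int32_t old_v, int32_t new_v, bool use_weak) {
  //     return slot->CompareAndSet(old_v, new_v,
  //                                use_weak ? CASMode::kWeak : CASMode::kStrong,
  //                                std::memory_order_seq_cst);
  //   }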

  // Returns the address of the current atomic variable. This is only used by futex() which is
  // declared to take a volatile address (see base/mutex-inl.h).
  volatile T* Address() {
    return reinterpret_cast<T*>(this);
  }
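
  // Illustrative sketch (not part of the original header): Address() lets the variable be handed
  // to the futex syscall, which expects a volatile integer pointer. A Linux-only sketch; the
  // state variable is hypothetical:
  //
  //   #include <linux/futex.h>
  //   #include <sys/syscall.h>
  //   #include <unistd.h>
  //
  //   Atomic<int32_t> state;
  //   // Sleep until *state.Address() is no longer 0 (spurious wakeups are possible).
  //   syscall(SYS_futex, state.Address(), FUTEX_WAIT, 0, nullptr, nullptr, 0);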

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }
};

typedef Atomic<int32_t> AtomicInteger;

static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size");
static_assert(alignof(AtomicInteger) == alignof(int32_t),
              "AtomicInteger alignment differs from that of underlying type");
static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size");

// Assert that the alignment of 64-bit integers is 8 bytes. This does not hold on some 32-bit
// architectures (e.g. x86-32), but we know that 64-bit integers here are arranged to be 8-byte
// aligned.
#if defined(__LP64__)
static_assert(alignof(Atomic<int64_t>) == alignof(int64_t),
              "Atomic<int64> alignment differs from that of underlying type");
#endif
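
// Illustrative sketch (not part of the original header): on 32-bit targets, the required 8-byte
// alignment can be forced explicitly at the declaration site, e.g.:
//
//   struct Fields {
//     alignas(8) Atomic<int64_t> counter;  // 8-byte aligned even on x86-32
//   };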

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_ATOMIC_H_