// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_

#include <libkern/OSAtomic.h>

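// Illustrative usage sketch only, not part of this header: callers are
// expected to include atomicops.h, which declares these operations and the
// Atomic32/Atomic64 types. Assuming that interface, a minimal spinlock could
// look like:
//
//   v8::base::Atomic32 lock = 0;  // 0 = unlocked, 1 = locked.
//   // Spin until we atomically swap 0 -> 1 with acquire semantics.
//   while (v8::base::Acquire_CompareAndSwap(&lock, 0, 1) != 0) {
//   }
//   // ... critical section ...
//   v8::base::Release_Store(&lock, 0);  // Publish writes, then unlock.
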
namespace v8 {
namespace base {

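// Compare-and-swap with no memory-barrier semantics. Returns the value of
// *ptr that was observed: old_value if the swap succeeded, the differing
// value otherwise. OSAtomicCompareAndSwap32 only reports success or failure,
// so on failure the previous value is re-read separately; the loop retries
// while that re-read still equals old_value, apparently so that old_value is
// never returned for a swap that did not actually happen.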
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

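// Atomic exchange built from the same compare-and-swap primitive: re-read
// *ptr and retry until the CAS installs new_value, then return the value that
// was replaced. (The OSAtomic API used here does not appear to offer a plain
// exchange operation, hence the loop.)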
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

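// Full (read and write) memory barrier, provided by libkern's
// OSMemoryBarrier().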
inline void MemoryBarrier() {
  OSMemoryBarrier();
}

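// Same compare-and-swap loop as the NoBarrier version above, but using the
// Barrier variant of the OSAtomic call to obtain the required ordering.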
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

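// As with the 64-bit variant below: the libkern interface does not
// distinguish acquire from release barriers, so release semantics are
// obtained by forwarding to Acquire_CompareAndSwap.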
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

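// The Acquire_/Release_ stores combine a plain store with a full barrier:
// Acquire_Store stores first and fences afterwards, while Release_Store
// fences first and then stores.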
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

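// The Acquire_/Release_ loads mirror the stores above: Acquire_Load reads
// first and fences afterwards, Release_Load fences first and then reads.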
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

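// The Atomic64 operations below mirror the 32-bit versions above, using the
// 64-bit OSAtomic calls. They are compiled only when __LP64__ is defined,
// i.e. on 64-bit targets.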
#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

} }  // namespace v8::base

#endif  // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_