// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_

namespace google {
namespace protobuf {
namespace internal {

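// MemoryBarrier() issues "dmb ish": a full data memory barrier over the
// inner shareable domain, ordering all prior loads and stores against all
// later ones as observed by the other cores in the system.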
inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
}

// The NoBarrier versions of the operations include "memory" in the clobber
// list. This is not required for direct use of the NoBarrier versions of
// the operations. However, it is required for correctness when they are
// used as part of the Acquire or Release versions, to ensure that nothing
// from outside the call is reordered between the operation and the memory
// barrier. This does not change the generated code, so it has little or no
// impact on the NoBarrier operations.
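// For example, Acquire_CompareAndSwap() below is the NoBarrier CAS followed
// by MemoryBarrier(); without the clobber, the compiler could move an
// unrelated memory access into the gap between the CAS and the barrier.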

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}
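
// Illustrative caller sketch (hypothetical, not part of this header): a CAS
// loop retries until the swap observes the value it read, i.e. until it
// wins the race against concurrent updaters.
//
//   Atomic32 old;
//   do {
//     old = NoBarrier_Load(&counter);
//   } while (NoBarrier_CompareAndSwap(&counter, old, old + 1) != old);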

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                       \n\t"
    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}
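
// Note that the incremented value (old value + increment), not the old
// value, is returned: [result] holds the sum that the stxr publishes.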

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}
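
// The full barriers on both sides keep any surrounding memory access from
// being reordered across the increment in either direction.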

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
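
// A plain volatile store suffices above: aligned accesses up to 64 bits
// are single-copy atomic on ARMv8, so no exclusive sequence is needed.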

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %w[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}
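
// "stlr" is the ARMv8 store-release instruction; it pairs with the "ldar"
// load-acquire in Acquire_Load() below, giving release/acquire ordering
// without a separate dmb.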

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %w[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.
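// The only difference is operand width: the %w (32-bit W register) operand
// modifiers on the data operands are dropped or written %x, selecting the
// 64-bit X registers; the stxr status flag in [temp] stays %w.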

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], %[ptr]                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], %[ptr]                 \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], %[ptr]       \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %x[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %x[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_