/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_sync_DEFINED
#define SkAtomics_sync_DEFINED

// This file is mostly a shim.  We'd like to delete it.  Please don't put much
// effort into maintaining it, and if you find bugs in it, the right fix is to
// delete this file and upgrade your compiler to something that supports
// __atomic builtins or std::atomic.

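// For reference, the upgrade path that comment describes: with a compiler that
// has std::atomic, each shim below reduces to a one-liner.  A minimal sketch,
// kept in a comment on purpose since this shim exists precisely because
// <atomic> is unavailable (the cast shows how a pointer-based API like this
// one could wrap std::atomic; it is an illustration, not the Skia code):
//
//     #include <atomic>
//
//     template <typename T>
//     T sk_atomic_load(const T* ptr, std::memory_order mo) {
//         return reinterpret_cast<const std::atomic<T>*>(ptr)->load(mo);
//     }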

static inline void barrier(sk_memory_order mo) {
    asm volatile("" : : : "memory");  // Prevents the compiler from reordering code.
    #if SK_CPU_X86
        // On x86, we generally don't need an extra memory barrier for loads or stores.
        if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
    #else
        // On other platforms (e.g. ARM) we do unless the memory order is relaxed.
        if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
    #endif
}

// These barriers only support our majority use cases: acquire and relaxed loads, release stores.
// For anything more complicated, please consider deleting this file and upgrading your compiler.

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    T val = *ptr;
    barrier(mo);
    return val;
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    barrier(mo);
    *ptr = val;
}
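
// Example: the acquire-load / release-store pairing called out above, as a
// minimal sketch.  The sk_example_* names are illustrative only, and we assume
// the sk_memory_order enumerators declared in SkAtomics.h are in scope.
static inline void sk_example_publish(int* data, bool* ready) {
    *data = 42;                                             // Plain write...
    sk_atomic_store(ready, true, sk_memory_order_release);  // ...made visible by a release store.
}

static inline int sk_example_consume(const int* data, const bool* ready) {
    while (!sk_atomic_load(ready, sk_memory_order_acquire)) {
        // Spin until the release store above is observed.
    }
    return *data;  // The acquire load orders this read after the flag check.
}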

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_add(ptr, val);
}
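
// Example: a bare-bones reference-count bump, the kind of use this function
// serves (a sketch; sk_example_ref is hypothetical, not part of the Skia API).
static inline int sk_example_ref(int* refcnt) {
    // __sync_fetch_and_add returns the value *before* the increment; a relaxed
    // order suffices for taking a reference.
    return sk_atomic_fetch_add(refcnt, 1, sk_memory_order_relaxed);
}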

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
    T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
    if (prev == *expected) {
        return true;
    }
    *expected = prev;
    return false;
}
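
// Example: the standard compare-exchange retry loop, here computing an atomic
// maximum (a sketch; sk_example_atomic_max is hypothetical).  Note that on
// failure the function above writes the current value back into *expected,
// which is what lets the loop retry without an extra load.
static inline int sk_example_atomic_max(int* ptr, int val) {
    int prev = sk_atomic_load(ptr, sk_memory_order_relaxed);
    while (prev < val &&
           !sk_atomic_compare_exchange(ptr, &prev, val,
                                       sk_memory_order_relaxed,
                                       sk_memory_order_relaxed)) {
        // On failure, prev has been refreshed with the current value; retry.
    }
    return prev;
}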

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order) {
    // There is no __sync exchange.  Emulate it with a CAS loop.
    // (The memory orders are spelled out here so the calls stand alone; the
    // declarations in SkAtomics.h default them to sk_memory_order_seq_cst.)
    T prev;
    do {
        prev = sk_atomic_load(ptr, sk_memory_order_seq_cst);
    } while (!sk_atomic_compare_exchange(ptr, &prev, val,
                                         sk_memory_order_seq_cst,
                                         sk_memory_order_seq_cst));
    return prev;
}
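
// Example: a test-and-set spinlock built on exchange, its classic use
// (a sketch; SkExampleSpinlock is hypothetical and only mirrors the usual
// acquire/release pattern for locks).
struct SkExampleSpinlock {
    int fLocked;  // 0 = unlocked, 1 = locked.

    void acquire() {
        // Exchange returns the previous value; keep trying until we are the
        // ones who flipped it from 0 to 1.
        while (sk_atomic_exchange(&fLocked, 1, sk_memory_order_acquire)) {}
    }
    void release() {
        sk_atomic_store(&fLocked, 0, sk_memory_order_release);
    }
};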

#endif//SkAtomics_sync_DEFINED