/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "SkThread.h"
#include "SkTLS.h"

#include <pthread.h>
#include <errno.h>

#ifndef SK_BUILD_FOR_ANDROID

/**
 We prefer the GCC intrinsic implementation of the atomic operations over the
 SkMutex-based implementation. The SkMutex version suffers from static
 destructor ordering problems.
 Note that clang also defines the GCC version macros and implements the
 intrinsics.
 TODO: Verify that the gcc-style __sync_* intrinsics work on ARM.
 According to the LLVM 2.7 release notes, the intrinsics are supported on ARM
 in LLVM 2.7+:
 http://llvm.org/releases/2.7/docs/ReleaseNotes.html
*/
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || __GNUC__ > 4
    #if (defined(__x86_64) || defined(__i386__))
        #define GCC_INTRINSIC
    #endif
#endif

#if defined(GCC_INTRINSIC)
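
// Each of the sk_atomic_* routines below returns the value *addr held
// *before* the operation (i.e. __sync_fetch_and_add semantics). The
// sk_membar_* hooks are empty in this build because the GCC __sync builtins
// already act as full memory barriers.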

int32_t sk_atomic_inc(int32_t* addr)
{
    return __sync_fetch_and_add(addr, 1);
}

int32_t sk_atomic_add(int32_t* addr, int32_t inc)
{
    return __sync_fetch_and_add(addr, inc);
}

int32_t sk_atomic_dec(int32_t* addr)
{
    return __sync_fetch_and_add(addr, -1);
}
void sk_membar_aquire__after_atomic_dec() { }

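// Atomically increment *addr only if it is currently non-zero, using a
// compare-and-swap loop. Returns the value *addr held before the attempt;
// a return of 0 means no increment was performed.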
int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    int32_t value = *addr;

    while (true) {
        if (value == 0) {
            return 0;
        }

        int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);

        if (before == value) {
            return value;
        } else {
            value = before;
        }
    }
}
void sk_membar_aquire__after_atomic_conditional_inc() { }

#else

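// Fallback for toolchains without the GCC __sync intrinsics: serialize every
// "atomic" operation through a single process-wide mutex.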
SkMutex gAtomicMutex;

int32_t sk_atomic_inc(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value + 1;
    return value;
}

int32_t sk_atomic_add(int32_t* addr, int32_t inc)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value + inc;
    return value;
}

int32_t sk_atomic_dec(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value - 1;
    return value;
}
void sk_membar_aquire__after_atomic_dec() { }

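// Same contract as the intrinsic version above: increment *addr only if it is
// non-zero, and return the value it held before the call.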
int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    if (value != 0) ++*addr;
    return value;
}
void sk_membar_aquire__after_atomic_conditional_inc() { }

#endif

#endif // SK_BUILD_FOR_ANDROID

//////////////////////////////////////////////////////////////////////////////

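// Map a few common pthread error codes to readable debug output; a status of
// 0 (success) prints nothing.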
static void print_pthread_error(int status) {
    switch (status) {
        case 0: // success
            break;
        case EINVAL:
            SkDebugf("pthread error [%d] EINVAL\n", status);
            break;
        case EBUSY:
            SkDebugf("pthread error [%d] EBUSY\n", status);
            break;
        default:
            SkDebugf("pthread error [%d] unknown\n", status);
            break;
    }
}

#ifdef SK_USE_POSIX_THREADS

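// In this configuration fMutex is a pthread_mutex_t member of SkMutex;
// acquire() and release() are not defined in this file and are presumably
// inlined in the header.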
SkMutex::SkMutex() {
    int status;

    status = pthread_mutex_init(&fMutex, NULL);
    if (status != 0) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
}

SkMutex::~SkMutex() {
    int status = pthread_mutex_destroy(&fMutex);

    // only report errors on non-global mutexes
    if (status != 0) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
}

#else // !SK_USE_POSIX_THREADS

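// In this configuration the pthread_mutex_t is placed into the opaque
// fStorage buffer of SkMutex, so the constructor first checks that the
// buffer is large enough to hold it.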
SkMutex::SkMutex() {
    if (sizeof(pthread_mutex_t) > sizeof(fStorage)) {
        SkDEBUGF(("pthread mutex size = %d\n", (int)sizeof(pthread_mutex_t)));
        SkDEBUGFAIL("mutex storage is too small");
    }

    int status;
    pthread_mutexattr_t attr;

    status = pthread_mutexattr_init(&attr);
    print_pthread_error(status);
    SkASSERT(0 == status);

    status = pthread_mutex_init((pthread_mutex_t*)fStorage, &attr);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

SkMutex::~SkMutex() {
    int status = pthread_mutex_destroy((pthread_mutex_t*)fStorage);
#if 0
    // only report errors on non-global mutexes
    if (!fIsGlobal) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
#endif
}

void SkMutex::acquire() {
    int status = pthread_mutex_lock((pthread_mutex_t*)fStorage);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

void SkMutex::release() {
    int status = pthread_mutex_unlock((pthread_mutex_t*)fStorage);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

#endif // !SK_USE_POSIX_THREADS

///////////////////////////////////////////////////////////////////////////////

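// Thread-local storage for SkTLS: one process-wide pthread key, created
// lazily (and exactly once) via pthread_once. SkTLS::Destructor is invoked
// by pthreads when a thread exits with a non-NULL value in the slot.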
static pthread_key_t gSkTLSKey;
static pthread_once_t gSkTLSKey_Once = PTHREAD_ONCE_INIT;

static void sk_tls_make_key() {
    (void)pthread_key_create(&gSkTLSKey, SkTLS::Destructor);
}

void* SkTLS::PlatformGetSpecific(bool forceCreateTheSlot) {
    // TODO: should we use forceCreateTheSlot to skip calling pthread_once and
    // simply return NULL if we have never been called with
    // forceCreateTheSlot == true?

    (void)pthread_once(&gSkTLSKey_Once, sk_tls_make_key);
    return pthread_getspecific(gSkTLSKey);
}

void SkTLS::PlatformSetSpecific(void* ptr) {
    (void)pthread_setspecific(gSkTLSKey, ptr);
}