• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/core/SkSharedMutex.h"
9 
10 #include "include/core/SkTypes.h"
11 #include "include/private/base/SkSemaphore.h"
12 
13 #include <cinttypes>
14 
#if !defined(__has_feature)
    #define __has_feature(x) 0
#endif

#if __has_feature(thread_sanitizer)

    // Tell ThreadSanitizer that a reader/writer lock lives at address "lock".
    #define ANNOTATE_RWLOCK_CREATE(lock) \
        AnnotateRWLockCreate(__FILE__, __LINE__, lock)

    // Tell ThreadSanitizer that the lock at address "lock" is about to be destroyed.
    #define ANNOTATE_RWLOCK_DESTROY(lock) \
        AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

    // Tell ThreadSanitizer that the lock at address "lock" has been acquired.
    // is_w=1 for a writer lock, is_w=0 for a reader lock.
    #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
        AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

    // Tell ThreadSanitizer that the lock at address "lock" is about to be released.
    #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
        AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

    #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
        #if defined(__GNUC__)
            #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
        #else
            /* TODO(glider): for Windows support we may want to change this macro in order
               to prepend __declspec(selectany) to the annotations' declarations. */
            #error weak annotations are not supported for your compiler
        #endif
    #else
        #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
    #endif

    // The annotation entry points themselves are supplied by the sanitizer runtime.
    extern "C" {
    void AnnotateRWLockCreate(
        const char *file, int line,
        const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
    void AnnotateRWLockDestroy(
        const char *file, int line,
        const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
    void AnnotateRWLockAcquired(
        const char *file, int line,
        const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
    void AnnotateRWLockReleased(
        const char *file, int line,
        const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
    }

#else

    // Without ThreadSanitizer the annotations compile away to nothing.
    #define ANNOTATE_RWLOCK_CREATE(lock)
    #define ANNOTATE_RWLOCK_DESTROY(lock)
    #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
    #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)

#endif
73 
74 #ifdef SK_DEBUG
75 
76     #include "include/private/base/SkTDArray.h"
77     #include "include/private/base/SkThreadID.h"
78 
79     class SkSharedMutex::ThreadIDSet {
80     public:
81         // Returns true if threadID is in the set.
find(SkThreadID threadID) const82         bool find(SkThreadID threadID) const {
83             for (auto& t : fThreadIDs) {
84                 if (t == threadID) return true;
85             }
86             return false;
87         }
88 
89         // Returns true if did not already exist.
tryAdd(SkThreadID threadID)90         bool tryAdd(SkThreadID threadID) {
91             for (auto& t : fThreadIDs) {
92                 if (t == threadID) return false;
93             }
94             fThreadIDs.append(1, &threadID);
95             return true;
96         }
97         // Returns true if already exists in Set.
tryRemove(SkThreadID threadID)98         bool tryRemove(SkThreadID threadID) {
99             for (int i = 0; i < fThreadIDs.size(); ++i) {
100                 if (fThreadIDs[i] == threadID) {
101                     fThreadIDs.remove(i);
102                     return true;
103                 }
104             }
105             return false;
106         }
107 
swap(ThreadIDSet & other)108         void swap(ThreadIDSet& other) {
109             fThreadIDs.swap(other.fThreadIDs);
110         }
111 
count() const112         int count() const {
113             return fThreadIDs.size();
114         }
115 
116     private:
117         SkTDArray<SkThreadID> fThreadIDs;
118     };
119 
SkSharedMutex()120     SkSharedMutex::SkSharedMutex()
121         : fCurrentShared(new ThreadIDSet)
122         , fWaitingExclusive(new ThreadIDSet)
123         , fWaitingShared(new ThreadIDSet){
124         ANNOTATE_RWLOCK_CREATE(this);
125     }
126 
~SkSharedMutex()127     SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
128 
acquire()129     void SkSharedMutex::acquire() {
130         SkThreadID threadID(SkGetThreadID());
131         int currentSharedCount;
132         int waitingExclusiveCount;
133         {
134             SkAutoMutexExclusive l(fMu);
135 
136             SkASSERTF(!fCurrentShared->find(threadID),
137                       "Thread %" PRIx64 " already has an shared lock\n", threadID);
138 
139             if (!fWaitingExclusive->tryAdd(threadID)) {
140                 SkDEBUGFAILF("Thread %" PRIx64 " already has an exclusive lock\n", threadID);
141             }
142 
143             currentSharedCount = fCurrentShared->count();
144             waitingExclusiveCount = fWaitingExclusive->count();
145         }
146 
147         if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
148             fExclusiveQueue.wait();
149         }
150 
151         ANNOTATE_RWLOCK_ACQUIRED(this, 1);
152     }
153 
154     // Implementation Detail:
155     // The shared threads need two separate queues to keep the threads that were added after the
156     // exclusive lock separate from the threads added before.
release()157     void SkSharedMutex::release() {
158         ANNOTATE_RWLOCK_RELEASED(this, 1);
159         SkThreadID threadID(SkGetThreadID());
160         int sharedWaitingCount;
161         int exclusiveWaitingCount;
162         int sharedQueueSelect;
163         {
164             SkAutoMutexExclusive l(fMu);
165             SkASSERT(0 == fCurrentShared->count());
166             if (!fWaitingExclusive->tryRemove(threadID)) {
167                 SkDEBUGFAILF("Thread %" PRIx64 " did not have the lock held.\n", threadID);
168             }
169             exclusiveWaitingCount = fWaitingExclusive->count();
170             sharedWaitingCount = fWaitingShared->count();
171             fWaitingShared.swap(fCurrentShared);
172             sharedQueueSelect = fSharedQueueSelect;
173             if (sharedWaitingCount > 0) {
174                 fSharedQueueSelect = 1 - fSharedQueueSelect;
175             }
176         }
177 
178         if (sharedWaitingCount > 0) {
179             fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
180         } else if (exclusiveWaitingCount > 0) {
181             fExclusiveQueue.signal();
182         }
183     }
184 
assertHeld() const185     void SkSharedMutex::assertHeld() const {
186         SkThreadID threadID(SkGetThreadID());
187         SkAutoMutexExclusive l(fMu);
188         SkASSERT(0 == fCurrentShared->count());
189         SkASSERT(fWaitingExclusive->find(threadID));
190     }
191 
acquireShared()192     void SkSharedMutex::acquireShared() {
193         SkThreadID threadID(SkGetThreadID());
194         int exclusiveWaitingCount;
195         int sharedQueueSelect;
196         {
197             SkAutoMutexExclusive l(fMu);
198             exclusiveWaitingCount = fWaitingExclusive->count();
199             if (exclusiveWaitingCount > 0) {
200                 if (!fWaitingShared->tryAdd(threadID)) {
201                     SkDEBUGFAILF("Thread %" PRIx64 " was already waiting!\n", threadID);
202                 }
203             } else {
204                 if (!fCurrentShared->tryAdd(threadID)) {
205                     SkDEBUGFAILF("Thread %" PRIx64 " already holds a shared lock!\n", threadID);
206                 }
207             }
208             sharedQueueSelect = fSharedQueueSelect;
209         }
210 
211         if (exclusiveWaitingCount > 0) {
212             fSharedQueue[sharedQueueSelect].wait();
213         }
214 
215         ANNOTATE_RWLOCK_ACQUIRED(this, 0);
216     }
217 
releaseShared()218     void SkSharedMutex::releaseShared() {
219         ANNOTATE_RWLOCK_RELEASED(this, 0);
220         SkThreadID threadID(SkGetThreadID());
221 
222         int currentSharedCount;
223         int waitingExclusiveCount;
224         {
225             SkAutoMutexExclusive l(fMu);
226             if (!fCurrentShared->tryRemove(threadID)) {
227                 SkDEBUGFAILF("Thread %" PRIx64 " does not hold a shared lock.\n", threadID);
228             }
229             currentSharedCount = fCurrentShared->count();
230             waitingExclusiveCount = fWaitingExclusive->count();
231         }
232 
233         if (0 == currentSharedCount && waitingExclusiveCount > 0) {
234             fExclusiveQueue.signal();
235         }
236     }
237 
assertHeldShared() const238     void SkSharedMutex::assertHeldShared() const {
239         SkThreadID threadID(SkGetThreadID());
240         SkAutoMutexExclusive l(fMu);
241         SkASSERT(fCurrentShared->find(threadID));
242     }
243 
244 #else
245 
246     // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic.
247     // These three counts must be the same size, so each gets 10 bits. The 10 bits represent
248     // the log of the count which is 1024.
249     //
250     // The three counts held in fQueueCounts are:
251     // * Shared - the number of shared lock holders currently running.
252     // * WaitingExclusive - the number of threads waiting for an exclusive lock.
253     // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
254     //   to finish.
255     static const int kLogThreadCount = 10;
256 
257     enum {
258         kSharedOffset          = (0 * kLogThreadCount),
259         kWaitingExlusiveOffset = (1 * kLogThreadCount),
260         kWaitingSharedOffset   = (2 * kLogThreadCount),
261         kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
262         kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
263         kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
264     };
265 
SkSharedMutex()266     SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
~SkSharedMutex()267     SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
acquire()268     void SkSharedMutex::acquire() {
269         // Increment the count of exclusive queue waiters.
270         int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
271                                                         std::memory_order_acquire);
272 
273         // If there are no other exclusive waiters and no shared threads are running then run
274         // else wait.
275         if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
276             fExclusiveQueue.wait();
277         }
278         ANNOTATE_RWLOCK_ACQUIRED(this, 1);
279     }
280 
release()281     void SkSharedMutex::release() {
282         ANNOTATE_RWLOCK_RELEASED(this, 1);
283 
284         int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
285         int32_t waitingShared;
286         int32_t newQueueCounts;
287         do {
288             newQueueCounts = oldQueueCounts;
289 
290             // Decrement exclusive waiters.
291             newQueueCounts -= 1 << kWaitingExlusiveOffset;
292 
293             // The number of threads waiting to acquire a shared lock.
294             waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
295 
296             // If there are any move the counts of all the shared waiters to actual shared. They are
297             // going to run next.
298             if (waitingShared > 0) {
299 
300                 // Set waiting shared to zero.
301                 newQueueCounts &= ~kWaitingSharedMask;
302 
303                 // Because this is the exclusive release, then there are zero readers. So, the bits
304                 // for shared locks should be zero. Since those bits are zero, we can just |= in the
305                 // waitingShared count instead of clearing with an &= and then |= the count.
306                 newQueueCounts |= waitingShared << kSharedOffset;
307             }
308 
309         } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
310                                                        std::memory_order_release,
311                                                        std::memory_order_relaxed));
312 
313         if (waitingShared > 0) {
314             // Run all the shared.
315             fSharedQueue.signal(waitingShared);
316         } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
317             // Run a single exclusive waiter.
318             fExclusiveQueue.signal();
319         }
320     }
321 
acquireShared()322     void SkSharedMutex::acquireShared() {
323         int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
324         int32_t newQueueCounts;
325         do {
326             newQueueCounts = oldQueueCounts;
327             // If there are waiting exclusives then this shared lock waits else it runs.
328             if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
329                 newQueueCounts += 1 << kWaitingSharedOffset;
330             } else {
331                 newQueueCounts += 1 << kSharedOffset;
332             }
333         } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
334                                                        std::memory_order_acquire,
335                                                        std::memory_order_relaxed));
336 
337         // If there are waiting exclusives, then this shared waits until after it runs.
338         if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
339             fSharedQueue.wait();
340         }
341         ANNOTATE_RWLOCK_ACQUIRED(this, 0);
342 
343     }
344 
releaseShared()345     void SkSharedMutex::releaseShared() {
346         ANNOTATE_RWLOCK_RELEASED(this, 0);
347 
348         // Decrement the shared count.
349         int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
350                                                         std::memory_order_release);
351 
352         // If shared count is going to zero (because the old count == 1) and there are exclusive
353         // waiters, then run a single exclusive waiter.
354         if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
355             && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
356             fExclusiveQueue.signal();
357         }
358     }
359 
360 #endif
361