Searched refs:sk_memory_order_acquire (Results 1 – 11 of 11) sorted by relevance
/external/skia/src/utils/
D | SkEventTracer.cpp |
    47  SkASSERT(nullptr == sk_atomic_load(&gUserTracer, sk_memory_order_acquire));  in SetInstance()
    50  atexit([]() { delete sk_atomic_load(&gUserTracer, sk_memory_order_acquire); });  in SetInstance()
    54  if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) {  in GetInstance()
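The SkEventTracer matches are the reader side of a publish-once global pointer: the tracer is installed elsewhere with a release store, and every reader loads it with acquire so it also sees the fully constructed object. A minimal sketch of the idiom using std::atomic directly (the names here are illustrative, not Skia's):

```cpp
#include <atomic>

struct Tracer { /* user-provided tracing hooks */ };

static std::atomic<Tracer*> gTracer{nullptr};

// Writer: finish constructing the object, then publish it with a release store.
void SetInstance(Tracer* tracer) {
    gTracer.store(tracer, std::memory_order_release);
}

// Readers: the acquire load pairs with the release store above, so any reader
// that observes a non-null pointer also observes the object's initialization.
Tracer* GetInstance() {
    return gTracer.load(std::memory_order_acquire);
}
```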
/external/skia/include/private/
D | SkOncePtr.h |
    64  uintptr_t state = sk_atomic_load(&fState, sk_memory_order_acquire);  in get()
    85  state = sk_atomic_load(&fState, sk_memory_order_acquire);  in get()
    95  auto state = sk_atomic_load(&fState, sk_memory_order_acquire);
D | SkAtomics.h |
    20  sk_memory_order_acquire,  enumerator
    98  mo == sk_memory_order_acquire ||  in sk_atomic_load()
   134  failure == sk_memory_order_acquire ||  in sk_atomic_compare_exchange()
   182  T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }  in sk_acquire_load()
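SkAtomics.h is where sk_memory_order_acquire is declared (the enumerator at line 20) and where the wrappers that accept it live. A rough, assumed sketch of how such a wrapper layer can forward to std::atomic — not Skia's actual implementation, just the shape the matches above suggest:

```cpp
#include <atomic>

// Illustrative enum mirroring the C++11 memory orders; sk_memory_order_acquire
// corresponds to the enumerator matched at line 20 above.
enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};

static inline std::memory_order to_std(sk_memory_order mo) {
    switch (mo) {
        case sk_memory_order_relaxed: return std::memory_order_relaxed;
        case sk_memory_order_consume: return std::memory_order_consume;
        case sk_memory_order_acquire: return std::memory_order_acquire;
        case sk_memory_order_release: return std::memory_order_release;
        case sk_memory_order_acq_rel: return std::memory_order_acq_rel;
        default:                      return std::memory_order_seq_cst;
    }
}

// Load wrapper taking an explicit order, plus an acquire-load convenience
// helper in the spirit of the sk_acquire_load() matched at line 182.
template <typename T>
T atomic_load_with(const std::atomic<T>* ptr, sk_memory_order mo) {
    return ptr->load(to_std(mo));
}

template <typename T>
T acquire_load(const std::atomic<T>* ptr) {
    return atomic_load_with(ptr, sk_memory_order_acquire);
}
```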
D | SkSpinlock.h |
    22  if (sk_atomic_exchange(&fLocked, true, sk_memory_order_acquire)) {  in acquire()
D | SkSemaphore.h |
    37  if (sk_atomic_fetch_sub(&fCount, 1, sk_memory_order_acquire) <= 0) {  in wait()
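The SkSemaphore match is the fast path of wait(): fetch_sub with acquire pairs with a release fetch_add in signal(), so work published before signalling is visible after the wait returns. A self-contained sketch of that counting-semaphore pattern, with a mutex/condition-variable slow path standing in for the OS semaphore (an assumption for illustration):

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

class Semaphore {
public:
    void signal() {
        // Release pairs with the acquire fetch_sub in wait(): everything written
        // before signal() is visible to the thread that proceeds past wait().
        if (fCount.fetch_add(1, std::memory_order_release) < 0) {
            std::lock_guard<std::mutex> lock(fMutex);
            ++fWakeups;
            fCondition.notify_one();
        }
    }

    void wait() {
        // Fast path: if the old count was positive, a signal was already pending
        // and we proceed without blocking.
        if (fCount.fetch_sub(1, std::memory_order_acquire) <= 0) {
            std::unique_lock<std::mutex> lock(fMutex);
            fCondition.wait(lock, [this] { return fWakeups > 0; });
            --fWakeups;
        }
    }

private:
    std::atomic<int> fCount{0};  // pending signals minus pending waiters
    std::mutex fMutex;
    std::condition_variable fCondition;
    int fWakeups = 0;  // guarded by fMutex
};
```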
D | SkOnce.h |
   113  if (!sk_atomic_load(done, sk_memory_order_acquire)) {  in SkOnce()
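The SkOnce match is the fast path of the double-checked "run once" idiom: an acquire load of the done flag lets later callers skip the lock while still seeing everything the one-time function wrote. A compact sketch with std::atomic and std::mutex:

```cpp
#include <atomic>
#include <mutex>

// Run fn exactly once; callers that skip the slow path must still see fn's effects.
template <typename Fn>
void call_once_sketch(std::atomic<bool>* done, std::mutex* lock, Fn&& fn) {
    // Fast path: acquire pairs with the release store below, so seeing
    // done == true implies seeing everything fn wrote.
    if (done->load(std::memory_order_acquire)) {
        return;
    }
    std::lock_guard<std::mutex> guard(*lock);
    if (!done->load(std::memory_order_relaxed)) {  // re-check under the lock
        fn();
        done->store(true, std::memory_order_release);
    }
}
```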
/external/skia/src/core/
D | SkSpinlock.cpp |
    12  while(sk_atomic_exchange(&fLocked, true, sk_memory_order_acquire)) { /*spin*/ }  in contendedAcquire()
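Both SkSpinlock matches (the header's uncontended path above and contendedAcquire() here) are the same idiom: exchange the lock word to true with acquire, and unlock elsewhere with a release store. A minimal std::atomic sketch:

```cpp
#include <atomic>

class SpinLock {
public:
    void acquire() {
        // exchange() returns the previous value; true means another thread holds
        // the lock. Acquire keeps the critical section from moving above this point.
        while (fLocked.exchange(true, std::memory_order_acquire)) {
            /* spin */
        }
    }

    void release() {
        // Release publishes the critical section's writes to the next acquirer.
        fLocked.store(false, std::memory_order_release);
    }

private:
    std::atomic<bool> fLocked{false};
};
```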
D | SkSharedMutex.cpp |
   263  sk_memory_order_acquire);  in acquire()
   325  sk_memory_order_acquire, sk_memory_order_relaxed));  in acquireShared()
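The SkSharedMutex matches show compare-exchange with acquire ordering on success and relaxed on failure: a failed CAS publishes nothing and only reloads the current value, so only the winning attempt needs to synchronize. A generic sketch of that idiom on a state word (the reader-bit layout is made up for illustration):

```cpp
#include <atomic>
#include <cstdint>

// Set a reader bit in a packed lock word, retrying until the CAS wins.
// Acquire on success orders the reader's subsequent accesses after the CAS;
// relaxed on failure is enough because a failed CAS only refreshes 'old'.
void acquire_shared_bit(std::atomic<int32_t>* state, int32_t readerBit) {
    int32_t old = state->load(std::memory_order_relaxed);
    while (!state->compare_exchange_weak(old, old | readerBit,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed)) {
        // 'old' now holds the current value; loop and try again.
    }
}
```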
D | SkTaskGroup.cpp |
    63  while (pending->load(sk_memory_order_acquire) > 0) {  in Wait()
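SkTaskGroup::Wait() spins on a pending-work counter with acquire loads so that, once the count reaches zero, the waiter also sees the results the workers wrote before their release decrements. Sketch:

```cpp
#include <atomic>

// Worker side: finish the task's writes, then decrement with release.
inline void finish_task(std::atomic<int>* pending) {
    pending->fetch_sub(1, std::memory_order_release);
}

// Waiter side: acquire loads pair with the workers' release decrements, so a
// zero count also means the tasks' results are visible here.
inline void wait_for_tasks(const std::atomic<int>* pending) {
    while (pending->load(std::memory_order_acquire) > 0) {
        /* spin, yield, or help drain the queue */
    }
}
```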
/external/skia/include/core/
D | SkRefCnt.h |
    49  if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {  in unique()
   240  bool unique() const { return 1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire); }  in unique()
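The ref-counting matches here and in GrVkResource.h below are the same test: unique() is an acquire load of the count compared against 1, the acquire pairing with the release in other threads' unref() so the sole remaining owner sees their writes. A simplified sketch:

```cpp
#include <atomic>
#include <cstdint>

class RefCounted {
public:
    void ref() const { fRefCnt.fetch_add(1, std::memory_order_relaxed); }

    void unref() const {
        // acq_rel here is a simplification: the release half publishes this
        // thread's writes, the acquire half is needed by whoever deletes.
        if (fRefCnt.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            delete this;
        }
    }

    bool unique() const {
        // Acquire pairs with the release part of other threads' unref(), so the
        // sole owner sees any writes they made before dropping their reference.
        return fRefCnt.load(std::memory_order_acquire) == 1;
    }

protected:
    virtual ~RefCounted() = default;

private:
    mutable std::atomic<int32_t> fRefCnt{1};
};
```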
/external/skia/src/gpu/vk/
D | GrVkResource.h |
    74  if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {  in unique()