Searched refs:fRefCnt (Results 1 – 25 of 40) sorted by relevance

/external/skia/src/gpu/vk/
GrVkResource.h
69 GrVkResource() : fRefCnt(1) { in GrVkResource()
80 SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt); in ~GrVkResource()
81 fRefCnt = 0; // illegal value, to catch us if we reuse after delete in ~GrVkResource()
87 int32_t getRefCnt() const { return fRefCnt; } in getRefCnt()
94 if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) { in unique()
107 SkASSERT(fRefCnt > 0); in ref()
108 (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required. in ref()
117 SkASSERT(fRefCnt > 0); in unref()
120 if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) { in unref()
129 SkASSERT(fRefCnt > 0); in unrefAndAbandon()
[all …]
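
The GrVkResource matches above show Skia's intrusive, thread-safe pattern: the count starts at 1, increments are relaxed, and only the final decrement uses acquire-release ordering before deletion. A minimal sketch of the same idea, using std::atomic in place of the sk_atomic_* wrappers seen above (class and method names here are illustrative, not Skia's):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    class RefCounted {
    public:
        RefCounted() : fRefCnt(1) {}   // creation hands the caller the first ref
        void ref() const {
            assert(fRefCnt.load(std::memory_order_relaxed) > 0);
            // Relaxed is enough: taking an extra ref publishes nothing.
            fRefCnt.fetch_add(+1, std::memory_order_relaxed);
        }
        void unref() const {
            assert(fRefCnt.load(std::memory_order_relaxed) > 0);
            // acq_rel on the final decrement: release our writes to the object
            // and acquire every other thread's before running the destructor.
            if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                delete this;
            }
        }
    protected:
        virtual ~RefCounted() = default;
    private:
        mutable std::atomic<int32_t> fRefCnt;
    };
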
/external/skqp/src/gpu/vk/
GrVkResource.h
69 GrVkResource() : fRefCnt(1) { in GrVkResource()
80 SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt); in ~GrVkResource()
81 fRefCnt = 0; // illegal value, to catch us if we reuse after delete in ~GrVkResource()
87 int32_t getRefCnt() const { return fRefCnt; } in getRefCnt()
94 if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) { in unique()
107 SkASSERT(fRefCnt > 0); in ref()
108 (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required. in ref()
117 SkASSERT(fRefCnt > 0); in unref()
120 if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) { in unref()
129 SkASSERT(fRefCnt > 0); in unrefAndAbandon()
[all …]
/external/skia/src/gpu/
GrNonAtomicRef.h
20 GrNonAtomicRef() : fRefCnt(1) {} in GrNonAtomicRef()
25 SkASSERT((0 == fRefCnt || 1 == fRefCnt)); in ~GrNonAtomicRef()
27 fRefCnt = -10; in ~GrNonAtomicRef()
31 bool unique() const { return 1 == fRefCnt; } in unique()
35 SkASSERT(fRefCnt > 0); in ref()
36 ++fRefCnt; in ref()
40 SkASSERT(fRefCnt > 0); in unref()
41 --fRefCnt; in unref()
42 if (0 == fRefCnt) { in unref()
49 mutable int32_t fRefCnt;
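GrNonAtomicRef, matched above, is the single-threaded counterpart: a plain int32_t with no barriers, valid only while every ref and unref happens on one thread. A sketch of that variant under the same protocol (hypothetical names):

    #include <cassert>
    #include <cstdint>

    template <typename Derived>
    class NonAtomicRef {
    public:
        NonAtomicRef() : fRefCnt(1) {}
        bool unique() const { return 1 == fRefCnt; }
        void ref() const { assert(fRefCnt > 0); ++fRefCnt; }
        void unref() const {
            assert(fRefCnt > 0);
            if (0 == --fRefCnt) {
                // CRTP: run the concrete destructor without needing a vtable.
                delete static_cast<const Derived*>(this);
            }
        }
    protected:
        ~NonAtomicRef() = default;
    private:
        mutable int32_t fRefCnt;
    };
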
GrProgramElement.h
44 SkASSERT((0 == fRefCnt || 1 == fRefCnt) && 0 == fPendingExecutions); in ~GrProgramElement()
46 SkDEBUGCODE(fRefCnt = fPendingExecutions = -10;) in ~GrProgramElement()
52 SkASSERT(fRefCnt > 0); in ref()
53 ++fRefCnt; in ref()
59 --fRefCnt; in unref()
60 if (0 == fRefCnt) { in unref()
74 SkASSERT(fRefCnt >= 0); in validate()
76 SkASSERT(fRefCnt + fPendingExecutions > 0); in validate()
81 GrProgramElement() : fRefCnt(1), fPendingExecutions(0) {} in GrProgramElement()
96 if (0 == fRefCnt) { in completedExecution()
[all …]
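
GrProgramElement, above, keeps two counters and destroys the object only when both fRefCnt and fPendingExecutions reach zero. A rough sketch of that two-counter lifetime rule (names are illustrative, not the real class):

    #include <cassert>
    #include <cstdint>

    class TwoCountRef {
    public:
        TwoCountRef() : fRefCnt(1), fPendingExecutions(0) {}
        void ref() const { assert(fRefCnt > 0); ++fRefCnt; }
        void unref() const {
            assert(fRefCnt > 0);
            --fRefCnt;
            this->dieIfDone();
        }
        void addPendingExecution() const { ++fPendingExecutions; }
        void completedExecution() const {
            assert(fPendingExecutions > 0);
            --fPendingExecutions;
            this->dieIfDone();
        }
    protected:
        virtual ~TwoCountRef() { assert(0 == fPendingExecutions); }
    private:
        void dieIfDone() const {
            // Alive while either ordinary refs or pending executions remain.
            if (0 == fRefCnt && 0 == fPendingExecutions) { delete this; }
        }
        mutable int32_t fRefCnt;
        mutable int32_t fPendingExecutions;
    };
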
/external/skqp/src/gpu/
GrNonAtomicRef.h
20 GrNonAtomicRef() : fRefCnt(1) {} in GrNonAtomicRef()
25 SkASSERT((0 == fRefCnt || 1 == fRefCnt)); in ~GrNonAtomicRef()
27 fRefCnt = -10; in ~GrNonAtomicRef()
31 bool unique() const { return 1 == fRefCnt; } in unique()
35 SkASSERT(fRefCnt > 0); in ref()
36 ++fRefCnt; in ref()
40 SkASSERT(fRefCnt > 0); in unref()
41 --fRefCnt; in unref()
42 if (0 == fRefCnt) { in unref()
49 mutable int32_t fRefCnt;
GrProgramElement.h
44 SkASSERT((0 == fRefCnt || 1 == fRefCnt) && 0 == fPendingExecutions); in ~GrProgramElement()
46 SkDEBUGCODE(fRefCnt = fPendingExecutions = -10;) in ~GrProgramElement()
52 SkASSERT(fRefCnt > 0); in ref()
53 ++fRefCnt; in ref()
59 --fRefCnt; in unref()
60 if (0 == fRefCnt) { in unref()
74 SkASSERT(fRefCnt >= 0); in validate()
76 SkASSERT(fRefCnt + fPendingExecutions > 0); in validate()
81 GrProgramElement() : fRefCnt(1), fPendingExecutions(0) {} in GrProgramElement()
96 if (0 == fRefCnt) { in completedExecution()
[all …]
/external/skia/include/private/
GrSurfaceProxy.h
36 ++fRefCnt; in ref()
49 --fRefCnt; in unref()
55 SkASSERT(fRefCnt >= 0 && fPendingWrites >= 0 && fPendingReads >= 0); in isUnique_debugOnly()
56 return 1 == fRefCnt + fPendingWrites + fPendingReads; in isUnique_debugOnly()
61 SkASSERT(1 == fRefCnt); in release()
73 SkASSERT(fRefCnt >= 0); in validate()
76 SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 1); in validate()
82 SkASSERT(fTarget->fRefCnt >= fRefCnt); in validate()
135 GrIORefProxy() : fTarget(nullptr), fRefCnt(1), fPendingReads(0), fPendingWrites(0) {} in GrIORefProxy()
136 GrIORefProxy(sk_sp<GrSurface> surface) : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { in GrIORefProxy()
[all …]
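
GrIORefProxy, matched above, folds pending IO into its uniqueness test: the proxy counts as unique only when fRefCnt + fPendingReads + fPendingWrites == 1. A sketch of just that check (hypothetical names):

    #include <cassert>
    #include <cstdint>

    class IORefLike {
    public:
        IORefLike() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) {}
        bool isUnique() const {
            assert(fRefCnt >= 0 && fPendingReads >= 0 && fPendingWrites >= 0);
            // The caller's ref must be the sole outstanding claim of any kind.
            return 1 == fRefCnt + fPendingReads + fPendingWrites;
        }
    private:
        int32_t fRefCnt;
        int32_t fPendingReads;
        int32_t fPendingWrites;
    };
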
SkWeakRefCnt.h
87 int32_t prev = fRefCnt.load(std::memory_order_relaxed); in atomic_conditional_acquire_strong_ref()
92 } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire, in atomic_conditional_acquire_strong_ref()
147 return fRefCnt.load(std::memory_order_relaxed) == 0; in weak_expired()
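
SkWeakRefCnt's atomic_conditional_acquire_strong_ref, matched above, promotes a weak reference to a strong one with a compare-exchange loop that fails once the strong count has already hit zero. A sketch of that loop (the free-standing function name is illustrative):

    #include <atomic>
    #include <cstdint>

    static bool try_acquire_strong_ref(std::atomic<int32_t>& refCnt) {
        int32_t prev = refCnt.load(std::memory_order_relaxed);
        do {
            if (0 == prev) {
                return false;  // object already expired; promotion fails
            }
            // On failure, compare_exchange_weak reloads prev and we retry.
        } while (!refCnt.compare_exchange_weak(prev, prev + 1,
                                               std::memory_order_acquire,
                                               std::memory_order_relaxed));
        return true;  // strong count went prev -> prev+1 with prev > 0
    }
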
/external/skqp/src/core/
SkCachedData.cpp
34 , fRefCnt(1) in SkCachedData()
46 , fRefCnt(1) in SkCachedData()
99 if ((1 == fRefCnt) && fInCache) { in inMutexRef()
103 fRefCnt += 1; in inMutexRef()
111 switch (--fRefCnt) { in inMutexUnref()
136 return 0 == fRefCnt; in inMutexUnref()
185 SkASSERT((fInCache && fRefCnt > 1) || !fInCache); in validate()
195 SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt)); in validate()
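
SkCachedData, above, takes a different route: fRefCnt is a plain int because every mutation runs inside inMutexRef/inMutexUnref while the cache's mutex is held. A sketch of that mutex-guarded variant (hypothetical names; the listing's low-bit "owned by the cache" bookkeeping is omitted):

    #include <mutex>

    class MutexGuardedRef {
    public:
        MutexGuardedRef() : fRefCnt(1) {}
        void ref(std::mutex& cacheMutex) {
            std::lock_guard<std::mutex> lock(cacheMutex);
            fRefCnt += 1;   // plain int: the lock is the only synchronization
        }
        void unref(std::mutex& cacheMutex) {
            bool dead;
            {
                std::lock_guard<std::mutex> lock(cacheMutex);
                dead = (0 == --fRefCnt);
            }
            if (dead) {
                delete this;   // destruction happens outside the lock
            }
        }
    private:
        ~MutexGuardedRef() = default;
        int fRefCnt;
    };
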
SkRegionPriv.h
45 int32_t fRefCnt;
79 head->fRefCnt = 1; in Alloc()
102 SkASSERT(fRefCnt == 1); in writable_runs()
112 if (fRefCnt > 1) { in ensureWritable()
123 if (sk_atomic_dec(&fRefCnt) == 1) { in ensureWritable()
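
SkRegionPriv's ensureWritable, above, is copy-on-write: when more than one owner shares the RunHead, clone it before mutating and drop the old reference. A sketch of that step (struct and function names are illustrative):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    struct RunHeadLike {
        std::atomic<int32_t> fRefCnt;
        int32_t fRuns[64];   // shared payload (size is illustrative)
    };

    static RunHeadLike* ensure_writable(RunHeadLike* head) {
        if (head->fRefCnt.load(std::memory_order_acquire) > 1) {
            RunHeadLike* copy = new RunHeadLike;
            copy->fRefCnt.store(1, std::memory_order_relaxed);
            std::memcpy(copy->fRuns, head->fRuns, sizeof(head->fRuns));
            // Drop our claim on the shared head; another thread may have
            // unreffed since the load above, leaving us the last owner.
            if (1 == head->fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                delete head;
            }
            return copy;   // sole owner of the fresh copy
        }
        return head;   // already unique: safe to mutate in place
    }
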
SkRWBuffer.cpp
67 mutable std::atomic<int32_t> fRefCnt; member
70 SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {} in SkBufferHead()
85 SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed)); in ref()
90 int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel); in unref()
106 SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0); in validate()
/external/skia/src/core/
SkCachedData.cpp
34 , fRefCnt(1) in SkCachedData()
46 , fRefCnt(1) in SkCachedData()
99 if ((1 == fRefCnt) && fInCache) { in inMutexRef()
103 fRefCnt += 1; in inMutexRef()
111 switch (--fRefCnt) { in inMutexUnref()
136 return 0 == fRefCnt; in inMutexUnref()
185 SkASSERT((fInCache && fRefCnt > 1) || !fInCache); in validate()
195 SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt)); in validate()
SkRegionPriv.h
45 int32_t fRefCnt;
79 head->fRefCnt = 1; in Alloc()
102 SkASSERT(fRefCnt == 1); in writable_runs()
112 if (fRefCnt > 1) { in ensureWritable()
123 if (sk_atomic_dec(&fRefCnt) == 1) { in ensureWritable()
SkRWBuffer.cpp
67 mutable std::atomic<int32_t> fRefCnt; member
70 SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {} in SkBufferHead()
85 SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed)); in ref()
90 int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel); in unref()
106 SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0); in validate()
SkCachedData.h
30 int testing_only_getRefCnt() const { return fRefCnt; } in testing_only_getRefCnt()
56 int fRefCnt; // low-bit means we're owned by the cache variable
/external/skqp/include/private/
GrSurfaceProxy.h
36 ++fRefCnt; in ref()
49 --fRefCnt; in unref()
55 SkASSERT(fRefCnt >= 0 && fPendingWrites >= 0 && fPendingReads >= 0); in isUnique_debugOnly()
56 return 1 == fRefCnt + fPendingWrites + fPendingReads; in isUnique_debugOnly()
62 SkASSERT(fRefCnt >= 0); in validate()
65 SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 1); in validate()
71 SkASSERT(fTarget->fRefCnt >= fRefCnt); in validate()
124 GrIORefProxy() : fTarget(nullptr), fRefCnt(1), fPendingReads(0), fPendingWrites(0) {} in GrIORefProxy()
125 GrIORefProxy(sk_sp<GrSurface> surface) : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { in GrIORefProxy()
141 SkASSERT(fTarget->fRefCnt > 0); in transferRefs()
[all …]
SkWeakRefCnt.h
87 int32_t prev = fRefCnt.load(std::memory_order_relaxed); in atomic_conditional_acquire_strong_ref()
92 } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire, in atomic_conditional_acquire_strong_ref()
147 return fRefCnt.load(std::memory_order_relaxed) == 0; in weak_expired()
/external/skqp/include/gpu/
GrGpuResource.h
54 ++fRefCnt; in ref()
60 if (!(--fRefCnt)) { in unref()
71 SkASSERT(fRefCnt >= 0); in validate()
74 SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 0); in validate()
79 GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { } in GrIORef()
93 bool internalHasRef() const { return SkToBool(fRefCnt); } in internalHasRef()
94 bool internalHasUniqueRef() const { return fRefCnt == 1; } in internalHasUniqueRef()
126 if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) { in didRemoveRefOrPendingIO()
131 mutable int32_t fRefCnt; variable
/external/skqp/include/core/
SkRefCnt.h
33 SkRefCntBase() : fRefCnt(1) {} in SkRefCntBase()
41 fRefCnt.store(0, std::memory_order_relaxed); in ~SkRefCntBase()
48 return fRefCnt.load(std::memory_order_relaxed); in getRefCnt()
60 if (1 == fRefCnt.load(std::memory_order_acquire)) { in unique()
74 (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); in ref()
84 if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) { in unref()
99 fRefCnt.store(1, std::memory_order_relaxed); in internal_dispose_restore_refcnt_to_1()
115 mutable std::atomic<int32_t> fRefCnt; variable
211 SkNVRefCnt() : fRefCnt(1) {} in SkNVRefCnt()
219 bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); } in unique()
[all …]
/external/skia/include/gpu/
GrGpuResource.h
54 ++fRefCnt; in ref()
60 if (!(--fRefCnt)) { in unref()
71 SkASSERT(fRefCnt >= 0); in validate()
74 SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 0); in validate()
79 GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { } in GrIORef()
93 bool internalHasRef() const { return SkToBool(fRefCnt); } in internalHasRef()
94 bool internalHasUniqueRef() const { return fRefCnt == 1; } in internalHasUniqueRef()
126 if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) { in didRemoveRefOrPendingIO()
131 mutable int32_t fRefCnt; variable
/external/skia/include/core/
SkRefCnt.h
33 SkRefCntBase() : fRefCnt(1) {} in SkRefCntBase()
41 fRefCnt.store(0, std::memory_order_relaxed); in ~SkRefCntBase()
48 return fRefCnt.load(std::memory_order_relaxed); in getRefCnt()
60 if (1 == fRefCnt.load(std::memory_order_acquire)) { in unique()
74 (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); in ref()
84 if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) { in unref()
99 fRefCnt.store(1, std::memory_order_relaxed); in internal_dispose_restore_refcnt_to_1()
115 mutable std::atomic<int32_t> fRefCnt; variable
211 SkNVRefCnt() : fRefCnt(1) {} in SkNVRefCnt()
219 bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); } in unique()
[all …]
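
SkRefCntBase, matched above, is the canonical base class: relaxed increments, an acquire load in unique(), and an acq_rel final decrement. In practice these objects are held through smart pointers; a minimal RAII wrapper over the same ref()/unref() protocol (a sketch, not Skia's actual sk_sp) could look like:

    template <typename T>
    class IntrusivePtr {
    public:
        explicit IntrusivePtr(T* obj = nullptr) : fPtr(obj) {}  // adopts: no ref()
        IntrusivePtr(const IntrusivePtr& other) : fPtr(other.fPtr) {
            if (fPtr) { fPtr->ref(); }
        }
        IntrusivePtr& operator=(const IntrusivePtr& other) {
            if (other.fPtr) { other.fPtr->ref(); }  // ref first: self-assign safe
            if (fPtr) { fPtr->unref(); }
            fPtr = other.fPtr;
            return *this;
        }
        ~IntrusivePtr() { if (fPtr) { fPtr->unref(); } }
        T* get() const { return fPtr; }
        T* operator->() const { return fPtr; }
    private:
        T* fPtr;
    };
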
/external/skia/tests/
RefCntTest.cpp
91 Effect() : fRefCnt(1) { in Effect()
96 int fRefCnt; member in Effect
100 fRefCnt += 1; in ref()
105 SkASSERT(fRefCnt > 0); in unref()
106 if (0 == --fRefCnt) { in unref()
160 REPORTER_ASSERT(reporter, paint.fEffect.get()->fRefCnt == 1); in DEF_TEST()
193 REPORTER_ASSERT(reporter, paint.fEffect.get()->fRefCnt == 2); in DEF_TEST()
198 REPORTER_ASSERT(reporter, paint.fEffect.get()->fRefCnt == 3); in DEF_TEST()
/external/skqp/tests/
RefCntTest.cpp
91 Effect() : fRefCnt(1) { in Effect()
96 int fRefCnt; member in Effect
100 fRefCnt += 1; in ref()
105 SkASSERT(fRefCnt > 0); in unref()
106 if (0 == --fRefCnt) { in unref()
160 REPORTER_ASSERT(reporter, paint.fEffect.get()->fRefCnt == 1); in DEF_TEST()
193 REPORTER_ASSERT(reporter, paint.fEffect.get()->fRefCnt == 2); in DEF_TEST()
198 REPORTER_ASSERT(reporter, paint.fEffect.get()->fRefCnt == 3); in DEF_TEST()
/external/skia/src/gpu/ops/
GrDrawPathOp.h
129 instanceData->fRefCnt = 1; in Alloc()
153 void ref() const { ++fRefCnt; } in ref()
156 if (0 == --fRefCnt) { in unref()
171 mutable int fRefCnt; member
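
GrDrawPathOp's InstanceData, above, sets fRefCnt = 1 inside a custom Alloc, so the factory's return value already carries the caller's reference. A sketch of that allocate-then-adopt pattern (names and layout are hypothetical):

    #include <cstdlib>
    #include <new>

    class InstanceDataLike {
    public:
        static InstanceDataLike* Alloc(int reserveCnt) {
            // Header and trailing array live in one malloc'd block.
            void* mem = std::malloc(sizeof(InstanceDataLike) +
                                    reserveCnt * sizeof(int));
            InstanceDataLike* data = new (mem) InstanceDataLike;
            data->fRefCnt = 1;   // the caller owns the first reference
            return data;
        }
        void ref() const { ++fRefCnt; }
        void unref() const {
            if (0 == --fRefCnt) {
                this->~InstanceDataLike();
                std::free(const_cast<InstanceDataLike*>(this));
            }
        }
    private:
        InstanceDataLike() = default;
        ~InstanceDataLike() = default;
        mutable int fRefCnt;
    };
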
/external/skqp/src/gpu/ops/
GrDrawPathOp.h
129 instanceData->fRefCnt = 1; in Alloc()
153 void ref() const { ++fRefCnt; } in ref()
156 if (0 == --fRefCnt) { in unref()
171 mutable int fRefCnt; member
