1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef GrGpuResource_DEFINED
9 #define GrGpuResource_DEFINED
10
11 #include "include/private/GrResourceKey.h"
12 #include "include/private/GrTypesPriv.h"
13 #include "include/private/SkNoncopyable.h"
14
15 class GrContext;
16 class GrGpu;
17 class GrResourceCache;
18 class SkTraceMemoryDump;
19
20 /**
21 * Base class for GrGpuResource. Handles the various types of refs we need. Separated out as a base
22 * class to isolate the ref-cnting behavior and provide friendship without exposing all of
23 * GrGpuResource.
24 *
25 * Gpu resources can have three types of refs:
26 * 1) Normal ref (+ by ref(), - by unref()): These are used by code that is issuing draw calls
27 * that read and write the resource via GrOpList and by any object that must own a
28 * GrGpuResource and is itself owned (directly or indirectly) by Skia-client code.
29 * 2) Pending read (+ by addPendingRead(), - by completedRead()): GrContext has scheduled a read
30 * of the resource by the GPU as a result of a skia API call but hasn't executed it yet.
31 * 3) Pending write (+ by addPendingWrite(), - by completedWrite()): GrContext has scheduled a
32 * write to the resource by the GPU as a result of a skia API call but hasn't executed it yet.
33 *
34 * The latter two ref types are private and intended only for Gr core code.
35 *
 * PRIOR to the last ref/IO count being removed DERIVED::notifyAllCntsWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs or pending IOs to be added
 * during this time. AFTER all the ref/io counts reach zero DERIVED::notifyAllCntsAreZero() will be
 * called. Similarly, when the ref (but not necessarily pending read/write) count reaches 0,
 * DERIVED::notifyRefCountIsZero() will be called. In the case when an unref() causes both
 * the ref cnt to reach zero and the other counts are zero, notifyRefCountIsZero() will be called
 * before notifyAllCntsAreZero(). Moreover, if notifyRefCountIsZero() returns false then
 * notifyAllCntsAreZero() won't be called at all. notifyRefCountIsZero() must return false if the
 * object may be deleted after notifyRefCountIsZero() returns.
45 *
46 * GrIORef and GrGpuResource are separate classes for organizational reasons and to be
47 * able to give access via friendship to only the functions related to pending IO operations.
48 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    // Some of the signatures are written to mirror SkRefCnt so that GrGpuResource can work with
    // templated helper classes (e.g. sk_sp). However, we have different categories of
    // refs (e.g. pending reads). We also don't require thread safety as GrCacheable objects are
    // not intended to cross thread boundaries.
    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(fRefCnt > 0);
        this->validate();
        ++fRefCnt;
    }

    void unref() const {
        this->validate();

        if (fRefCnt == 1) {
            // This is the last ref. If there is also no pending IO, warn the derived class
            // that every count is about to hit zero. The callee may legally add new refs or
            // pending IOs during this callback (see class comment above).
            if (!this->internalHasPendingIO()) {
                static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
            }
            // The callback must not have removed the ref we are about to drop.
            SkASSERT(fRefCnt > 0);
        }
        if (--fRefCnt == 0) {
            // If notifyRefCountIsZero() returns false the object may already have been
            // deleted, so we must not touch *this again.
            if (!static_cast<const DERIVED*>(this)->notifyRefCountIsZero()) {
                return;
            }
        }

        this->didRemoveRefOrPendingIO(kRef_CntType);
    }

    // Debug-only sanity check that no count has gone negative.
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt >= 0);
        SkASSERT(fPendingReads >= 0);
        SkASSERT(fPendingWrites >= 0);
        SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 0);
#endif
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return fRefCnt; }
    int32_t testingOnly_getPendingReads() const { return fPendingReads; }
    int32_t testingOnly_getPendingWrites() const { return fPendingWrites; }
#endif

protected:
    // Resources are born with one ref (owned by the creating code/cache).
    GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { }

    // Identifies which kind of count was removed last; forwarded to
    // DERIVED::notifyAllCntsAreZero() so it knows what triggered the transition.
    enum CntType {
        kRef_CntType,
        kPendingRead_CntType,
        kPendingWrite_CntType,
    };

    bool internalHasPendingRead() const { return SkToBool(fPendingReads); }
    bool internalHasPendingWrite() const { return SkToBool(fPendingWrites); }
    bool internalHasPendingIO() const { return SkToBool(fPendingWrites | fPendingReads); }

    bool internalHasRef() const { return SkToBool(fRefCnt); }
    bool internalHasUniqueRef() const { return fRefCnt == 1; }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        this->validate();
        ++fRefCnt;
    }

private:
    // A GPU read of this resource has been scheduled; balanced by completedRead().
    void addPendingRead() const {
        this->validate();
        ++fPendingReads;
    }

    void completedRead() const {
        this->validate();
        if (fPendingReads == 1 && !fPendingWrites && !fRefCnt) {
            // Removing this read takes every count to zero; warn the derived class first.
            static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
        }
        --fPendingReads;
        this->didRemoveRefOrPendingIO(kPendingRead_CntType);
    }

    // A GPU write to this resource has been scheduled; balanced by completedWrite().
    void addPendingWrite() const {
        this->validate();
        ++fPendingWrites;
    }

    void completedWrite() const {
        this->validate();
        if (fPendingWrites == 1 && !fPendingReads && !fRefCnt) {
            // Removing this write takes every count to zero; warn the derived class first.
            static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
        }
        --fPendingWrites;
        this->didRemoveRefOrPendingIO(kPendingWrite_CntType);
    }

    // Fires DERIVED::notifyAllCntsAreZero() once the last ref or pending IO is gone.
    void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const {
        if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved);
        }
    }

    mutable int32_t fRefCnt;        // starts at 1 (see constructor)
    mutable int32_t fPendingReads;
    mutable int32_t fPendingWrites;

    friend class GrResourceCache; // to check IO ref counts.

    template <typename, GrIOType> friend class GrPendingIOResource;
};
160
/**
 * Identifies the client that created a GrGpuResource so cached resources can be
 * attributed and filtered. A tag whose four fields are all zero is "invalid"; as a
 * filter it matches only other invalid tags. In a valid filter, a zero field acts
 * as a wildcard and a non-zero field must match exactly.
 */
struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0) {}

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid) {}

    /** True if at least one field is non-zero. */
    bool isGrTagValid() const {
        return fPid || fTid || fWid || fFid;
    }

    /**
     * Tests whether tag matches this tag treated as a filter.
     *
     * @param tag  the candidate tag to test.
     * @return true if every non-zero field of this filter equals the corresponding
     *         field of tag; an invalid (all-zero) filter matches only invalid tags.
     */
    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    /**
     * Rvalue overload. Delegates to the lvalue overload instead of duplicating the
     * matching logic (the two copies previously had to be kept in sync by hand).
     */
    bool filter(GrGpuResourceTag&& tag) const { return this->filter(tag); }

    // NOTE(review): field names suggest process/thread/window/?-ids — confirm the exact
    // semantics with the code that constructs these tags.
    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
};
214
215 /**
216 * Base class for objects that can be kept in the GrResourceCache.
217 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrContext
     * automatically releases all its resources.
     */
    const GrContext* getContext() const;
    GrContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        // Computed lazily on first query and then cached; subclasses report their
        // size via onGpuMemorySize().
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    /**
     * Value-type wrapper for the resource's unique id. A default-constructed
     * UniqueID is invalid (holds SK_InvalidUniqueID).
     */
    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

    /**
     * Set the resource tag.
     */
    void setResourceTag(const GrGpuResourceTag tag) { fGrResourceTag = tag; }

    /**
     * Get the resource tag.
     */
    GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }

protected:
    // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
    // fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuObject that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of the
    // final class).
    void registerWithCacheWrapped(GrWrapCacheable);

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;


private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasRefOrPendingIO() const;

    /**
     * Called by the registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    // Reports the resource's actual size; cached by gpuMemorySize().
    virtual size_t onGpuMemorySize() const = 0;

    /**
     * Called by GrResourceCache when a resource loses its last ref or pending IO.
     */
    virtual void willRemoveLastRefOrPendingIO() {}

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyAllCntsWillBeZero() const;
    void notifyAllCntsAreZero(CntType) const;
    bool notifyRefCountIsZero() const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();

#ifdef SK_DEBUG
    friend class GrGpu; // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    // Sentinel meaning "size has not been computed yet"; see gpuMemorySize().
    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    GrGpuResourceTag fGrResourceTag;

    typedef GrIORef<GrGpuResource> INHERITED;
    friend class GrIORef<GrGpuResource>; // to access notifyAllCntsAreZero and notifyRefCountIsZero.
};
444
445 class GrGpuResource::ProxyAccess {
446 private:
ProxyAccess(GrGpuResource * resource)447 ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
448
449 /** Proxies are allowed to take a resource from no refs to one ref. */
450 void ref(GrResourceCache* cache);
451
452 // No taking addresses of this type.
453 const CacheAccess* operator&() const = delete;
454 CacheAccess* operator&() = delete;
455
456 GrGpuResource* fResource;
457
458 friend class GrGpuResource;
459 friend class GrSurfaceProxy;
460 friend class GrIORefProxy;
461 };
462
proxyAccess()463 inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }
464
465 #endif
466