/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"

class GrContext;
class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Handles the various types of refs we need. Separated out as a base
 * class to isolate the ref-counting behavior and provide friendship without exposing all of
 * GrGpuResource.
 *
 * GPU resources can have three types of refs:
 *   1) Normal ref (+ by ref(), - by unref()): These are used by code that is issuing draw calls
 *      that read and write the resource via GrOpList and by any object that must own a
 *      GrGpuResource and is itself owned (directly or indirectly) by Skia-client code.
 *   2) Pending read (+ by addPendingRead(), - by completedRead()): GrContext has scheduled a read
 *      of the resource by the GPU as a result of a Skia API call but hasn't executed it yet.
 *   3) Pending write (+ by addPendingWrite(), - by completedWrite()): GrContext has scheduled a
 *      write to the resource by the GPU as a result of a Skia API call but hasn't executed it yet.
 *
 * The latter two ref types are private and intended only for Gr core code.
 *
 * PRIOR to the last ref/IO count being removed, DERIVED::notifyAllCntsWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs or pending IOs to be added
 * during this time. AFTER all the ref/IO counts reach zero, DERIVED::notifyAllCntsAreZero() will be
 * called. Similarly, when the ref count (but not necessarily the pending read/write counts) reaches
 * zero, DERIVED::notifyRefCountIsZero() will be called. When an unref() brings the ref count to
 * zero while the other counts are already zero, notifyRefCountIsZero() will be called before
 * notifyAllCntsAreZero(). Moreover, if notifyRefCountIsZero() returns false then
 * notifyAllCntsAreZero() won't be called at all. notifyRefCountIsZero() must return false if the
 * object may be deleted after notifyRefCountIsZero() returns. (An illustrative usage sketch
 * appears after the class definition below.)
 *
 * GrIORef and GrGpuResource are separate classes for organizational reasons and to be
 * able to give access via friendship to only the functions related to pending IO operations.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    // Some of the signatures are written to mirror SkRefCnt so that GrGpuResource can work with
    // templated helper classes (e.g. sk_sp). However, we have different categories of
    // refs (e.g. pending reads). We also don't require thread safety as GrCacheable objects are
    // not intended to cross thread boundaries.
    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(fRefCnt > 0);
        this->validate();
        ++fRefCnt;
    }

    void unref() const {
        this->validate();

        if (fRefCnt == 1) {
            if (!this->internalHasPendingIO()) {
                static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
            }
            SkASSERT(fRefCnt > 0);
        }
        if (--fRefCnt == 0) {
            if (!static_cast<const DERIVED*>(this)->notifyRefCountIsZero()) {
                return;
            }
        }

        this->didRemoveRefOrPendingIO(kRef_CntType);
    }

    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt >= 0);
        SkASSERT(fPendingReads >= 0);
        SkASSERT(fPendingWrites >= 0);
        SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 0);
#endif
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return fRefCnt; }
    int32_t testingOnly_getPendingReads() const { return fPendingReads; }
    int32_t testingOnly_getPendingWrites() const { return fPendingWrites; }
#endif

protected:
    GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { }

    enum CntType {
        kRef_CntType,
        kPendingRead_CntType,
        kPendingWrite_CntType,
    };

    bool internalHasPendingRead() const { return SkToBool(fPendingReads); }
    bool internalHasPendingWrite() const { return SkToBool(fPendingWrites); }
    bool internalHasPendingIO() const { return SkToBool(fPendingWrites | fPendingReads); }

    bool internalHasRef() const { return SkToBool(fRefCnt); }
    bool internalHasUniqueRef() const { return fRefCnt == 1; }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        this->validate();
        ++fRefCnt;
    }

private:
    void addPendingRead() const {
        this->validate();
        ++fPendingReads;
    }

    void completedRead() const {
        this->validate();
        if (fPendingReads == 1 && !fPendingWrites && !fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
        }
        --fPendingReads;
        this->didRemoveRefOrPendingIO(kPendingRead_CntType);
    }

    void addPendingWrite() const {
        this->validate();
        ++fPendingWrites;
    }

    void completedWrite() const {
        this->validate();
        if (fPendingWrites == 1 && !fPendingReads && !fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
        }
        --fPendingWrites;
        this->didRemoveRefOrPendingIO(kPendingWrite_CntType);
    }

    void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const {
        if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved);
        }
    }

    mutable int32_t fRefCnt;
    mutable int32_t fPendingReads;
    mutable int32_t fPendingWrites;

    friend class GrResourceCache; // to check IO ref counts.

    template <typename, GrIOType> friend class GrPendingIOResource;
};
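
// Illustrative sketch only (not part of Skia's API): a minimal example of how a hypothetical
// class could derive from GrIORef via CRTP and receive the notifications described above.
// In this header, GrGpuResource (below) is the actual DERIVED type.
//
//   class HypotheticalResource : public GrIORef<HypotheticalResource> {
//   private:
//       friend class GrIORef<HypotheticalResource>;
//       // Called just before the last ref or pending IO would bring all counts to zero.
//       void notifyAllCntsWillBeZero() const {}
//       // Called once the ref count and pending read/write counts are all zero.
//       void notifyAllCntsAreZero(CntType lastCntTypeRemoved) const {}
//       // Called when the ref count alone reaches zero; must return false if the object
//       // may already have been deleted by the time this returns.
//       bool notifyRefCountIsZero() const { return true; }
//   };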

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has had
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

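    // Illustrative sketch only: client code that keeps a resource across GrContext teardown
    // would guard uses as described above ('resource' here is a hypothetical pointer):
    //
    //   if (!resource->wasDestroyed()) {
    //       // The backing GPU object is still valid and may be used.
    //   }
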
    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrContext
     * automatically releases all its resources.
     */
    const GrContext* getContext() const;
    GrContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }
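
    // Illustrative sketch only: gpuMemorySize() lazily caches the value returned by the
    // subclass's onGpuMemorySize() override (declared further below). A hypothetical
    // texture-like subclass might report its backing allocation like this; the class and
    // field names are assumptions, not Skia API:
    //
    //   size_t HypotheticalTexture::onGpuMemorySize() const {
    //       return fWidth * fHeight * fBytesPerPixel;
    //   }
    //
    // The first call to gpuMemorySize() invokes the override; subsequent calls return the
    // cached value.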

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil).  This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

protected:
    // This must be called by every non-wrapped GrGpuResource. It should be called once the object
    // is fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuResource that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of
    // the final class).
    void registerWithCacheWrapped(GrWrapCacheable);
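
    // Illustrative sketch only: a hypothetical non-wrapped subclass would register itself once
    // fully constructed. The subclass name, its fields, and constructor arguments are assumptions:
    //
    //   HypotheticalBuffer::HypotheticalBuffer(GrGpu* gpu, size_t size, SkBudgeted budgeted)
    //           : GrGpuResource(gpu), fSize(size) {
    //       this->registerWithCache(budgeted);
    //   }
    //
    // A subclass that wraps an externally owned backend object would instead call
    // registerWithCacheWrapped() with the appropriate GrWrapCacheable value.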

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;
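
    // Illustrative sketch only: a subclass that needs custom reporting might override the public
    // dumpMemoryStatistics() and forward to dumpMemoryStatisticsPriv() with its own inputs.
    // HypotheticalTexture is an assumed subclass, not part of Skia:
    //
    //   void HypotheticalTexture::dumpMemoryStatistics(SkTraceMemoryDump* dump) const {
    //       this->dumpMemoryStatisticsPriv(dump, this->getResourceName(),
    //                                      "HypotheticalTexture", this->gpuMemorySize());
    //   }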

private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasRefOrPendingIO() const;

    /**
     * Called by registerWithCache() if the resource is available to be used as scratch.
     * Resource subclasses should override this, populating the scratch key, if their instances
     * should be recycled as scratch resources.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}
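
    // Illustrative sketch only: a subclass that wants its instances reused as scratch resources
    // would build the key from the properties that make two instances interchangeable. The
    // subclass, its fields, and the single-field key layout are assumptions:
    //
    //   void HypotheticalBuffer::computeScratchKey(GrScratchKey* key) const {
    //       static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
    //       GrScratchKey::Builder builder(key, kType, 1);
    //       builder[0] = SkToU32(fSize);
    //   }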

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    /**
     * Called by GrResourceCache when a resource loses its last ref or pending IO.
     */
    virtual void willRemoveLastRefOrPendingIO() {}

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyAllCntsWillBeZero() const;
    void notifyAllCntsAreZero(CntType) const;
    bool notifyRefCountIsZero() const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;

    typedef GrIORef<GrGpuResource> INHERITED;
    friend class GrIORef<GrGpuResource>; // to access notifyAllCntsAreZero and notifyRefCountIsZero.
};

class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
    friend class GrIORefProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif