/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"
#include <atomic>
#include <mutex>
#include <string>

class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship
 * without exposing all of GrGpuResource.
 *
 * AFTER a ref count reaches zero, DERIVED::notifyARefCntIsZero() will be called (static
 * polymorphism using CRTP). The cache may then legally restore a ref via the privileged
 * addInitialRef(). A minimal sketch of the CRTP hookup follows the class definition below.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
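// A minimal sketch of how a derived class hooks into GrIORef's CRTP notification.
// Illustrative only, not part of this header: `HypotheticalResource` and its body
// are assumptions; the real client is GrGpuResource below.
//
//   class HypotheticalResource : public GrIORef<HypotheticalResource> {
//   private:
//       // Invoked (statically dispatched, no vtable) by notifyWillBeZero() when
//       // fRefCnt or fCommandBufferUsageCnt drops to zero.
//       void notifyARefCntIsZero(LastRemovedRef removedRef) const {
//           if (removedRef == LastRemovedRef::kMainRef) {
//               // e.g. hand the resource back to a cache's purgeable list
//           }
//       }
//       friend class GrIORef<HypotheticalResource>; // lets the base call the private hook
//   };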
107 
struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0)
    {
        isGrGpuResourceTagValid = false;
    }

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid)
    {
        isGrGpuResourceTagValid = fPid || fTid || fWid || fFid;
    }

    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + "]";
    }

    bool isGrTagValid() const {
        return isGrGpuResourceTagValid;
    }

    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }
167 
    bool filter(GrGpuResourceTag&& tag) const {
        // Identical to the lvalue overload; delegate to avoid duplicating the logic.
        return this->filter(tag);
    }
    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
    bool isGrGpuResourceTagValid;
};
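// A hedged usage sketch, not part of this header: operator< orders tags
// lexicographically by (fPid, fTid, fWid, fFid), so a tag can serve as a key in
// ordered containers, while filter() treats a zero field in *this as a wildcard.
//
//   GrGpuResourceTag invalid;                     // default tag: matches only invalid tags
//   GrGpuResourceTag byPid(1234, 0, 0, 0);        // zero fields are wildcards in filter()
//   GrGpuResourceTag exact(1234, 5, 6, 7);
//   SkASSERT(byPid.filter(exact));                // pid matches; tid/wid/fid are ignored
//   SkASSERT(!byPid.filter(GrGpuResourceTag(99, 5, 6, 7)));
//   SkASSERT(invalid.filter(GrGpuResourceTag())); // invalid matches only invalid
//   std::set<GrGpuResourceTag> tags{exact, byPid}; // ordered via operator< (needs <set>)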
192 
/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has had
     * contextLost called on it. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }
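    // gpuMemorySize() is computed lazily: the first call stores the subclass's
    // onGpuMemorySize() result in fGpuMemorySize, and later calls return the cached
    // value. A hedged sketch of a subclass override (fWidth/fHeight/fBytesPerPixel
    // are hypothetical members, not part of this class):
    //
    //   size_t onGpuMemorySize() const override {
    //       // Approximate by design: the driver may add padding or extra copies.
    //       return fWidth * fHeight * fBytesPerPixel;
    //   }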
233 
    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;
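    // A hedged sketch (hypothetical subclass, not real Skia code) of wiring these
    // tracing hooks together; dumpMemoryStatisticsPriv() is declared in the
    // protected section below:
    //
    //   const char* getResourceType() const override { return "HypotheticalBuffer"; }
    //
    //   void dumpMemoryStatistics(SkTraceMemoryDump* dump) const override {
    //       // Reuse the default formatting but supply this subclass's type and size.
    //       this->dumpMemoryStatisticsPriv(dump, this->getResourceName(),
    //                                      this->getResourceType(), this->gpuMemorySize());
    //   }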
298 
    static uint32_t CreateUniqueID();

    /**
     * Set the resource tag.
     */
    void setResourceTag(const GrGpuResourceTag tag) { fGrResourceTag = tag; }

    /**
     * Get the resource tag.
     *
     * @return the GrGpuResourceTag for this resource.
     */
    GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }

protected:
    // This must be called by every non-wrapped GrGpuResource. It should be called once the object
    // is fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuResource that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of
    // the final class).
    void registerWithCacheWrapped(GrWrapCacheable);
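    // A hedged sketch of the registration contract above. The class name, members,
    // and constructor arguments are illustrative assumptions, not real Skia code:
    //
    //   class HypotheticalTexture final : public GrGpuResource {
    //   public:
    //       HypotheticalTexture(GrGpu* gpu, SkBudgeted budgeted) : GrGpuResource(gpu) {
    //           // Create the backend objects first, then register exactly once
    //           // from the final class's constructor.
    //           this->registerWithCache(budgeted);
    //       }
    //       const char* getResourceType() const override { return "HypotheticalTexture"; }
    //   private:
    //       size_t onGpuMemorySize() const override { return 4096; } // placeholder size
    //   };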
322 
    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;

private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if their instances should be recycled as scratch
     * resources, and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();
    void userRegisterResource();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex = -1;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    GrGpuResourceTag fGrResourceTag;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyARefCntIsZero.
    std::mutex mutex_; // Guards against the GPU cache being released abnormally from
                       // multiple threads.
};
420 
class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const ProxyAccess* operator&() const = delete;
    ProxyAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif
441