/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"
#include <atomic>
#include <mutex>
#include <string>

class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship
 * without exposing all of GrGpuResource.
 *
 * AFTER either count (the main ref count or the command buffer usage count) reaches zero,
 * DERIVED::notifyARefCntIsZero() will be called (static polymorphism using CRTP) with a
 * LastRemovedRef value identifying which count dropped to zero.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
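
/*
 * Illustrative sketch (not part of this header): the minimal contract a CRTP client of
 * GrIORef is assumed to satisfy. "HypotheticalResource" is a made-up name; GrGpuResource
 * below is the real client.
 *
 *   class HypotheticalResource : public GrIORef<HypotheticalResource> {
 *   private:
 *       // Called by GrIORef after either counter reaches zero; typically the object is
 *       // handed back to a cache or destroyed here.
 *       void notifyARefCntIsZero(LastRemovedRef removedRef) const;
 *
 *       friend class GrIORef<HypotheticalResource>;  // lets GrIORef call the private hook
 *   };
 */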

struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0)
    {
        isGrGpuResourceTagValid = false;
    }

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fName(name)
    {
        isGrGpuResourceTagValid = fPid || fTid || fWid || fFid;
    }

    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + "]";
    }

    bool isGrTagValid() const {
        return isGrGpuResourceTagValid;
    }

    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    bool filter(GrGpuResourceTag&& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }
    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
    std::string fName;
    bool isGrGpuResourceTagValid;
};
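
/*
 * Illustrative sketch (not part of this header): how GrGpuResourceTag::filter() treats
 * zero-valued fields as wildcards. The numbers below are arbitrary.
 *
 *   GrGpuResourceTag byPid(1234, 0, 0, 0, "example");   // matches any tid/wid/fid for pid 1234
 *   GrGpuResourceTag exact(1234, 5, 6, 7, "example");
 *   bool a = byPid.filter(exact);   // true: pid matches, the zero fields are wildcards
 *   bool b = exact.filter(byPid);   // false: exact's tid (5) does not match byPid's tid (0)
 */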

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    void setRealAlloc(bool realAlloc) { fRealAlloc = realAlloc; } // OH ISSUE: set real alloc flag
    bool isRealAlloc() { return fRealAlloc; } // OH ISSUE: get real alloc flag
    void setRealAllocSize(size_t realAllocSize) { fRealAllocSize = realAllocSize; } // OH ISSUE: set real alloc size
    size_t getRealAllocSize() { return fRealAllocSize; } // OH ISSUE: get real alloc size

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil).  This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

    /**
     * Set the resource tag.
     */
    void setResourceTag(const GrGpuResourceTag tag);

    /**
     * Get the resource tag.
     *
     * @return the GrGpuResourceTag assigned to this resource.
     */
    GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }

protected:
    // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
    // fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuObject that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of the
    // final class).
    void registerWithCacheWrapped(GrWrapCacheable);
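
    /*
     * Illustrative sketch (not part of this header): a hypothetical non-wrapped subclass
     * would typically register from the constructor of its final class, once everything
     * onGpuMemorySize() depends on has been initialized.
     *
     *   HypotheticalBuffer::HypotheticalBuffer(GrGpu* gpu, size_t sizeInBytes)
     *           : INHERITED(gpu), fSizeInBytes(sizeInBytes) {
     *       this->registerWithCache(SkBudgeted::kYes);
     *   }
     *
     *   size_t HypotheticalBuffer::onGpuMemorySize() const { return fSizeInBytes; }
     */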

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;
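
    /*
     * Illustrative sketch (not part of this header): a subclass override of
     * dumpMemoryStatistics() can delegate to the helper above with its own type string and
     * size. "HypotheticalTexture" is a made-up class name.
     *
     *   void HypotheticalTexture::dumpMemoryStatistics(SkTraceMemoryDump* dump) const {
     *       this->dumpMemoryStatisticsPriv(dump, this->getResourceName(),
     *                                      "HypotheticalTexture", this->gpuMemorySize());
     *   }
     */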

private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by registerWithCache() if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}
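
    /*
     * Illustrative sketch (not part of this header), assuming the GrScratchKey builder API
     * from GrResourceKey.h: a hypothetical recyclable subclass would override
     * computeScratchKey() and encode whatever makes one instance interchangeable with
     * another.
     *
     *   void HypotheticalBuffer::computeScratchKey(GrScratchKey* key) const {
     *       static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
     *       GrScratchKey::Builder builder(key, kType, 1);  // one 32-bit datum
     *       builder[0] = SkToU32(fSizeInBytes);
     *   }
     */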

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();
    void userRegisterResource();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex = -1;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    GrGpuResourceTag fGrResourceTag;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyARefCntIsZero.
    std::mutex mutex_; // Guards against the gpu cache being released abnormally by multiple threads.

    bool fRealAlloc = false; // OH ISSUE: real alloc flag
    size_t fRealAllocSize = 0; // OH ISSUE: real alloc size
};

class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif