/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
#include <sstream>
#endif
#ifdef SKIA_OHOS_SINGLE_OWNER
#include <pthread.h>
#endif

class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship
 * without exposing all of GrGpuResource.
 *
 * PRIOR to the last ref being removed DERIVED::notifyARefCntWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs to be added
 * during this time. AFTER the ref count reaches zero DERIVED::notifyARefCntIsZero() will be
 * called.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
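
// Illustrative sketch (hypothetical, excluded from the build with #if 0): a minimal CRTP subclass
// showing the contract GrIORef expects from DERIVED. In this header the only real DERIVED is
// GrGpuResource below, which forwards the notification to the GrResourceCache.
#if 0
class HypotheticalIORefClient : public GrIORef<HypotheticalIORefClient> {
private:
    // Invoked by GrIORef::notifyWillBeZero() when fRefCnt or fCommandBufferUsageCnt hits zero.
    void notifyARefCntIsZero(LastRemovedRef removedRef) const {
        if (removedRef == LastRemovedRef::kMainRef) {
            // A real implementation would return the object to a cache or free it here.
        }
    }
    // GrIORef must be able to reach the private hook above.
    friend class GrIORef<HypotheticalIORefClient>;
};
#endif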

struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0)
    {
        isGrGpuResourceTagValid = false;
    }

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fName(name)
    {
        isGrGpuResourceTagValid = fPid || fTid || fWid || fFid;
    }

    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + "]";
    }

    bool isGrTagValid() const {
        return isGrGpuResourceTagValid;
    }

    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    bool filter(GrGpuResourceTag&& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
    std::string fName;
    bool isGrGpuResourceTagValid;
};
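
// Illustrative sketch (hypothetical values, excluded from the build with #if 0): zeroed fields in
// a filter tag act as wildcards, matching the field-by-field checks in filter() above.
#if 0
inline void GrGpuResourceTagFilterExample() {
    GrGpuResourceTag resource(1234, 0, 56, 7, "node");  // tag attached to some resource
    GrGpuResourceTag byPid(1234, 0, 0, 0, "");          // only the pid is constrained
    bool sameProcess = byPid.filter(resource);          // true: pid equal, other fields ignored
    GrGpuResourceTag otherPid(4321, 0, 0, 0, "");
    bool different = otherPid.filter(resource);         // false: pid differs
    (void)sameProcess;
    (void)different;
}
#endif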

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    inline bool checkMagic() {
        return fMagicNum == MAGIC_INIT;
    }
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has had
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    void setRealAlloc(bool realAlloc) { fRealAlloc = realAlloc; } // OH ISSUE: set real alloc flag
    bool isRealAlloc() { return fRealAlloc; } // OH ISSUE: get real alloc flag
    void setRealAllocSize(size_t realAllocSize) { fRealAllocSize = realAllocSize; } // OH ISSUE: set real alloc size
    size_t getRealAllocSize() { return fRealAllocSize; } // OH ISSUE: get real alloc size

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

    /**
     * Set the resource tag.
     */
    void setResourceTag(const GrGpuResourceTag tag, bool curRealAlloc = false);

    /**
     * Get the resource tag.
     *
     * @return all GrGpuResourceTags.
     */
    GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    virtual void dumpVkImageInfo(std::stringstream& dump) const {
        dump << "\n";
    }
#endif

protected:
    // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
    // fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuObject that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of the
    // final class).
    void registerWithCacheWrapped(GrWrapCacheable);

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;

private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by the registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();
    void userRegisterResource();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif
    static constexpr uint32_t MAGIC_INIT = 0xDEADBEEF;
    uint32_t fMagicNum = MAGIC_INIT;
    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    GrGpuResourceTag fGrResourceTag;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyRefCntWillBeZero and
                                         // notifyARefCntIsZero.

    bool fRealAlloc = false; // OH ISSUE: real alloc flag
    size_t fRealAllocSize = 0; // OH ISSUE: real alloc size

#ifdef SKIA_OHOS_SINGLE_OWNER
    pthread_t fTid = pthread_self();
#endif
};
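
// Illustrative sketch (hypothetical class, excluded from the build with #if 0): the minimal
// overrides a concrete resource supplies, plus the registerWithCache() call that the protected
// section above requires from every non-wrapped resource once it is fully constructed.
#if 0
class HypotheticalBufferResource : public GrGpuResource {
public:
    HypotheticalBufferResource(GrGpu* gpu, size_t sizeInBytes)
            : GrGpuResource(gpu), fSizeInBytes(sizeInBytes) {
        this->registerWithCache(SkBudgeted::kYes);  // only after the object is fully initialized
    }

    const char* getResourceType() const override { return "Hypothetical Buffer"; }

private:
    void onRelease() override { /* free the backend object */ }
    void onAbandon() override { /* drop handles without touching the 3D API */ }
    size_t onGpuMemorySize() const override { return fSizeInBytes; }

    size_t fSizeInBytes;
};
#endif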

class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif