/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
#include <sstream>
#endif
#ifdef SKIA_OHOS_SINGLE_OWNER
#include <pthread.h>
#endif

class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship
 * without exposing all of GrGpuResource.
 *
 * PRIOR to the last ref being removed DERIVED::notifyARefCntWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs to be added
 * during this time. AFTER the ref count reaches zero DERIVED::notifyARefCntIsZero() will be
 * called.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
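
/*
 * Illustrative sketch (hypothetical class, not part of this header): a type that
 * derives from GrIORef must supply notifyARefCntIsZero(), which the base invokes
 * through CRTP once a counted ref type drops to zero, and must grant the base
 * friendship if that hook is private.
 *
 *   class HypotheticalResource : public GrIORef<HypotheticalResource> {
 *   private:
 *       void notifyARefCntIsZero(LastRemovedRef removedRef) const {
 *           if (removedRef == LastRemovedRef::kMainRef) {
 *               // e.g. hand the object back to a cache or free it
 *           }
 *       }
 *       friend class GrIORef<HypotheticalResource>;  // base needs access to the hook
 *   };
 */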

struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0), fSid(0)
    {
        isGrGpuResourceTagValid = false;
    }

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint64_t wid, uint32_t fid, uint32_t sid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fSid(sid), fName(name)
    {
        isGrGpuResourceTagValid = fPid || fTid || fWid || fCid || fFid || fSid;
    }

    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + ","
            + std::to_string(fCid) + "," + std::to_string(fSid) + "]";
    }

    bool isGrTagValid() const {
        return isGrGpuResourceTagValid;
    }

    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    bool filter(GrGpuResourceTag&& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    uint32_t fPid;
    uint32_t fTid;
    uint64_t fWid;
    uint64_t fCid{0};
    uint32_t fFid;
    uint32_t fSid;
    std::string fName;
    bool isGrGpuResourceTagValid;
};
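
/*
 * Illustrative sketch (hypothetical values, not part of this header): a tag with
 * any nonzero id field is valid; when used as a filter, zero fields match anything.
 *
 *   GrGpuResourceTag owner(1234, 1, 0, 0, 0, "node");  // tag recorded on a resource
 *   GrGpuResourceTag query(1234, 0, 0, 0, 0, "");      // pid-only filter
 *   bool matches = query.filter(owner);                // true: only fPid is compared
 *   std::string s = owner.toString();                  // "[1234,1,0,0,0,0]"
 */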

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has had
     * contextLost called on it. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    void setRealAlloc(bool realAlloc) { fRealAlloc = realAlloc; } // OH ISSUE: set real alloc flag
    bool isRealAlloc() { return fRealAlloc; } // OH ISSUE: get real alloc flag

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }
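
    /*
     * Illustrative sketch (hypothetical subclass, not part of this header):
     * gpuMemorySize() caches whatever the subclass's onGpuMemorySize() override
     * returns, so the size only needs to be computed once.
     *
     *   class HypotheticalBuffer : public GrGpuResource {
     *   private:
     *       size_t onGpuMemorySize() const override { return fSizeInBytes; }
     *       size_t fSizeInBytes;  // hypothetical member set at creation time
     *       // ... remaining overrides ...
     *   };
     */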

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

    /**
     * Set the resource tag.
     */
    void setResourceTag(const GrGpuResourceTag tag, bool curRealAlloc = false);

    /**
     * Get the resource tag.
     *
     * @return the GrGpuResourceTag assigned to this resource.
     */
    GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    virtual void dumpVkImageInfo(std::stringstream& dump) const {
        dump << "\n";
    }
#endif

protected:
    // This must be called by every non-wrapped GrGpuResource subclass. It should be called once
    // the object is fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuResource subclass that references any wrapped backend
    // objects. It should be called once the object is fully initialized (i.e. only from the
    // constructors of the final class).
    void registerWithCacheWrapped(GrWrapCacheable);
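
    /*
     * Illustrative sketch (hypothetical subclass, not part of this header): a
     * final class typically finishes creating its backend object and then
     * registers itself with the cache exactly once, from its constructor.
     *
     *   HypotheticalTexture::HypotheticalTexture(GrGpu* gpu, SkBudgeted budgeted)
     *           : GrGpuResource(gpu) {
     *       // ... create the backend texture object here ...
     *       this->registerWithCache(budgeted);
     *   }
     */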

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc. to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;
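
    /*
     * Illustrative sketch (hypothetical subclass, not part of this header): an
     * override that wants a custom type string in the memory dump can route
     * through dumpMemoryStatisticsPriv() rather than re-implementing the format.
     *
     *   void HypotheticalBuffer::dumpMemoryStatistics(SkTraceMemoryDump* dump) const {
     *       this->dumpMemoryStatisticsPriv(dump, this->getResourceName(),
     *                                      "Buffer", this->gpuMemorySize());
     *   }
     */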

private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by registerWithCache() if the resource is available to be used as scratch.
     * Resource subclasses should override this if instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}
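
    /*
     * Illustrative sketch (hypothetical subclass and key layout, not part of this
     * header): a resource that can be recycled as scratch builds a key from the
     * parameters that make one instance interchangeable with another.
     *
     *   void HypotheticalTexture::computeScratchKey(GrScratchKey* key) const {
     *       static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
     *       GrScratchKey::Builder builder(key, kType, 2);
     *       builder[0] = fWidth;   // hypothetical members
     *       builder[1] = fHeight;
     *   }
     */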

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();
    void userRegisterResource();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif
    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    GrGpuResourceTag fGrResourceTag;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyARefCntIsZero.

    bool fRealAlloc = false; // OH ISSUE: real alloc flag

#ifdef SKIA_OHOS_SINGLE_OWNER
    pthread_t fTid = pthread_self();
#endif
};

class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif
462