/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/core/SkString.h"
#include "include/core/SkTypes.h"
#include "include/private/base/SkNoncopyable.h"
#include "include/private/base/SkTo.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/GpuTypesPriv.h"
#include "src/gpu/ResourceKey.h"

#include <atomic>
#include <cstddef>
#include <cstdint>
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
#include <sstream>
#endif
#include <string>
#include <string_view>

class GrDirectContext;
class GrGpu;
class GrResourceCache;
class GrSurface;
class SkTraceMemoryDump;

namespace skgpu {
enum class Budgeted : bool;
}

/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-cnting behavior and provide friendship without
 * exposing all of GrGpuResource.
 *
 * AFTER the ref count reaches zero, DERIVED::notifyARefCntIsZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs to be added
 * during this time (see addInitialRef()).
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    void refCommandBuffer() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void unrefCommandBuffer() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }
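
    // Explanatory note (added): the acq_rel order on the decrements in unref() and
    // unrefCommandBuffer() pairs a release (publishing this owner's prior writes) with an
    // acquire on the final decrement, so notifyWillBeZero() runs only after all other owners'
    // preceding accesses are visible. This mirrors the usual atomic ref-count pattern
    // (cf. SkRefCnt).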

#if defined(GPU_TEST_UTILS)
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
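
// Illustrative sketch (added; not part of the API): how a DERIVED type is expected to plug into
// GrIORef via CRTP. `MyResource` is hypothetical; the real consumer in this file is
// GrGpuResource, which implements notifyARefCntIsZero() privately and befriends
// GrIORef<GrGpuResource> below.
//
//   class MyResource : public GrIORef<MyResource> {
//   private:
//       // GrIORef calls this once the main ref count or the command buffer usage count
//       // has dropped to zero; the object may be resurrected (e.g. returned to a cache) here.
//       void notifyARefCntIsZero(LastRemovedRef removedRef) const {
//           if (removedRef == LastRemovedRef::kMainRef) {
//               // e.g. hand the object back to a cache instead of deleting it
//           }
//       }
//       friend class GrIORef<MyResource>; // lets GrIORef reach the private hook
//   };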

struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0), fSid(0)
    {
        isGrGpuResourceTagValid = false;
    }

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint64_t wid, uint32_t fid, uint32_t sid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fSid(sid), fName(name)
    {
        isGrGpuResourceTagValid = fPid || fTid || fWid || fCid || fFid || fSid;
    }

    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + ","
            + std::to_string(fCid) + "," + std::to_string(fSid) + "]";
    }

    bool isGrTagValid() const {
        return isGrGpuResourceTagValid;
    }

    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    bool filter(GrGpuResourceTag&& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }
    uint32_t fPid;
    uint32_t fTid;
    uint64_t fWid;
    uint64_t fCid{0};
    uint32_t fFid;
    uint32_t fSid;
    std::string fName;
    bool isGrGpuResourceTagValid;
};
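
// Illustrative sketch (added; not part of the API): filter() treats a zero field on the
// receiver as a wildcard, so a tag that only carries a pid matches every tag from that
// process. All values below are hypothetical.
//
//   GrGpuResourceTag query(1234, 0, 0, 0, 0, "");         // pid-only query tag
//   GrGpuResourceTag tag(1234, 56, 7, 8, 9, "texture");   // tag carried by some resource
//   bool matched = query.filter(tag);                     // true: tid/wid/fid act as wildcards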

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    void setRealAlloc(bool realAlloc) { fRealAlloc = realAlloc; } // OH ISSUE: set real alloc flag
    bool isRealAlloc() { return fRealAlloc; } // OH ISSUE: get real alloc flag

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const skgpu::UniqueKey& getUniqueKey() const { return fUniqueKey; }

    std::string getLabel() const { return fLabel; }

    void setLabel(std::string_view label) {
        fLabel = label;
        this->onSetLabel();
    }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil).  This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

    /**
     * Set the resource tag.
     */
    void setResourceTag(const GrGpuResourceTag tag, bool curRealAlloc = false);

    /**
     * Get the resource tag.
     *
     * @return the GrGpuResourceTag assigned to this resource.
     */
    GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }

#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
    virtual void dumpVkImageInfo(std::stringstream& dump) const {
        dump << "\n";
    }
#endif

#if defined(GPU_TEST_UTILS)
    virtual const GrSurface* asSurface() const { return nullptr; }
#endif

protected:
    // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
    // fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(skgpu::Budgeted);

    // This must be called by every GrGpuObject that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of the
    // final class).
    void registerWithCacheWrapped(GrWrapCacheable);

    GrGpuResource(GrGpu*, std::string_view label);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;
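
    // Illustrative sketch (added; not part of the API): a subclass override that reuses the
    // default dump format but reports a custom type string. `MyBuffer` is hypothetical.
    //
    //   void MyBuffer::dumpMemoryStatistics(SkTraceMemoryDump* dump) const {
    //       this->dumpMemoryStatisticsPriv(dump, this->getResourceName(),
    //                                      /*type=*/"buffer", this->gpuMemorySize());
    //   }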


private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by the registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(skgpu::ScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    virtual void onSetLabel() = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const skgpu::UniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();
    void userRegisterResource();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    skgpu::StdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    skgpu::ScratchKey fScratchKey;
    skgpu::UniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    GrGpuResourceTag fGrResourceTag;
    std::string fLabel;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyARefCntIsZero
    bool fRealAlloc = false; // OH ISSUE: real alloc flag
};
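
// Illustrative sketch (added; not part of the API): the shape of a minimal non-wrapped
// subclass. `MyBuffer` and its members are hypothetical; real subclasses live elsewhere
// in the tree.
//
//   class MyBuffer : public GrGpuResource {
//   public:
//       MyBuffer(GrGpu* gpu, size_t size)
//               : GrGpuResource(gpu, /*label=*/"MyBuffer"), fSize(size) {
//           // Required for every non-wrapped resource, once fully initialized.
//           this->registerWithCache(skgpu::Budgeted::kYes);
//       }
//       const char* getResourceType() const override { return "Buffer"; }
//
//   private:
//       size_t onGpuMemorySize() const override { return fSize; }  // memoized by gpuMemorySize()
//       void onSetLabel() override {}
//       void onRelease() override { /* free the backend object here */ }
//       size_t fSize;
//   };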

class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif