/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_Resource_DEFINED
#define skgpu_graphite_Resource_DEFINED

#include "include/gpu/GpuTypes.h"
#include "include/private/base/SkMutex.h"
#include "src/gpu/GpuTypesPriv.h"
#include "src/gpu/graphite/GraphiteResourceKey.h"
#include "src/gpu/graphite/ResourceTypes.h"

#include <atomic>
#include <functional>
#include <string>
#include <string_view>

class SkMutex;
class SkTraceMemoryDump;

namespace skgpu::graphite {

class ResourceCache;
class SharedContext;

#if defined(GRAPHITE_TEST_UTILS)
class Texture;
#endif
/**
 * Base class for objects that can be kept in the ResourceCache.
 */
class Resource {
public:
    Resource(const Resource&) = delete;
    Resource(Resource&&) = delete;
    Resource& operator=(const Resource&) = delete;
    Resource& operator=(Resource&&) = delete;

    // Adds a usage ref to the resource. Named ref so we can easily manage usage refs with sk_sp.
    void ref() const {
        // Only the cache should be able to add the first usage ref to a resource.
        SkASSERT(this->hasUsageRef());
        // No barrier required.
        (void)fUsageRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a usage ref from the resource
    void unref() const {
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasUsageRef());
            // A release here acts in place of all releases we "should" have been doing in ref().
            if (1 == fUsageRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kUsage);
            }
        }
        if (shouldFree) {
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }
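
    // A minimal usage sketch (illustrative only, not part of the API; `resource` is
    // assumed to already hold the first usage ref granted by the ResourceCache).
    // Callers typically manage usage refs with sk_sp, which calls ref()/unref():
    //
    //     sk_sp<Resource> holder = sk_ref_sp(resource);  // adds a usage ref
    //     ...                                            // use the resource
    //     holder.reset();  // drops the ref; at zero the resource may return to the cache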

    // Adds a command buffer ref to the resource
    void refCommandBuffer() const {
        if (fCommandBufferRefsAsUsageRefs) {
            return this->ref();
        }
        // No barrier required.
        (void)fCommandBufferRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a command buffer ref from the resource
    void unrefCommandBuffer() const {
        if (fCommandBufferRefsAsUsageRefs) {
            return this->unref();
        }
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasCommandBufferRef());
            // A release here acts in place of all releases we "should" have been doing in ref().
            if (1 == fCommandBufferRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kCommandBuffer);
            }
        }
        if (shouldFree) {
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }
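
    // Illustrative sketch of the command buffer ref lifecycle (not actual call sites,
    // which live in the command buffer code; shown here only to clarify intent):
    //
    //     resource->refCommandBuffer();    // pin while recorded GPU work may access it
    //     ...                              // submit the work and wait for it to finish
    //     resource->unrefCommandBuffer();  // once all refs hit zero, notifyARefIsZero runs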

    Ownership ownership() const { return fOwnership; }

    skgpu::Budgeted budgeted() const { return fBudgeted; }

    // Retrieves the amount of GPU memory used by this resource in bytes. It is approximate since we
    // aren't aware of additional padding or copies made by the driver.
    size_t gpuMemorySize() const { return fGpuMemorySize; }

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

    private:
        uint32_t fID = SK_InvalidUniqueID;
    };

    // Gets an id that is unique for this Resource object. It is static in that it does not change
    // when the content of the Resource object changes. This will never return 0.
    UniqueID uniqueID() const { return fUniqueID; }

    // Describes the type of gpu resource that is represented by the implementing
    // class (e.g. texture, buffer, etc.). This data is used for diagnostic
    // purposes by dumpMemoryStatistics().
    //
    // The value returned is expected to be long lived and will not be copied by the caller.
    virtual const char* getResourceType() const = 0;

    std::string getLabel() const { return fLabel; }

    // We allow the label on a Resource to change when used for a different function. For example,
    // when reusing a scratch Texture we can change the label to match the caller's current use.
    void setLabel(std::string_view label) {
        fLabel = label;

        if (!fLabel.empty()) {
            const std::string fullLabel = "Skia_" + fLabel;
            this->setBackendLabel(fullLabel.c_str());
        }
    }
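
    // For example (the label string is illustrative): calling setLabel("AtlasTexture")
    // stores "AtlasTexture" in fLabel and reports "Skia_AtlasTexture" to the backend
    // via setBackendLabel().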

    // Tests whether an object has been abandoned or released. All objects will be in this state
    // after their creating Context is destroyed or abandoned.
    //
    // @return true if the object has been released or abandoned,
    //         false otherwise.
    // TODO: As of now this function isn't really needed because in freeGpuData we are always
    // deleting this object. However, I want to implement all the purging logic first to make sure
    // we don't have a use case for calling internalDispose but not wanting to delete the actual
    // object yet.
    bool wasDestroyed() const { return fSharedContext == nullptr; }

    const GraphiteResourceKey& key() const { return fKey; }
    // This should only ever be called by the ResourceProvider
    void setKey(const GraphiteResourceKey& key) {
        SkASSERT(key.shareable() == Shareable::kNo || this->budgeted() == skgpu::Budgeted::kYes);
        fKey = key;
    }

    // Dumps memory usage information for this Resource to traceMemoryDump.
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * If the resource has a non-shareable key then this gives the resource subclass an opportunity
     * to prepare itself to re-enter the cache. The ResourceCache extends its privilege of taking
     * the first usage ref to this function via takeRef. If takeRef is called, this resource will
     * not immediately enter the cache but will be re-processed when the usage ref count again
     * reaches zero.
     */
    virtual void prepareForReturnToCache(const std::function<void()>& takeRef) {}
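
    // A minimal override sketch, assuming a hypothetical subclass that must finish some
    // work before it can safely re-enter the cache (HypotheticalBuffer, fPendingWork,
    // and finishPendingWork are illustrative, not real Graphite names):
    //
    //     class HypotheticalBuffer : public Resource {
    //         void prepareForReturnToCache(const std::function<void()>& takeRef) override {
    //             if (fPendingWork) {
    //                 takeRef();  // hold the first usage ref; the resource re-enters the
    //                             // cache only after the usage ref count returns to zero
    //                 this->finishPendingWork();
    //             }
    //         }
    //     };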

#if defined(GRAPHITE_TEST_UTILS)
    bool testingShouldDeleteASAP() const { return fDeleteASAP == DeleteASAP::kYes; }

    virtual const Texture* asTexture() const { return nullptr; }
#endif

protected:
    Resource(const SharedContext*,
             Ownership,
             skgpu::Budgeted,
             size_t gpuMemorySize,
             bool commandBufferRefsAsUsageRefs = false);
    virtual ~Resource();
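
    // A minimal subclass sketch showing how the protected constructor is typically
    // forwarded to (MyBackendTexture and its members are hypothetical; real subclasses
    // are the backend resource types elsewhere in src/gpu/graphite/):
    //
    //     class MyBackendTexture final : public Resource {
    //     public:
    //         MyBackendTexture(const SharedContext* ctx, size_t byteSize)
    //                 : Resource(ctx, Ownership::kOwned, skgpu::Budgeted::kYes, byteSize) {}
    //
    //     private:
    //         const char* getResourceType() const override { return "MyBackendTexture"; }
    //         void freeGpuData() override { /* release the backend object here */ }
    //     };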

    const SharedContext* sharedContext() const { return fSharedContext; }

    // Overridden to add extra information to the memory dump.
    virtual void onDumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump,
                                        const char* dumpName) const {}
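
    // A minimal override sketch (fWidth/fHeight are hypothetical subclass members and
    // the entry names are illustrative):
    //
    //     void onDumpMemoryStatistics(SkTraceMemoryDump* dump,
    //                                 const char* dumpName) const override {
    //         dump->dumpNumericValue(dumpName, "width", "pixels", fWidth);
    //         dump->dumpNumericValue(dumpName, "height", "pixels", fHeight);
    //     }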

#ifdef SK_DEBUG
    bool debugHasCommandBufferRef() const {
        return hasCommandBufferRef();
    }
#endif

    // This needs to be protected so that DawnBuffer's emscripten implementation of
    // prepareForReturnToCache can call it.
    void setDeleteASAP() { fDeleteASAP = DeleteASAP::kYes; }

private:
    friend class ProxyCache; // for setDeleteASAP and updateAccessTime

    // Overridden to free GPU resources in the backend API.
    virtual void freeGpuData() = 0;

    // Overridden to call any release callbacks, if necessary
    virtual void invokeReleaseProc() {}

    enum class DeleteASAP : bool {
        kNo = false,
        kYes = true,
    };

    DeleteASAP shouldDeleteASAP() const { return fDeleteASAP; }

    // In the ResourceCache this is called whenever a Resource is moved into the purgeableQueue. It
    // may also be called by the ProxyCache to track the time on Resources it is holding on to.
    void updateAccessTime() {
        fLastAccess = skgpu::StdSteadyClock::now();
    }
    skgpu::StdSteadyClock::time_point lastAccessTime() const {
        return fLastAccess;
    }

    virtual void setBackendLabel(char const* label) {}

    ////////////////////////////////////////////////////////////////////////////
    // The following set of functions are only meant to be called by the ResourceCache. We don't
    // want them public to general users of a Resource, but they also aren't purely internal calls.
    ////////////////////////////////////////////////////////////////////////////
    friend ResourceCache;

    void makeBudgeted() { fBudgeted = skgpu::Budgeted::kYes; }
    void makeUnbudgeted() { fBudgeted = skgpu::Budgeted::kNo; }

    // This version of ref allows adding a ref when the usage count is 0. This should only be called
    // from the ResourceCache.
    void initialUsageRef() const {
        // Only the cache should be able to add the first usage ref to a resource.
        SkASSERT(fUsageRefCnt >= 0);
        // No barrier required.
        (void)fUsageRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    bool isPurgeable() const;
    int* accessReturnIndex() const { return &fReturnIndex; }
    int* accessCacheIndex() const { return &fCacheArrayIndex; }

    uint32_t timestamp() const { return fTimestamp; }
    void setTimestamp(uint32_t ts) { fTimestamp = ts; }

    void registerWithCache(sk_sp<ResourceCache>);

    // Adds a cache ref to the resource. This is only called by ResourceCache. A Resource will only
    // ever add a ref when the Resource is part of the cache (i.e. when insertResource is called)
    // and while the Resource is in the ResourceCache::ReturnQueue.
    void refCache() const {
        // No barrier required.
        (void)fCacheRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a cache ref from the resource. The unref here should only ever be called from the
    // ResourceCache and only in the Recorder thread the ResourceCache is part of.
    void unrefCache() const {
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasCacheRef());
            // A release here acts in place of all releases we "should" have been doing in ref().
            if (1 == fCacheRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kCache);
            }
        }
        if (shouldFree) {
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }

#ifdef SK_DEBUG
    bool isUsableAsScratch() const {
        return fKey.shareable() == Shareable::kNo && !this->hasUsageRef() && fNonShareableInCache;
    }
#endif

    ////////////////////////////////////////////////////////////////////////////
    // The remaining calls are meant to be truly private
    ////////////////////////////////////////////////////////////////////////////
    bool hasUsageRef() const {
        if (0 == fUsageRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasUsageRef() from running until previous
            // owners are all totally done calling unref().
            return false;
        }
        return true;
    }

    bool hasCommandBufferRef() const {
        // Note that we don't check here for fCommandBufferRefsAsUsageRefs. This should always
        // report zero if that value is true.
        if (0 == fCommandBufferRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasCommandBufferRef() from running
            // until previous owners are all totally done calling unrefCommandBuffer().
            return false;
        }
        SkASSERT(!fCommandBufferRefsAsUsageRefs);
        return true;
    }

    bool hasCacheRef() const {
        if (0 == fCacheRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasCacheRef() from running until previous
            // owners are all totally done calling unrefCache().
            return false;
        }
        return true;
    }

    bool hasAnyRefs() const {
        return this->hasUsageRef() || this->hasCommandBufferRef() || this->hasCacheRef();
    }

    bool notifyARefIsZero(LastRemovedRef removedRef) const;

    // Frees the object in the underlying 3D API.
    void internalDispose();

    // We need to guard calling unref on the usage and command buffer refs since they each could be
    // unreffed on different threads. This can lead to calling notifyARefIsZero twice with each
    // instance thinking there are no more refs left and both trying to delete the object.
    mutable SkMutex fUnrefMutex;

    SkDEBUGCODE(mutable bool fCalledRemovedFromCache = false;)

    // This is not ref'ed but internalDispose() will be called before the Gpu object is destroyed.
    // That call will set this to nullptr.
    const SharedContext* fSharedContext;

    mutable std::atomic<int32_t> fUsageRefCnt;
    mutable std::atomic<int32_t> fCommandBufferRefCnt;
    mutable std::atomic<int32_t> fCacheRefCnt;
    // Indicates that CommandBufferRefs should be rerouted to UsageRefs.
    const bool fCommandBufferRefsAsUsageRefs = false;

    GraphiteResourceKey fKey;

    sk_sp<ResourceCache> fReturnCache;
    // An index into the return cache so we know whether the resource is already waiting to
    // be returned.
    mutable int fReturnIndex = -1;

    Ownership fOwnership;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    // All resources created internally by Graphite and held in the ResourceCache as a shared
    // resource or available scratch resource are considered budgeted. Resources that back client-
    // owned objects (e.g. SkSurface or SkImage) are not budgeted and do not count against cache
    // limits.
    skgpu::Budgeted fBudgeted;

    // This is only used by ProxyCache::purgeProxiesNotUsedSince which is called from
    // ResourceCache::purgeResourcesNotUsedSince. When kYes, this signals that the Resource
    // should've been purged based on its timestamp at some point regardless of what its
    // current timestamp may indicate (since the timestamp will be updated when the Resource
    // is returned to the ResourceCache).
    DeleteASAP fDeleteASAP = DeleteASAP::kNo;

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    mutable int fCacheArrayIndex = -1;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    skgpu::StdSteadyClock::time_point fLastAccess;

    const UniqueID fUniqueID;

    // String used to describe the current use of this Resource.
    std::string fLabel;

    // This is only used during validation checking. Lots of the validation code depends on a
    // resource being purgeable or not. However, purgeable itself just means having no refs. The
    // refs can be removed before a Resource is returned to the cache (or even added to the
    // ReturnQueue).
    SkDEBUGCODE(mutable bool fNonShareableInCache = false;)
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_Resource_DEFINED