/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_Resource_DEFINED
#define skgpu_graphite_Resource_DEFINED

#include "include/core/SkRefCnt.h"  // for sk_sp, used by fReturnCache below
#include "include/gpu/GpuTypes.h"
#include "include/private/base/SkMutex.h"
#include "src/gpu/graphite/GraphiteResourceKey.h"
#include "src/gpu/graphite/ResourceTypes.h"

#include <atomic>

namespace skgpu::graphite {

class ResourceCache;
class SharedContext;

/**
 * Base class for objects that can be kept in the ResourceCache.
 */
class Resource {
public:
    Resource(const Resource&) = delete;
    Resource(Resource&&) = delete;
    Resource& operator=(const Resource&) = delete;
    Resource& operator=(Resource&&) = delete;

    // Adds a usage ref to the resource. Named ref so we can easily manage usage refs with sk_sp.
    void ref() const {
        // Only the cache should be able to add the first usage ref to a resource.
        SkASSERT(this->hasUsageRef());
        // No barrier required.
        (void)fUsageRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a usage ref from the resource.
    void unref() const {
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasUsageRef());
            // A release here acts in place of all releases we "should" have been doing in ref().
            if (1 == fUsageRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kUsage);
            }
        }
        if (shouldFree) {
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }
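
    // Illustrative sketch, not part of this header: because ref()/unref() follow the sk_sp
    // naming convention, callers can manage usage refs with RAII. SkRef() and sk_sp come from
    // include/core/SkRefCnt.h; holdResource() is a hypothetical caller.
    //
    //   sk_sp<Resource> holdResource(Resource* r) {
    //       return sk_sp<Resource>(SkRef(r));  // SkRef() adds a usage ref
    //   }  // each sk_sp copy calls unref() when it goes out of scope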

    // Adds a command buffer ref to the resource.
    void refCommandBuffer() const {
        // No barrier required.
        (void)fCommandBufferRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a command buffer ref from the resource.
    void unrefCommandBuffer() const {
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasCommandBufferRef());
            // A release here acts in place of all releases we "should" have been doing in
            // refCommandBuffer().
            if (1 == fCommandBufferRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kCommandBuffer);
            }
        }
        if (shouldFree) {
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }
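
    // Illustrative sketch with hypothetical names, not part of this header: a backend command
    // buffer would pin a resource for the span of a GPU submission, e.g.:
    //
    //   void trackResource(Resource* r) {        // hypothetical tracking helper
    //       r->refCommandBuffer();
    //       fTrackedResources.push_back(r);      // hypothetical tracking list
    //   }
    //   // ...later, once the GPU has finished executing the submission:
    //   for (Resource* r : fTrackedResources) { r->unrefCommandBuffer(); }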

    Ownership ownership() const { return fOwnership; }

    skgpu::Budgeted budgeted() const { return fBudgeted; }

    // Tests whether an object has been abandoned or released. All objects will be in this state
    // after their creating Context is destroyed or abandoned.
    //
    // @return true if the object has been released or abandoned,
    //         false otherwise.
    // TODO: As of now this function isn't really needed because freeGpuData always deletes this
    // object. However, we want to implement all the purging logic first to make sure there is no
    // use case for calling internalDispose() without also deleting the actual object.
    bool wasDestroyed() const { return fSharedContext == nullptr; }

    const GraphiteResourceKey& key() const { return fKey; }
    // This should only ever be called by the ResourceProvider.
    void setKey(const GraphiteResourceKey& key) {
        // A shareable resource must always be budgeted.
        SkASSERT(key.shareable() == Shareable::kNo || this->budgeted() == skgpu::Budgeted::kYes);
        fKey = key;
    }

protected:
    Resource(const SharedContext*, Ownership, skgpu::Budgeted);
    virtual ~Resource();

    const SharedContext* sharedContext() const { return fSharedContext; }

    // Overridden to free GPU resources in the backend API.
    virtual void freeGpuData() = 0;

#ifdef SK_DEBUG
    bool debugHasCommandBufferRef() const {
        return hasCommandBufferRef();
    }
#endif

private:
    ////////////////////////////////////////////////////////////////////////////
    // The following set of functions is only meant to be called by the ResourceCache. We don't
    // want them public to general users of a Resource, but they also aren't purely internal calls.
    ////////////////////////////////////////////////////////////////////////////
    friend ResourceCache;

    void makeBudgeted() { fBudgeted = skgpu::Budgeted::kYes; }
    void makeUnbudgeted() { fBudgeted = skgpu::Budgeted::kNo; }

    // This version of ref allows adding a ref when the usage count is 0. This should only be
    // called from the ResourceCache.
    void initialUsageRef() const {
        // Only the cache should be able to add the first usage ref to a resource.
        SkASSERT(fUsageRefCnt >= 0);
        // No barrier required.
        (void)fUsageRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }
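
    // Illustrative sketch with hypothetical names, not part of this header: when the cache hands
    // out a resource whose usage count has already dropped to zero, it reinstates the first usage
    // ref before returning it, e.g.:
    //
    //   Resource* returnFromCache(Resource* r) {  // hypothetical cache-side helper
    //       r->initialUsageRef();  // legal here even when the usage count is 0
    //       return r;
    //   }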

    bool isPurgeable() const;
    int* accessReturnIndex() const { return &fReturnIndex; }
    int* accessCacheIndex() const { return &fCacheArrayIndex; }

    uint32_t timestamp() const { return fTimestamp; }
    void setTimestamp(uint32_t ts) { fTimestamp = ts; }

    void registerWithCache(sk_sp<ResourceCache>);

    // Adds a cache ref to the resource. This is only called by ResourceCache. A Resource will only
    // ever add a ref when the Resource is part of the cache (i.e. when insertResource is called)
    // and while the Resource is in the ResourceCache::ReturnQueue.
    void refCache() const {
        // No barrier required.
        (void)fCacheRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Removes a cache ref from the resource. The unref here should only ever be called from the
    // ResourceCache and only on the Recorder thread the ResourceCache is part of.
    void unrefCache() const {
        bool shouldFree = false;
        {
            SkAutoMutexExclusive locked(fUnrefMutex);
            SkASSERT(this->hasCacheRef());
            // A release here acts in place of all releases we "should" have been doing in
            // refCache().
            if (1 == fCacheRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                shouldFree = this->notifyARefIsZero(LastRemovedRef::kCache);
            }
        }
        if (shouldFree) {
            Resource* mutableThis = const_cast<Resource*>(this);
            mutableThis->internalDispose();
        }
    }

#ifdef SK_DEBUG
    bool isUsableAsScratch() const {
        return fKey.shareable() == Shareable::kNo && !this->hasUsageRef() && fNonShareableInCache;
    }
#endif

    ////////////////////////////////////////////////////////////////////////////
    // The remaining calls are meant to be truly private.
    ////////////////////////////////////////////////////////////////////////////
    bool hasUsageRef() const {
        if (0 == fUsageRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasUsageRef() from running until previous
            // owners are all totally done calling unref().
            return false;
        }
        return true;
    }

    bool hasCommandBufferRef() const {
        if (0 == fCommandBufferRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasCommandBufferRef() from running
            // until previous owners are all totally done calling unrefCommandBuffer().
            return false;
        }
        return true;
    }

    bool hasCacheRef() const {
        if (0 == fCacheRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasCacheRef() from running until previous
            // owners are all totally done calling unrefCache().
            return false;
        }
        return true;
    }

    bool hasAnyRefs() const {
        return this->hasUsageRef() || this->hasCommandBufferRef() || this->hasCacheRef();
    }

    bool notifyARefIsZero(LastRemovedRef removedRef) const;

    // Frees the object in the underlying 3D API.
    void internalDispose();

    // We need to guard calling unref on the usage and command buffer refs since they each could be
    // unreffed on different threads. Otherwise notifyARefIsZero could be called twice, with each
    // caller thinking there are no more refs left and both trying to delete the object.
    mutable SkMutex fUnrefMutex;

    SkDEBUGCODE(mutable bool fCalledRemovedFromCache = false;)

    // This is not ref'ed but internalDispose() will be called before the GPU object is destroyed.
    // That call will set this to nullptr.
    const SharedContext* fSharedContext;

    mutable std::atomic<int32_t> fUsageRefCnt;
    mutable std::atomic<int32_t> fCommandBufferRefCnt;
    mutable std::atomic<int32_t> fCacheRefCnt;

    GraphiteResourceKey fKey;

    sk_sp<ResourceCache> fReturnCache;
    // An index into the return cache so we know whether the resource is already waiting to be
    // returned.
    mutable int fReturnIndex = -1;

    Ownership fOwnership;

    // All resources created internally by Graphite and held in the ResourceCache as shared
    // resources or available scratch resources are considered budgeted. Resources that back
    // client-owned objects (e.g. SkSurface or SkImage) are not budgeted and do not count against
    // cache limits.
    skgpu::Budgeted fBudgeted;

    // An index into a heap when this resource is purgeable or an array when it is not. This is
    // maintained by the cache.
    mutable int fCacheArrayIndex = -1;
    // This value reflects how recently this resource was accessed in the cache. This is
    // maintained by the cache.
    uint32_t fTimestamp;

    // This is only used during validation checking. Lots of the validation code depends on a
    // resource being purgeable or not. However, purgeable itself just means having no refs. The
    // refs can be removed before a Resource is returned to the cache (or even added to the
    // ReturnQueue).
    SkDEBUGCODE(mutable bool fNonShareableInCache = false);
};
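
// Illustrative sketch with hypothetical names, not part of this header: a backend subclass owns
// a 3D API object and releases it in its freeGpuData() override, e.g.:
//
//   class FakeBackendBuffer final : public Resource {          // hypothetical subclass
//   public:
//       FakeBackendBuffer(const SharedContext* context, BackendHandle handle)
//               : Resource(context, Ownership::kOwned, skgpu::Budgeted::kYes)
//               , fHandle(handle) {}
//
//   private:
//       void freeGpuData() override {
//           // Destroy fHandle with the backend API, then drop the reference to it.
//       }
//       BackendHandle fHandle;                                 // hypothetical handle type
//   };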

} // namespace skgpu::graphite

#endif // skgpu_graphite_Resource_DEFINED