/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #ifndef GrGpuResource_DEFINED
9 #define GrGpuResource_DEFINED
10
11 #include "include/private/base/SkNoncopyable.h"
12 #include "include/private/gpu/ganesh/GrTypesPriv.h"
13 #include "src/gpu/ResourceKey.h"
14
15 class GrGpu;
16 class GrResourceCache;
17 class SkTraceMemoryDump;
18
/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship
 * without exposing all of GrGpuResource.
 *
 * When a count tracked here (the main ref count or the command buffer usage count) reaches
 * zero, DERIVED::notifyARefCntIsZero() is called with an enum identifying which count hit
 * zero (static polymorphism using CRTP).
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    /** True when the main ref count is exactly 1, i.e. the caller is the sole owner. */
    bool unique() const { return fRefCnt == 1; }

    /** Adds a main ref. May only be called while at least one ref is already outstanding. */
    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    /**
     * Removes a main ref. If this removes the last main ref, DERIVED is notified via
     * notifyARefCntIsZero(LastRemovedRef::kMainRef).
     */
    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        // acq_rel: release publishes this owner's prior writes to whichever thread
        // observes the count hitting zero; acquire lets that thread safely act on the
        // zero notification.
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    /** Records that one more command buffer is using this resource. */
    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    /**
     * Removes one command buffer usage. If this removes the last usage, DERIVED is notified
     * via notifyARefCntIsZero(LastRemovedRef::kCommandBufferUsage).
     */
    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    // A new resource is born with a single main ref and no command buffer usages.
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    /** True while any main ref is outstanding. */
    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    /** True when no command buffer usages are outstanding. */
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    // Forwards the "a count just reached zero" event to the derived class (CRTP).
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
106
/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether a object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * The size is computed lazily: onGpuMemorySize() is queried once on first
     * call and the result cached in fGpuMemorySize thereafter.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    /**
     * Lightweight value type wrapping the resource's 32-bit unique id.
     * A default-constructed UniqueID holds SK_InvalidUniqueID and is invalid.
     */
    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const skgpu::UniqueKey& getUniqueKey() const { return fUniqueKey; }

    /** Returns the debug label last assigned via setLabel() (or the constructor). */
    std::string getLabel() const { return fLabel; }

    /** Stores a new debug label and notifies the subclass via onSetLabel(). */
    void setLabel(std::string_view label) {
        fLabel = label;
        this->onSetLabel();
    }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    /** Makes a fresh id suitable for fUniqueID (see uniqueID()). */
    static uint32_t CreateUniqueID();

protected:
    // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
    // fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(skgpu::Budgeted);

    // This must be called by every GrGpuObject that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of the
    // final class).
    void registerWithCacheWrapped(GrWrapCacheable);

    GrGpuResource(GrGpu*, std::string_view label);
    virtual ~GrGpuResource();

    /** The owning GrGpu; nullptr once the resource has been released or abandoned. */
    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() {}
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() {}

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method using a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;

private:
    bool isPurgeable() const;
    bool hasRef() const;
    // Note: distinct from GrIORef's private helper of the same name, which is
    // inaccessible from here.
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by the registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(skgpu::ScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    virtual void onSetLabel() = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const skgpu::UniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is maintained
    // by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    // Sentinel meaning "not yet computed"; see gpuMemorySize().
    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    skgpu::ScratchKey fScratchKey;
    skgpu::UniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    // Lazily computed by gpuMemorySize(); mutable so the const accessor can cache it.
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;
    std::string fLabel;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>;  // to access notifyARefCntIsZero.
};
329
330 class GrGpuResource::ProxyAccess {
331 private:
ProxyAccess(GrGpuResource * resource)332 ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
333
334 /** Proxies are allowed to take a resource from no refs to one ref. */
335 void ref(GrResourceCache* cache);
336
337 // No taking addresses of this type.
338 const CacheAccess* operator&() const = delete;
339 CacheAccess* operator&() = delete;
340
341 GrGpuResource* fResource;
342
343 friend class GrGpuResource;
344 friend class GrSurfaceProxy;
345 };
346
proxyAccess()347 inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }
348
349 #endif
350