/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"

#include <atomic>
#include <cstdint>
#include <mutex>
#include <string>

class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;
19
/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-cnting behavior and provide friendship without
 * exposing all of GrGpuResource.
 *
 * AFTER a ref count (main ref or command-buffer usage) reaches zero,
 * DERIVED::notifyARefCntIsZero() is called (static polymorphism using CRTP) with the kind of
 * count that was removed. It is legal for additional refs to be added during this time.
 */
30 template <typename DERIVED> class GrIORef : public SkNoncopyable {
31 public:
unique()32 bool unique() const { return fRefCnt == 1; }
33
ref()34 void ref() const {
35 // Only the cache should be able to add the first ref to a resource.
36 SkASSERT(this->getRefCnt() > 0);
37 // No barrier required.
38 (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
39 }
40
41 // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
42 enum class LastRemovedRef {
43 kMainRef, // This refers to fRefCnt
44 kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
45 };
46
unref()47 void unref() const {
48 SkASSERT(this->getRefCnt() > 0);
49 if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
50 this->notifyWillBeZero(LastRemovedRef::kMainRef);
51 }
52 }
53
addCommandBufferUsage()54 void addCommandBufferUsage() const {
55 // No barrier required.
56 (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
57 }
58
removeCommandBufferUsage()59 void removeCommandBufferUsage() const {
60 SkASSERT(!this->hasNoCommandBufferUsages());
61 if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
62 this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
63 }
64 }
65
66 #if GR_TEST_UTILS
testingOnly_getRefCnt()67 int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
68 #endif
69
70 protected:
GrIORef()71 GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}
72
internalHasRef()73 bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
internalHasNoCommandBufferUsages()74 bool internalHasNoCommandBufferUsages() const {
75 return SkToBool(this->hasNoCommandBufferUsages());
76 }
77
78 // Privileged method that allows going from ref count = 0 to ref count = 1.
addInitialRef()79 void addInitialRef() const {
80 SkASSERT(fRefCnt >= 0);
81 // No barrier required.
82 (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
83 }
84
85 private:
notifyWillBeZero(LastRemovedRef removedRef)86 void notifyWillBeZero(LastRemovedRef removedRef) const {
87 static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
88 }
89
getRefCnt()90 int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }
91
hasNoCommandBufferUsages()92 bool hasNoCommandBufferUsages() const {
93 if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
94 // The acquire barrier is only really needed if we return true. It
95 // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
96 // until previous owners are all totally done calling removeCommandBufferUsage().
97 return true;
98 }
99 return false;
100 }
101
102 mutable std::atomic<int32_t> fRefCnt;
103 mutable std::atomic<int32_t> fCommandBufferUsageCnt;
104
105 using INHERITED = SkNoncopyable;
106 };
/**
 * Identifies the owner of a GPU resource for accounting/filtering purposes.
 * Fields are presumably process/task/window/feature ids (pid/tid/wid/fid) —
 * TODO confirm against the cache call sites. A zero field acts as a wildcard
 * in filter().
 */
struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0) {}

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid) {}

    // Lexicographic ordering over (fPid, fTid, fWid, fFid) so tags can key sorted containers.
    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    // Returns "[pid,tid,wid,fid]" for logging/tracing.
    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + "]";
    }

    // A tag is valid if any field is non-zero.
    bool isGrTagValid() const {
        return fPid || fTid || fWid || fFid;
    }

    /**
     * Returns true if this tag matches |tag|, treating each zero field of this tag as a
     * wildcard. An invalid (all-zero) tag matches only another invalid tag.
     */
    bool filter(const GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    // Rvalue overload kept for source compatibility; delegates to the const& version
    // (previously this was a full copy of the body above).
    bool filter(GrGpuResourceTag&& tag) const { return this->filter(tag); }

    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
};

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
189 class GrGpuResource : public GrIORef<GrGpuResource> {
190 public:
191 /**
192 * Tests whether a object has been abandoned or released. All objects will
193 * be in this state after their creating GrContext is destroyed or has
194 * contextLost called. It's up to the client to test wasDestroyed() before
195 * attempting to use an object if it holds refs on objects across
196 * ~GrContext, freeResources with the force flag, or contextLost.
197 *
198 * @return true if the object has been released or abandoned,
199 * false otherwise.
200 */
wasDestroyed()201 bool wasDestroyed() const { return nullptr == fGpu; }
202
203 /**
204 * Retrieves the context that owns the object. Note that it is possible for
205 * this to return NULL. When objects have been release()ed or abandon()ed
206 * they no longer have an owning context. Destroying a GrDirectContext
207 * automatically releases all its resources.
208 */
209 const GrDirectContext* getContext() const;
210 GrDirectContext* getContext();
211
212 /**
213 * Retrieves the amount of GPU memory used by this resource in bytes. It is
214 * approximate since we aren't aware of additional padding or copies made
215 * by the driver.
216 *
217 * @return the amount of GPU memory used in bytes
218 */
gpuMemorySize()219 size_t gpuMemorySize() const {
220 if (kInvalidGpuMemorySize == fGpuMemorySize) {
221 fGpuMemorySize = this->onGpuMemorySize();
222 SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
223 }
224 return fGpuMemorySize;
225 }
226
227 class UniqueID {
228 public:
229 UniqueID() = default;
230
UniqueID(uint32_t id)231 explicit UniqueID(uint32_t id) : fID(id) {}
232
asUInt()233 uint32_t asUInt() const { return fID; }
234
235 bool operator==(const UniqueID& other) const { return fID == other.fID; }
236 bool operator!=(const UniqueID& other) const { return !(*this == other); }
237
makeInvalid()238 void makeInvalid() { fID = SK_InvalidUniqueID; }
isInvalid()239 bool isInvalid() const { return fID == SK_InvalidUniqueID; }
240
241 protected:
242 uint32_t fID = SK_InvalidUniqueID;
243 };
244
245 /**
246 * Gets an id that is unique for this GrGpuResource object. It is static in that it does
247 * not change when the content of the GrGpuResource object changes. This will never return
248 * 0.
249 */
uniqueID()250 UniqueID uniqueID() const { return fUniqueID; }
251
252 /** Returns the current unique key for the resource. It will be invalid if the resource has no
253 associated unique key. */
getUniqueKey()254 const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }
255
256 /**
257 * Internal-only helper class used for manipulations of the resource by the cache.
258 */
259 class CacheAccess;
260 inline CacheAccess cacheAccess();
261 inline const CacheAccess cacheAccess() const; // NOLINT(readability-const-return-type)
262
263 /**
264 * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
265 */
266 class ProxyAccess;
267 inline ProxyAccess proxyAccess();
268
269 /**
270 * Internal-only helper class used for manipulations of the resource by internal code.
271 */
272 class ResourcePriv;
273 inline ResourcePriv resourcePriv();
274 inline const ResourcePriv resourcePriv() const; // NOLINT(readability-const-return-type)
275
276 /**
277 * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
278 * Typically, subclasses should not need to override this, and should only
279 * need to override setMemoryBacking.
280 **/
281 virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
282
283 /**
284 * Describes the type of gpu resource that is represented by the implementing
285 * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
286 * purposes by dumpMemoryStatistics().
287 *
288 * The value returned is expected to be long lived and will not be copied by the caller.
289 */
290 virtual const char* getResourceType() const = 0;
291
292 static uint32_t CreateUniqueID();
293
294 /**
295 * Set the resource tag.
296 */
setResourceTag(const GrGpuResourceTag tag)297 void setResourceTag(const GrGpuResourceTag tag) { fGrResourceTag = tag; }
298
299 /**
300 * Get the resource tag.
301 *
302 * @return all GrGpuResourceTags.
303 */
getResourceTag()304 GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }
305
306 protected:
307 // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
308 // fully initialized (i.e. only from the constructors of the final class).
309 void registerWithCache(SkBudgeted);
310
311 // This must be called by every GrGpuObject that references any wrapped backend objects. It
312 // should be called once the object is fully initialized (i.e. only from the constructors of the
313 // final class).
314 void registerWithCacheWrapped(GrWrapCacheable);
315
316 GrGpuResource(GrGpu*);
317 virtual ~GrGpuResource();
318
getGpu()319 GrGpu* getGpu() const { return fGpu; }
320
321 /** Overridden to free GPU resources in the backend API. */
onRelease()322 virtual void onRelease() { }
323 /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
324 This may be called when the underlying 3D context is no longer valid and so no
325 backend API calls should be made. */
onAbandon()326 virtual void onAbandon() { }
327
328 /**
329 * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
330 **/
setMemoryBacking(SkTraceMemoryDump *,const SkString &)331 virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}
332
333 /**
334 * Returns a string that uniquely identifies this resource.
335 */
336 SkString getResourceName() const;
337
338 /**
339 * A helper for subclasses that override dumpMemoryStatistics(). This method using a format
340 * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
341 * to customize various inputs.
342 */
343 void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
344 const char* type, size_t size) const;
345
346
347 private:
348 bool isPurgeable() const;
349 bool hasRef() const;
350 bool hasNoCommandBufferUsages() const;
351
352 /**
353 * Called by the registerWithCache if the resource is available to be used as scratch.
354 * Resource subclasses should override this if the instances should be recycled as scratch
355 * resources and populate the scratchKey with the key.
356 * By default resources are not recycled as scratch.
357 **/
computeScratchKey(GrScratchKey *)358 virtual void computeScratchKey(GrScratchKey*) const {}
359
360 /**
361 * Removes references to objects in the underlying 3D API without freeing them.
362 * Called by CacheAccess.
363 */
364 void abandon();
365
366 /**
367 * Frees the object in the underlying 3D API. Called by CacheAccess.
368 */
369 void release();
370
371 virtual size_t onGpuMemorySize() const = 0;
372
373 // See comments in CacheAccess and ResourcePriv.
374 void setUniqueKey(const GrUniqueKey&);
375 void removeUniqueKey();
376 void notifyARefCntIsZero(LastRemovedRef removedRef) const;
377 void removeScratchKey();
378 void makeBudgeted();
379 void makeUnbudgeted();
380 void userRegisterResource();
381
382 #ifdef SK_DEBUG
383 friend class GrGpu; // for assert in GrGpu to access getGpu
384 #endif
385
386 // An index into a heap when this resource is purgeable or an array when not. This is maintained
387 // by the cache.
388 int fCacheArrayIndex = -1;
389 // This value reflects how recently this resource was accessed in the cache. This is maintained
390 // by the cache.
391 uint32_t fTimestamp;
392 GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;
393
394 static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
395 GrScratchKey fScratchKey;
396 GrUniqueKey fUniqueKey;
397
398 // This is not ref'ed but abandon() or release() will be called before the GrGpu object
399 // is destroyed. Those calls set will this to NULL.
400 GrGpu* fGpu;
401 mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;
402
403 GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
404 bool fRefsWrappedObjects = false;
405 const UniqueID fUniqueID;
406 GrGpuResourceTag fGrResourceTag;
407
408 using INHERITED = GrIORef<GrGpuResource>;
409 friend class GrIORef<GrGpuResource>; // to access notifyRefCntWillBeZero and
410 // notifyARefCntIsZero.
411 std::mutex mutex_; // The gpu cache is released abnormally due to multi threads.
412 };
414 class GrGpuResource::ProxyAccess {
415 private:
ProxyAccess(GrGpuResource * resource)416 ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
417
418 /** Proxies are allowed to take a resource from no refs to one ref. */
419 void ref(GrResourceCache* cache);
420
421 // No taking addresses of this type.
422 const CacheAccess* operator&() const = delete;
423 CacheAccess* operator&() = delete;
424
425 GrGpuResource* fResource;
426
427 friend class GrGpuResource;
428 friend class GrSurfaceProxy;
429 };
proxyAccess()431 inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif