1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef GrGpuResource_DEFINED
9 #define GrGpuResource_DEFINED
10
#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <string>
14
15 class GrGpu;
16 class GrResourceCache;
17 class SkTraceMemoryDump;
18
/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship without
 * exposing all of GrGpuResource.
 *
 * PRIOR to the last ref being removed DERIVED::notifyARefCntWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs to be added
 * during this time. AFTER the ref count reaches zero DERIVED::notifyARefCntIsZero() will be
 * called.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    // Returns true if the caller holds the only outstanding main ref. Command
    // buffer usages are not considered. NOTE(review): this compares the atomic
    // directly (a seq_cst load), unlike getRefCnt() which loads relaxed.
    bool unique() const { return fRefCnt == 1; }

    // Adds a main ref. Legal only while at least one ref is already held; the
    // 0 -> 1 transition is reserved for the cache (see addInitialRef()).
    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    // Removes a main ref. If this was the last main ref, notifies DERIVED via
    // notifyWillBeZero() with LastRemovedRef::kMainRef.
    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        // acq_rel: makes this owner's prior accesses visible to whoever observes
        // the count hit zero, and orders the notification after the decrement.
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    // Records that an in-flight command buffer is using this resource.
    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // Drops one command buffer usage. If this was the last one, notifies DERIVED
    // with LastRemovedRef::kCommandBufferUsage.
    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    // New resources are born with a single ref and no command buffer usages.
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    // CRTP dispatch to the derived class once a count has dropped to zero.
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    // Relaxed load; callers (asserts, queries) do not rely on ordering here.
    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
106
/**
 * Identifies the owner of a GPU resource for accounting/attribution purposes.
 * A tag is a tuple of (pid, tid, wid, fid) ids plus a human-readable name.
 * A zero field acts as a wildcard when filtering; a tag with all-zero ids is
 * considered invalid (see isGrTagValid()).
 */
struct GrGpuResourceTag {
    GrGpuResourceTag() : fPid(0), fTid(0), fWid(0), fFid(0)
    {
        isGrGpuResourceTagValid = false;
    }

    GrGpuResourceTag(uint32_t pid, uint32_t tid, uint32_t wid, uint32_t fid, const std::string& name)
        : fPid(pid), fTid(tid), fWid(wid), fFid(fid), fName(name)
    {
        // A tag is valid if any id field is non-zero.
        isGrGpuResourceTagValid = fPid || fTid || fWid || fFid;
    }

    // Lexicographic ordering over (fPid, fTid, fWid, fFid). fName is ignored,
    // consistent with operator==.
    bool operator< (const GrGpuResourceTag& tag) const {
        if (fPid != tag.fPid) {
            return fPid < tag.fPid;
        }
        if (fTid != tag.fTid) {
            return fTid < tag.fTid;
        }
        if (fWid != tag.fWid) {
            return fWid < tag.fWid;
        }
        if (fFid != tag.fFid) {
            return fFid < tag.fFid;
        }
        return false;
    }

    // Equality compares only the id fields; fName is intentionally excluded.
    bool operator== (const GrGpuResourceTag& tag) const {
        return (fPid == tag.fPid) && (fTid == tag.fTid) && (fWid == tag.fWid) && (fFid == tag.fFid);
    }

    // Renders the ids as "[pid,tid,wid,fid]" for logs and memory dumps.
    std::string toString() const {
        return "[" + std::to_string(fPid) + "," + std::to_string(fTid) + ","
            + std::to_string(fWid) + "," + std::to_string(fFid) + "]";
    }

    bool isGrTagValid() const {
        return isGrGpuResourceTagValid;
    }

    /**
     * Returns true if 'tag' matches this filter tag. Each non-zero id field of
     * this tag must equal the corresponding field of 'tag'; zero fields match
     * anything. An invalid filter matches only invalid tags.
     */
    bool filter(GrGpuResourceTag& tag) const {
        if (!isGrTagValid()) {
            return !tag.isGrTagValid();
        }
        if (fPid && fPid != tag.fPid) {
            return false;
        }
        if (fTid && fTid != tag.fTid) {
            return false;
        }
        if (fWid && fWid != tag.fWid) {
            return false;
        }
        if (fFid && fFid != tag.fFid) {
            return false;
        }
        return true;
    }

    // Rvalue overload: delegates to the lvalue overload instead of duplicating
    // the matching logic (the two copies previously had to be kept in sync).
    bool filter(GrGpuResourceTag&& tag) const {
        return this->filter(tag);
    }

    uint32_t fPid;
    uint32_t fTid;
    uint32_t fWid;
    uint32_t fFid;
    std::string fName;
    bool isGrGpuResourceTagValid;
};
192
193 /**
194 * Base class for objects that can be kept in the GrResourceCache.
195 */
196 class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
197 public:
198 /**
199 * Tests whether a object has been abandoned or released. All objects will
200 * be in this state after their creating GrContext is destroyed or has
201 * contextLost called. It's up to the client to test wasDestroyed() before
202 * attempting to use an object if it holds refs on objects across
203 * ~GrContext, freeResources with the force flag, or contextLost.
204 *
205 * @return true if the object has been released or abandoned,
206 * false otherwise.
207 */
wasDestroyed()208 bool wasDestroyed() const { return nullptr == fGpu; }
209
setRealAlloc(bool realAlloc)210 void setRealAlloc(bool realAlloc) { fRealAlloc = realAlloc; } // OH ISSUE: set real alloc flag
isRealAlloc()211 bool isRealAlloc() { return fRealAlloc; } // OH ISSUE: get real alloc flag
setRealAllocSize(size_t realAllocSize)212 void setRealAllocSize(size_t realAllocSize) { fRealAllocSize = realAllocSize; } // OH ISSUE: set real alloc size
getRealAllocSize()213 size_t getRealAllocSize() { return fRealAllocSize; } // OH ISSUE: get real alloc size
214
215 /**
216 * Retrieves the context that owns the object. Note that it is possible for
217 * this to return NULL. When objects have been release()ed or abandon()ed
218 * they no longer have an owning context. Destroying a GrDirectContext
219 * automatically releases all its resources.
220 */
221 const GrDirectContext* getContext() const;
222 GrDirectContext* getContext();
223
224 /**
225 * Retrieves the amount of GPU memory used by this resource in bytes. It is
226 * approximate since we aren't aware of additional padding or copies made
227 * by the driver.
228 *
229 * @return the amount of GPU memory used in bytes
230 */
gpuMemorySize()231 size_t gpuMemorySize() const {
232 if (kInvalidGpuMemorySize == fGpuMemorySize) {
233 fGpuMemorySize = this->onGpuMemorySize();
234 SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
235 }
236 return fGpuMemorySize;
237 }
238
239 class UniqueID {
240 public:
241 UniqueID() = default;
242
UniqueID(uint32_t id)243 explicit UniqueID(uint32_t id) : fID(id) {}
244
asUInt()245 uint32_t asUInt() const { return fID; }
246
247 bool operator==(const UniqueID& other) const { return fID == other.fID; }
248 bool operator!=(const UniqueID& other) const { return !(*this == other); }
249
makeInvalid()250 void makeInvalid() { fID = SK_InvalidUniqueID; }
isInvalid()251 bool isInvalid() const { return fID == SK_InvalidUniqueID; }
252
253 protected:
254 uint32_t fID = SK_InvalidUniqueID;
255 };
256
257 /**
258 * Gets an id that is unique for this GrGpuResource object. It is static in that it does
259 * not change when the content of the GrGpuResource object changes. This will never return
260 * 0.
261 */
uniqueID()262 UniqueID uniqueID() const { return fUniqueID; }
263
264 /** Returns the current unique key for the resource. It will be invalid if the resource has no
265 associated unique key. */
getUniqueKey()266 const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }
267
268 /**
269 * Internal-only helper class used for manipulations of the resource by the cache.
270 */
271 class CacheAccess;
272 inline CacheAccess cacheAccess();
273 inline const CacheAccess cacheAccess() const; // NOLINT(readability-const-return-type)
274
275 /**
276 * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
277 */
278 class ProxyAccess;
279 inline ProxyAccess proxyAccess();
280
281 /**
282 * Internal-only helper class used for manipulations of the resource by internal code.
283 */
284 class ResourcePriv;
285 inline ResourcePriv resourcePriv();
286 inline const ResourcePriv resourcePriv() const; // NOLINT(readability-const-return-type)
287
288 /**
289 * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
290 * Typically, subclasses should not need to override this, and should only
291 * need to override setMemoryBacking.
292 **/
293 virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
294
295 /**
296 * Describes the type of gpu resource that is represented by the implementing
297 * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
298 * purposes by dumpMemoryStatistics().
299 *
300 * The value returned is expected to be long lived and will not be copied by the caller.
301 */
302 virtual const char* getResourceType() const = 0;
303
304 static uint32_t CreateUniqueID();
305
306 /**
307 * Set the resource tag.
308 */
309 void setResourceTag(const GrGpuResourceTag tag);
310
311 /**
312 * Get the resource tag.
313 *
314 * @return all GrGpuResourceTags.
315 */
getResourceTag()316 GrGpuResourceTag getResourceTag() const { return fGrResourceTag; }
317
318 protected:
319 // This must be called by every non-wrapped GrGpuObject. It should be called once the object is
320 // fully initialized (i.e. only from the constructors of the final class).
321 void registerWithCache(SkBudgeted);
322
323 // This must be called by every GrGpuObject that references any wrapped backend objects. It
324 // should be called once the object is fully initialized (i.e. only from the constructors of the
325 // final class).
326 void registerWithCacheWrapped(GrWrapCacheable);
327
328 GrGpuResource(GrGpu*);
329 virtual ~GrGpuResource();
330
getGpu()331 GrGpu* getGpu() const { return fGpu; }
332
333 /** Overridden to free GPU resources in the backend API. */
onRelease()334 virtual void onRelease() { }
335 /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
336 This may be called when the underlying 3D context is no longer valid and so no
337 backend API calls should be made. */
onAbandon()338 virtual void onAbandon() { }
339
340 /**
341 * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
342 **/
setMemoryBacking(SkTraceMemoryDump *,const SkString &)343 virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}
344
345 /**
346 * Returns a string that uniquely identifies this resource.
347 */
348 SkString getResourceName() const;
349
350 /**
351 * A helper for subclasses that override dumpMemoryStatistics(). This method using a format
352 * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
353 * to customize various inputs.
354 */
355 void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
356 const char* type, size_t size) const;
357
358
359 private:
360 bool isPurgeable() const;
361 bool hasRef() const;
362 bool hasNoCommandBufferUsages() const;
363
364 /**
365 * Called by the registerWithCache if the resource is available to be used as scratch.
366 * Resource subclasses should override this if the instances should be recycled as scratch
367 * resources and populate the scratchKey with the key.
368 * By default resources are not recycled as scratch.
369 **/
computeScratchKey(GrScratchKey *)370 virtual void computeScratchKey(GrScratchKey*) const {}
371
372 /**
373 * Removes references to objects in the underlying 3D API without freeing them.
374 * Called by CacheAccess.
375 */
376 void abandon();
377
378 /**
379 * Frees the object in the underlying 3D API. Called by CacheAccess.
380 */
381 void release();
382
383 virtual size_t onGpuMemorySize() const = 0;
384
385 // See comments in CacheAccess and ResourcePriv.
386 void setUniqueKey(const GrUniqueKey&);
387 void removeUniqueKey();
388 void notifyARefCntIsZero(LastRemovedRef removedRef) const;
389 void removeScratchKey();
390 void makeBudgeted();
391 void makeUnbudgeted();
392 void userRegisterResource();
393
394 #ifdef SK_DEBUG
395 friend class GrGpu; // for assert in GrGpu to access getGpu
396 #endif
397
398 // An index into a heap when this resource is purgeable or an array when not. This is maintained
399 // by the cache.
400 int fCacheArrayIndex;
401 // This value reflects how recently this resource was accessed in the cache. This is maintained
402 // by the cache.
403 uint32_t fTimestamp;
404 GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;
405
406 static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
407 GrScratchKey fScratchKey;
408 GrUniqueKey fUniqueKey;
409
410 // This is not ref'ed but abandon() or release() will be called before the GrGpu object
411 // is destroyed. Those calls set will this to NULL.
412 GrGpu* fGpu;
413 mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;
414
415 GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
416 bool fRefsWrappedObjects = false;
417 const UniqueID fUniqueID;
418 GrGpuResourceTag fGrResourceTag;
419
420 using INHERITED = GrIORef<GrGpuResource>;
421 friend class GrIORef<GrGpuResource>; // to access notifyRefCntWillBeZero and
422 // notifyARefCntIsZero.
423
424 bool fRealAlloc = false; // OH ISSUE: real alloc flag
425 size_t fRealAllocSize = 0; // OH ISSUE: real alloc size
426 };
427
428 class GrGpuResource::ProxyAccess {
429 private:
ProxyAccess(GrGpuResource * resource)430 ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
431
432 /** Proxies are allowed to take a resource from no refs to one ref. */
433 void ref(GrResourceCache* cache);
434
435 // No taking addresses of this type.
436 const CacheAccess* operator&() const = delete;
437 CacheAccess* operator&() = delete;
438
439 GrGpuResource* fResource;
440
441 friend class GrGpuResource;
442 friend class GrSurfaceProxy;
443 };
444
proxyAccess()445 inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }
446
447 #endif
448