/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"

class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
 * Separated out as a base class to isolate the ref-counting behavior and provide friendship
 * without exposing all of GrGpuResource.
 *
 * PRIOR to the last ref being removed DERIVED::notifyARefCntWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs to be added
 * during this time. AFTER the ref count reaches zero DERIVED::notifyARefCntIsZero() will be
 * called.
 */
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    bool unique() const { return fRefCnt == 1; }

    void ref() const {
        // Only the cache should be able to add the first ref to a resource.
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
    enum class LastRemovedRef {
        kMainRef,            // This refers to fRefCnt
        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
    };

    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kMainRef);
        }
    }

    void addCommandBufferUsage() const {
        // No barrier required.
        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void removeCommandBufferUsage() const {
        SkASSERT(!this->hasNoCommandBufferUsages());
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
        }
    }

#if GR_TEST_UTILS
    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
#endif

protected:
    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}

    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
    bool internalHasNoCommandBufferUsages() const {
        return SkToBool(this->hasNoCommandBufferUsages());
    }

    // Privileged method that allows going from ref count = 0 to ref count = 1.
    void addInitialRef() const {
        SkASSERT(fRefCnt >= 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

private:
    void notifyWillBeZero(LastRemovedRef removedRef) const {
        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
    }

    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }

    bool hasNoCommandBufferUsages() const {
        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
            // until previous owners are all totally done calling removeCommandBufferUsage().
            return true;
        }
        return false;
    }

    mutable std::atomic<int32_t> fRefCnt;
    mutable std::atomic<int32_t> fCommandBufferUsageCnt;

    using INHERITED = SkNoncopyable;
};
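
// Illustrative sketch (not part of this header's API; MyResource and the body of the hook are
// hypothetical): a DERIVED class provides the CRTP hook that GrIORef invokes when a count drops
// to zero, and befriends GrIORef so the private hook is reachable:
//
//     class MyResource : public GrIORef<MyResource> {
//     private:
//         void notifyARefCntIsZero(LastRemovedRef removedRef) const {
//             // e.g. hand the object back to a cache or destroy the backend resource.
//         }
//
//         friend class GrIORef<MyResource>;
//     };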

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }
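
    // Illustrative usage sketch (the caller code is hypothetical, not part of this header):
    // clients that hold refs across GrContext teardown are expected to check wasDestroyed()
    // before using the object, e.g.
    //
    //     if (!resource->wasDestroyed()) {
    //         // The backend object is still alive; safe to use.
    //     }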

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrDirectContext
     * automatically releases all its resources.
     */
    const GrDirectContext* getContext() const;
    GrDirectContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }
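
    // Illustrative sketch (MyBuffer and fSizeInBytes are hypothetical): subclasses report their
    // size by overriding the private virtual onGpuMemorySize(); gpuMemorySize() caches the result
    // after the first call, e.g.
    //
    //     size_t MyBuffer::onGpuMemorySize() const { return fSizeInBytes; }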

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)

    /**
     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
     */
    class ProxyAccess;
    inline ProxyAccess proxyAccess();

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of gpu resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil).  This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();

protected:
    // This must be called by every non-wrapped GrGpuResource. It should be called once the object
    // is fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuResource that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of
    // the final class).
    void registerWithCacheWrapped(GrWrapCacheable);
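
    // Illustrative sketch (MyTexture is hypothetical): a final class registers itself from its
    // constructor once it is fully initialized, e.g.
    //
    //     MyTexture::MyTexture(GrGpu* gpu, SkBudgeted budgeted)
    //             : GrGpuResource(gpu) {
    //         // create the backend texture object here, then:
    //         this->registerWithCache(budgeted);
    //     }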

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}
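
    // Illustrative sketch (MyGLTexture, fTextureID, and the "gl_texture" label are hypothetical):
    // a backend-specific subclass can tag its dump entry with the native object it wraps, e.g.
    //
    //     void MyGLTexture::setMemoryBacking(SkTraceMemoryDump* dump,
    //                                        const SkString& dumpName) const {
    //         SkString id = SkStringPrintf("%u", fTextureID);
    //         dump->setMemoryBacking(dumpName.c_str(), "gl_texture", id.c_str());
    //     }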

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;
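
    // Illustrative sketch (MyRenderTarget and the "RenderTarget" label are hypothetical): an
    // override of dumpMemoryStatistics() can reuse the default format while substituting its own
    // type and size, e.g.
    //
    //     void MyRenderTarget::dumpMemoryStatistics(SkTraceMemoryDump* dump) const {
    //         this->dumpMemoryStatisticsPriv(dump, this->getResourceName(),
    //                                        "RenderTarget", this->gpuMemorySize());
    //     }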


private:
    bool isPurgeable() const;
    bool hasRef() const;
    bool hasNoCommandBufferUsages() const;

    /**
     * Called by registerWithCache() if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}
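
    // Illustrative sketch (MyBuffer, fSizeInBytes, and the single-word key layout are
    // hypothetical): a recyclable subclass fills in a scratch key describing the properties that
    // make one instance substitutable for another, e.g.
    //
    //     void MyBuffer::computeScratchKey(GrScratchKey* key) const {
    //         static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
    //         GrScratchKey::Builder builder(key, kType, 1);
    //         builder[0] = SkToU32(fSizeInBytes);
    //     }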

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is
    // maintained by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is maintained
    // by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;

    using INHERITED = GrIORef<GrGpuResource>;
    friend class GrIORef<GrGpuResource>; // to access notifyARefCntWillBeZero and
                                         // notifyARefCntIsZero.
};

class GrGpuResource::ProxyAccess {
private:
    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}

    /** Proxies are allowed to take a resource from no refs to one ref. */
    void ref(GrResourceCache* cache);

    // No taking addresses of this type.
    const CacheAccess* operator&() const = delete;
    CacheAccess* operator&() = delete;

    GrGpuResource* fResource;

    friend class GrGpuResource;
    friend class GrSurfaceProxy;
};

inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }

#endif
340