/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrSurfaceProxy_DEFINED
#define GrSurfaceProxy_DEFINED

#include "../private/SkNoncopyable.h"
#include "GrBackendSurface.h"
#include "GrGpuResource.h"
#include "GrSurface.h"

#include "SkRect.h"

class GrCaps;
class GrOpList;
class GrProxyProvider;
class GrRenderTargetOpList;
class GrRenderTargetProxy;
class GrResourceProvider;
class GrSurfaceContext;
class GrSurfaceProxyPriv;
class GrTextureOpList;
class GrTextureProxy;

29 // This class replicates the functionality GrIORef<GrSurface> but tracks the
30 // utilitization for later resource allocation (for the deferred case) and
31 // forwards on the utilization in the wrapped case
32 class GrIORefProxy : public SkNoncopyable {
33 public:
ref()34     void ref() const {
35         this->validate();
36 
37         ++fRefCnt;
38         if (fTarget) {
39             fTarget->ref();
40         }
41     }
42 
unref()43     void unref() const {
44         this->validate();
45 
46         if (fTarget) {
47             fTarget->unref();
48         }
49 
50         --fRefCnt;
51         this->didRemoveRefOrPendingIO();
52     }
53 
54 #ifdef SK_DEBUG
isUnique_debugOnly()55     bool isUnique_debugOnly() const { // For asserts.
56         SkASSERT(fRefCnt >= 0 && fPendingWrites >= 0 && fPendingReads >= 0);
57         return 1 == fRefCnt + fPendingWrites + fPendingReads;
58     }
59 #endif
60 
release()61     void release() {
62         // The proxy itself may still have multiple refs. It can be owned by an SkImage and multiple
63         // SkDeferredDisplayLists at the same time if we are using DDLs.
64         SkASSERT(0 == fPendingReads);
65         SkASSERT(0 == fPendingWrites);
66 
67         // In the current hybrid world, the proxy and backing surface are ref/unreffed in
68         // synchrony. Each ref we've added or removed to the proxy was mirrored to the backing
69         // surface. Though, that backing surface could be owned by other proxies as well. Remove
70         // a ref from the backing surface for each ref the proxy has since we are about to remove
71         // our pointer to the surface. If this proxy is reinstantiated then all the proxy's refs
72         // get transferred to the (possibly new) backing surface.
73         for (int refs = fRefCnt; refs; --refs) {
74             fTarget->unref();
75         }
76         fTarget = nullptr;
77     }
78 
validate()79     void validate() const {
80 #ifdef SK_DEBUG
81         SkASSERT(fRefCnt >= 0);
82         SkASSERT(fPendingReads >= 0);
83         SkASSERT(fPendingWrites >= 0);
84         SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 1);
85 
86         if (fTarget) {
87             // The backing GrSurface can have more refs than the proxy if the proxy
88             // started off wrapping an external resource (that came in with refs).
89             // The GrSurface should never have fewer refs than the proxy however.
90             SkASSERT(fTarget->fRefCnt >= fRefCnt);
91             SkASSERT(fTarget->fPendingReads >= fPendingReads);
92             SkASSERT(fTarget->fPendingWrites >= fPendingWrites);
93         }
94 #endif
95     }
96 
97     int32_t getBackingRefCnt_TestOnly() const;
98     int32_t getPendingReadCnt_TestOnly() const;
99     int32_t getPendingWriteCnt_TestOnly() const;
100 
addPendingRead()101     void addPendingRead() const {
102         this->validate();
103 
104         ++fPendingReads;
105         if (fTarget) {
106             fTarget->addPendingRead();
107         }
108     }
109 
completedRead()110     void completedRead() const {
111         this->validate();
112 
113         if (fTarget) {
114             fTarget->completedRead();
115         }
116 
117         --fPendingReads;
118         this->didRemoveRefOrPendingIO();
119     }
120 
addPendingWrite()121     void addPendingWrite() const {
122         this->validate();
123 
124         ++fPendingWrites;
125         if (fTarget) {
126             fTarget->addPendingWrite();
127         }
128     }
129 
completedWrite()130     void completedWrite() const {
131         this->validate();
132 
133         if (fTarget) {
134             fTarget->completedWrite();
135         }
136 
137         --fPendingWrites;
138         this->didRemoveRefOrPendingIO();
139     }
140 
141 protected:
GrIORefProxy()142     GrIORefProxy() : fTarget(nullptr), fRefCnt(1), fPendingReads(0), fPendingWrites(0) {}
GrIORefProxy(sk_sp<GrSurface> surface)143     GrIORefProxy(sk_sp<GrSurface> surface) : fRefCnt(1), fPendingReads(0), fPendingWrites(0) {
144         // Since we're manually forwarding on refs & unrefs we don't want sk_sp doing
145         // anything extra.
146         fTarget = surface.release();
147     }
~GrIORefProxy()148     virtual ~GrIORefProxy() {
149         // We don't unref 'fTarget' here since the 'unref' method will already
150         // have forwarded on the unref call that got us here.
151     }
152 
153     // This GrIORefProxy was deferred before but has just been instantiated. To
154     // make all the reffing & unreffing work out we now need to transfer any deferred
155     // refs & unrefs to the new GrSurface
transferRefs()156     void transferRefs() {
157         SkASSERT(fTarget);
158 
159         SkASSERT(fTarget->fRefCnt > 0);
160         fTarget->fRefCnt += (fRefCnt-1); // don't xfer the proxy's creation ref
161         fTarget->fPendingReads += fPendingReads;
162         fTarget->fPendingWrites += fPendingWrites;
163     }
164 
internalGetProxyRefCnt()165     int32_t internalGetProxyRefCnt() const {
166         return fRefCnt;
167     }
168 
internalHasPendingIO()169     bool internalHasPendingIO() const {
170         if (fTarget) {
171             return fTarget->internalHasPendingIO();
172         }
173 
174         return SkToBool(fPendingWrites | fPendingReads);
175     }
176 
internalHasPendingWrite()177     bool internalHasPendingWrite() const {
178         if (fTarget) {
179             return fTarget->internalHasPendingWrite();
180         }
181 
182         return SkToBool(fPendingWrites);
183     }
184 
185     // For deferred proxies this will be null. For wrapped proxies it will point to the
186     // wrapped resource.
187     GrSurface* fTarget;
188 
189 private:
190     // This class is used to manage conversion of refs to pending reads/writes.
191     template <typename> friend class GrProxyRef;
192 
didRemoveRefOrPendingIO()193     void didRemoveRefOrPendingIO() const {
194         if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
195             delete this;
196         }
197     }
198 
199     mutable int32_t fRefCnt;
200     mutable int32_t fPendingReads;
201     mutable int32_t fPendingWrites;
202 };
203 
204 class GrSurfaceProxy : public GrIORefProxy {
205 public:
206     enum class LazyInstantiationType {
207         kSingleUse,      // Instantiation callback is allowed to be called only once.
208         kMultipleUse,    // Instantiation callback can be called multiple times.
209         kDeinstantiate,  // Instantiation callback can be called multiple times,
210                          // but we will deinstantiate the proxy after every flush.
211     };
212 
213     enum class LazyState {
214         kNot,       // The proxy is instantiated or does not have a lazy callback
215         kPartially, // The proxy has a lazy callback but knows basic information about itself.
216         kFully,     // The proxy has a lazy callback and also doesn't know its width, height, etc.
217     };
218 
lazyInstantiationState()219     LazyState lazyInstantiationState() const {
220         if (fTarget || !SkToBool(fLazyInstantiateCallback)) {
221             return LazyState::kNot;
222         } else {
223             if (fWidth <= 0) {
224                 SkASSERT(fHeight <= 0);
225                 return LazyState::kFully;
226             } else {
227                 SkASSERT(fHeight > 0);
228                 return LazyState::kPartially;
229             }
230         }
231     }
232 
config()233     GrPixelConfig config() const { return fConfig; }
width()234     int width() const {
235         SkASSERT(LazyState::kFully != this->lazyInstantiationState());
236         return fWidth;
237     }
height()238     int height() const {
239         SkASSERT(LazyState::kFully != this->lazyInstantiationState());
240         return fHeight;
241     }
242 
isize()243     SkISize isize() const { return {fWidth, fHeight}; }
244 
245     int worstCaseWidth() const;
246     int worstCaseHeight() const;
247     /**
248      * Helper that gets the width and height of the surface as a bounding rectangle.
249      */
getBoundsRect()250     SkRect getBoundsRect() const {
251         SkASSERT(LazyState::kFully != this->lazyInstantiationState());
252         return SkRect::MakeIWH(this->width(), this->height());
253     }
254     /**
255      * Helper that gets the worst case width and height of the surface as a bounding rectangle.
256      */
getWorstCaseBoundsRect()257     SkRect getWorstCaseBoundsRect() const {
258         SkASSERT(LazyState::kFully != this->lazyInstantiationState());
259         return SkRect::MakeIWH(this->worstCaseWidth(), this->worstCaseHeight());
260     }
261 
origin()262     GrSurfaceOrigin origin() const {
263         SkASSERT(kTopLeft_GrSurfaceOrigin == fOrigin || kBottomLeft_GrSurfaceOrigin == fOrigin);
264         return fOrigin;
265     }
266 
backendFormat()267     const GrBackendFormat& backendFormat() const { return fFormat; }
268 
269     class UniqueID {
270     public:
InvalidID()271         static UniqueID InvalidID() {
272             return UniqueID(uint32_t(SK_InvalidUniqueID));
273         }
274 
275         // wrapped
UniqueID(const GrGpuResource::UniqueID & id)276         explicit UniqueID(const GrGpuResource::UniqueID& id) : fID(id.asUInt()) { }
277         // deferred and lazy-callback
UniqueID()278         UniqueID() : fID(GrGpuResource::CreateUniqueID()) { }
279 
asUInt()280         uint32_t asUInt() const { return fID; }
281 
282         bool operator==(const UniqueID& other) const {
283             return fID == other.fID;
284         }
285         bool operator!=(const UniqueID& other) const {
286             return !(*this == other);
287         }
288 
makeInvalid()289         void makeInvalid() { fID = SK_InvalidUniqueID; }
isInvalid()290         bool isInvalid() const { return SK_InvalidUniqueID == fID; }
291 
292     private:
UniqueID(uint32_t id)293         explicit UniqueID(uint32_t id) : fID(id) {}
294 
295         uint32_t fID;
296     };
297 
298     /*
299      * The contract for the uniqueID is:
300      *   for wrapped resources:
301      *      the uniqueID will match that of the wrapped resource
302      *
303      *   for deferred resources:
304      *      the uniqueID will be different from the real resource, when it is allocated
305      *      the proxy's uniqueID will not change across the instantiate call
306      *
307      *    the uniqueIDs of the proxies and the resources draw from the same pool
308      *
309      * What this boils down to is that the uniqueID of a proxy can be used to consistently
310      * track/identify a proxy but should never be used to distinguish between
311      * resources and proxies - beware!
312      */
uniqueID()313     UniqueID uniqueID() const { return fUniqueID; }
314 
underlyingUniqueID()315     UniqueID underlyingUniqueID() const {
316         if (fTarget) {
317             return UniqueID(fTarget->uniqueID());
318         }
319 
320         return fUniqueID;
321     }
322 
323     virtual bool instantiate(GrResourceProvider* resourceProvider) = 0;
324 
325     void deinstantiate();
326 
327     /**
328      * Proxies that are already instantiated and whose backing surface cannot be recycled to
329      * instantiate other proxies do not need to be considered by GrResourceAllocator.
330      */
331     bool canSkipResourceAllocator() const;
332 
333     /**
334      * @return the texture proxy associated with the surface proxy, may be NULL.
335      */
asTextureProxy()336     virtual GrTextureProxy* asTextureProxy() { return nullptr; }
asTextureProxy()337     virtual const GrTextureProxy* asTextureProxy() const { return nullptr; }
338 
339     /**
340      * @return the render target proxy associated with the surface proxy, may be NULL.
341      */
asRenderTargetProxy()342     virtual GrRenderTargetProxy* asRenderTargetProxy() { return nullptr; }
asRenderTargetProxy()343     virtual const GrRenderTargetProxy* asRenderTargetProxy() const { return nullptr; }
344 
isInstantiated()345     bool isInstantiated() const { return SkToBool(fTarget); }
346 
347     // If the proxy is already instantiated, return its backing GrTexture; if not, return null.
peekSurface()348     GrSurface* peekSurface() const { return fTarget; }
349 
350     // If this is a texture proxy and the proxy is already instantiated, return its backing
351     // GrTexture; if not, return null.
peekTexture()352     GrTexture* peekTexture() const { return fTarget ? fTarget->asTexture() : nullptr; }
353 
354     // If this is a render target proxy and the proxy is already instantiated, return its backing
355     // GrRenderTarget; if not, return null.
peekRenderTarget()356     GrRenderTarget* peekRenderTarget() const {
357         return fTarget ? fTarget->asRenderTarget() : nullptr;
358     }
359 
360     /**
361      * Does the resource count against the resource budget?
362      */
isBudgeted()363     SkBudgeted isBudgeted() const { return fBudgeted; }
364 
365     /**
366      * The pixel values of this proxy's surface cannot be modified (e.g. doesn't support write
367      * pixels or MIP map level regen). Read-only proxies also bypass interval tracking and
368      * assignment in GrResourceAllocator.
369      */
readOnly()370     bool readOnly() const { return fSurfaceFlags & GrInternalSurfaceFlags::kReadOnly; }
371 
372     void setLastOpList(GrOpList* opList);
getLastOpList()373     GrOpList* getLastOpList() { return fLastOpList; }
374 
375     GrRenderTargetOpList* getLastRenderTargetOpList();
376     GrTextureOpList* getLastTextureOpList();
377 
378     /**
379      * Retrieves the amount of GPU memory that will be or currently is used by this resource
380      * in bytes. It is approximate since we aren't aware of additional padding or copies made
381      * by the driver.
382      *
383      * @return the amount of GPU memory used in bytes
384      */
gpuMemorySize()385     size_t gpuMemorySize() const {
386         SkASSERT(LazyState::kFully != this->lazyInstantiationState());
387         if (fTarget) {
388             return fTarget->gpuMemorySize();
389         }
390         if (kInvalidGpuMemorySize == fGpuMemorySize) {
391             fGpuMemorySize = this->onUninstantiatedGpuMemorySize();
392             SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
393         }
394         return fGpuMemorySize;
395     }
396 
397     // Helper function that creates a temporary SurfaceContext to perform the copy
398     // The copy is is not a render target and not multisampled.
399     static sk_sp<GrTextureProxy> Copy(GrContext*, GrSurfaceProxy* src, GrMipMapped, SkIRect srcRect,
400                                       SkBackingFit, SkBudgeted);
401 
402     // Copy the entire 'src'
403     static sk_sp<GrTextureProxy> Copy(GrContext*, GrSurfaceProxy* src, GrMipMapped, SkBackingFit,
404                                       SkBudgeted budgeted);
405 
406     // Test-only entry point - should decrease in use as proxies propagate
407     static sk_sp<GrSurfaceContext> TestCopy(GrContext* context, const GrSurfaceDesc& dstDesc,
408                                             GrSurfaceOrigin, GrSurfaceProxy* srcProxy);
409 
410     bool isWrapped_ForTesting() const;
411 
412     SkDEBUGCODE(void validate(GrContext*) const;)
413 
414     // Provides access to functions that aren't part of the public API.
415     inline GrSurfaceProxyPriv priv();
416     inline const GrSurfaceProxyPriv priv() const;
417 
418     GrInternalSurfaceFlags testingOnly_getFlags() const;
419 
420 protected:
421     // Deferred version
GrSurfaceProxy(const GrBackendFormat & format,const GrSurfaceDesc & desc,GrSurfaceOrigin origin,SkBackingFit fit,SkBudgeted budgeted,GrInternalSurfaceFlags surfaceFlags)422     GrSurfaceProxy(const GrBackendFormat& format, const GrSurfaceDesc& desc,
423                    GrSurfaceOrigin origin, SkBackingFit fit,
424                    SkBudgeted budgeted, GrInternalSurfaceFlags surfaceFlags)
425             : GrSurfaceProxy(nullptr, LazyInstantiationType::kSingleUse, format, desc, origin, fit,
426                              budgeted, surfaceFlags) {
427         // Note: this ctor pulls a new uniqueID from the same pool at the GrGpuResources
428     }
429 
430     using LazyInstantiateCallback = std::function<sk_sp<GrSurface>(GrResourceProvider*)>;
431 
432     // Lazy-callback version
433     GrSurfaceProxy(LazyInstantiateCallback&&, LazyInstantiationType,
434                    const GrBackendFormat& format, const GrSurfaceDesc&, GrSurfaceOrigin,
435                    SkBackingFit, SkBudgeted, GrInternalSurfaceFlags);
436 
437     // Wrapped version.
438     GrSurfaceProxy(sk_sp<GrSurface>, GrSurfaceOrigin, SkBackingFit);
439 
440     virtual ~GrSurfaceProxy();
441 
442     friend class GrSurfaceProxyPriv;
443 
444     // Methods made available via GrSurfaceProxyPriv
getProxyRefCnt()445     int32_t getProxyRefCnt() const {
446         return this->internalGetProxyRefCnt();
447     }
448 
hasPendingIO()449     bool hasPendingIO() const {
450         return this->internalHasPendingIO();
451     }
452 
hasPendingWrite()453     bool hasPendingWrite() const {
454         return this->internalHasPendingWrite();
455     }
456 
457     void computeScratchKey(GrScratchKey*) const;
458 
459     virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
460     void assign(sk_sp<GrSurface> surface);
461 
462     sk_sp<GrSurface> createSurfaceImpl(GrResourceProvider*, int sampleCnt, bool needsStencil,
463                                        GrSurfaceDescFlags, GrMipMapped) const;
464 
465     bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt, bool needsStencil,
466                          GrSurfaceDescFlags descFlags, GrMipMapped, const GrUniqueKey*);
467 
468     // In many cases these flags aren't actually known until the proxy has been instantiated.
469     // However, Ganesh frequently needs to change its behavior based on these settings. For
470     // internally create proxies we will know these properties ahead of time. For wrapped
471     // proxies we will copy the properties off of the GrSurface. For lazy proxies we force the
472     // call sites to provide the required information ahead of time. At instantiation time
473     // we verify that the assumed properties match the actual properties.
474     GrInternalSurfaceFlags fSurfaceFlags;
475 
476 private:
477     // For wrapped resources, 'fFormat', 'fConfig', 'fWidth', 'fHeight', and 'fOrigin; will always
478     // be filled in from the wrapped resource.
479     GrBackendFormat        fFormat;
480     GrPixelConfig          fConfig;
481     int                    fWidth;
482     int                    fHeight;
483     GrSurfaceOrigin        fOrigin;
484     SkBackingFit           fFit;      // always kApprox for lazy-callback resources
485                                       // always kExact for wrapped resources
486     mutable SkBudgeted     fBudgeted; // always kYes for lazy-callback resources
487                                       // set from the backing resource for wrapped resources
488                                       // mutable bc of SkSurface/SkImage wishy-washiness
489 
490     const UniqueID         fUniqueID; // set from the backing resource for wrapped resources
491 
492     LazyInstantiateCallback fLazyInstantiateCallback;
493     // If this is set to kSingleuse, then after one call to fLazyInstantiateCallback we will cleanup
494     // the lazy callback and then delete it. This will allow for any refs and resources being held
495     // by the standard function to be released. This is specifically useful in non-dll cases where
496     // we make lazy proxies and instantiate them immediately.
497     // Note: This is ignored if fLazyInstantiateCallback is null.
498     LazyInstantiationType  fLazyInstantiationType;
499 
500     SkDEBUGCODE(void validateSurface(const GrSurface*);)
501     SkDEBUGCODE(virtual void onValidateSurface(const GrSurface*) = 0;)
502 
503     static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
504     SkDEBUGCODE(size_t getRawGpuMemorySize_debugOnly() const { return fGpuMemorySize; })
505 
506     virtual size_t onUninstantiatedGpuMemorySize() const = 0;
507 
508     bool                   fNeedsClear;
509 
510     // This entry is lazily evaluated so, when the proxy wraps a resource, the resource
511     // will be called but, when the proxy is deferred, it will compute the answer itself.
512     // If the proxy computes its own answer that answer is checked (in debug mode) in
513     // the instantiation method.
514     mutable size_t         fGpuMemorySize;
515 
516     // The last opList that wrote to or is currently going to write to this surface
517     // The opList can be closed (e.g., no surface context is currently bound
518     // to this proxy).
519     // This back-pointer is required so that we can add a dependancy between
520     // the opList used to create the current contents of this surface
521     // and the opList of a destination surface to which this one is being drawn or copied.
522     // This pointer is unreffed. OpLists own a ref on their surface proxies.
523     GrOpList*              fLastOpList;
524 
525     typedef GrIORefProxy INHERITED;
526 };

#endif