/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOp_DEFINED
#define GrOp_DEFINED

#include "GrGpuResource.h"
#include "GrNonAtomicRef.h"
#include "GrTracing.h"
#include "GrXferProcessor.h"
#include "SkMatrix.h"
#include "SkRect.h"
#include "SkString.h"
#include <atomic>
#include <functional>
#include <memory>
#include <new>

class GrCaps;
class GrGpuCommandBuffer;
class GrOpFlushState;
class GrRenderTargetOpList;

/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes
 * responsible for drawing the data from both of the original ops. When ops are chained, each op
 * maintains its own data but they are linked in a list, and the head op becomes responsible for
 * executing the work for the whole chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
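
// A minimal sketch of how a subclass typically uses this macro ("MyOp" is a
// hypothetical example, not an op that exists in Skia; a concrete subclass must
// also override name(), onPrepare(), and onExecute()):
//
//     class MyOp final : public GrOp {
//     public:
//         DEFINE_OP_CLASS_ID
//         MyOp() : GrOp(ClassID()) {}
//     };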

class GrOp : private SkNoncopyable {
public:
    virtual ~GrOp() = default;

    virtual const char* name() const = 0;

    typedef std::function<void(GrSurfaceProxy*)> VisitProxyFunc;

    /**
     * Knowing the type of visitor may enable an op to be more efficient by skipping irrelevant
     * proxies when visitProxies is called.
     */
    enum class VisitorType : unsigned {
        /**
         * During allocation gathering, ops *may* skip visiting proxies that have the
         * canSkipResourceAllocator() property.
         */
        kAllocatorGather,
        /**
         * Ops should visit all proxies.
         */
        kOther,
    };
    virtual void visitProxies(const VisitProxyFunc&, VisitorType = VisitorType::kOther) const {
        // This default implementation assumes the op has no proxies
    }
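
    // A hedged sketch of a visitProxies() override for an op that owns a single
    // proxy ("fProxy" is a hypothetical member, not part of GrOp):
    //
    //     void visitProxies(const VisitProxyFunc& func, VisitorType) const override {
    //         func(fProxy.get());
    //     }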

    enum class CombineResult {
        /**
         * The op that combineIfPossible was called on now represents its own work plus that of
         * the passed op. The passed op should be destroyed without being flushed. Currently it
         * is not legal to merge an op passed to combineIfPossible() when the passed op is
         * already in a chain (though the op on which combineIfPossible() was called may be).
         */
        kMerged,
        /**
         * The caller *may* (but is not required to) chain these ops together. If they are
         * chained then prepare() and execute() will be called on the head op but not on the
         * other ops in the chain. The head op will prepare and execute on behalf of all the ops
         * in the chain.
         */
        kMayChain,
        /**
         * The ops cannot be combined.
         */
        kCannotCombine
    };

    CombineResult combineIfPossible(GrOp* that, const GrCaps& caps);
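
    // A hedged sketch of the private onCombineIfPossible() hook that backs this
    // call ("MyOp" and its fColor member are hypothetical):
    //
    //     CombineResult onCombineIfPossible(GrOp* t, const GrCaps&) override {
    //         MyOp* that = t->cast<MyOp>();
    //         if (fColor != that->fColor) {
    //             return CombineResult::kCannotCombine;
    //         }
    //         // Take over the other op's work; the caller destroys it unflushed.
    //         return CombineResult::kMerged;
    //     }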

    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    void setClippedBounds(const SkRect& clippedBounds) {
        fBounds = clippedBounds;
        // The clipped bounds already incorporate any effect of the bounds flags.
        fBoundsFlags = 0;
    }

    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

#ifdef SK_DEBUG
    // All GrOp-derived classes should be allocated in and deleted from a GrMemoryPool
    void* operator new(size_t size);
    void operator delete(void* target);

    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }
#endif

    /**
     * Helper for safely down-casting to a GrOp subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalOpID == fUniqueID) {
            fUniqueID = GenOpID();
        }
        return fUniqueID;
    }

    /**
     * Called prior to executing. The op should perform any resource creation or data transfers
     * necessary before execute() is called.
     */
    void prepare(GrOpFlushState* state) { this->onPrepare(state); }

    /** Issues the op's commands to GrGpu. */
    void execute(GrOpFlushState* state, const SkRect& chainBounds) {
        TRACE_EVENT0("skia", name());
        this->onExecute(state, chainBounds);
    }

    /** Used for spewing information about ops when debugging. */
#ifdef SK_DEBUG
    virtual SkString dumpInfo() const {
        SkString string;
        string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                       fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
        return string;
    }
#else
    SkString dumpInfo() const { return SkString("<Op information unavailable>"); }
#endif

    /**
     * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
     * subclass. E.g.:
     *     for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
     *         // ...
     *     }
     */
    template <typename OpSubclass = GrOp> class ChainRange {
    private:
        class Iter {
        public:
            explicit Iter(const OpSubclass* head) : fCurr(head) {}
            inline Iter& operator++() {
                return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
            }
            const OpSubclass& operator*() const { return *fCurr; }
            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }

        private:
            const OpSubclass* fCurr;
        };
        const OpSubclass* fHead;

    public:
        explicit ChainRange(const OpSubclass* head) : fHead(head) {}
        Iter begin() { return Iter(fHead); }
        Iter end() { return Iter(nullptr); }
    };

    /**
     * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
     * must be of the same subclass.
     */
    void chainConcat(std::unique_ptr<GrOp>);
    /** Returns true if this is the head of a chain (including a length 1 chain). */
    bool isChainHead() const { return !fPrevInChain; }
    /** Returns true if this is the tail of a chain (including a length 1 chain). */
    bool isChainTail() const { return !fNextInChain; }
    /** The next op in the chain. */
    GrOp* nextInChain() const { return fNextInChain.get(); }
    /** The previous op in the chain. */
    GrOp* prevInChain() const { return fPrevInChain; }
    /**
     * Cuts the chain after this op. The returned op is the op that was previously next in the
     * chain or null if this was already a tail.
     */
    std::unique_ptr<GrOp> cutChain();
    SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);

#ifdef SK_DEBUG
    virtual void validate() const {}
#endif

protected:
    GrOp(uint32_t classID);

    /**
     * Indicates that the op will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat : bool {
        kNo = false,
        kYes = true
    };
    /**
     * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
     * points).
     */
    enum class IsZeroArea : bool {
        kNo = false,
        kYes = true
    };

    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsZeroArea zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void makeFullScreen(GrSurfaceProxy* proxy) {
        this->setBounds(SkRect::MakeIWH(proxy->width(), proxy->height()),
                        HasAABloat::kNo, IsZeroArea::kNo);
    }
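
    // A hedged example of a subclass constructor setting its device-space bounds
    // from local-space geometry ("MyOp", "rect", "viewMatrix", and "coverageAA"
    // are hypothetical):
    //
    //     MyOp(const SkRect& rect, const SkMatrix& viewMatrix, bool coverageAA)
    //             : GrOp(ClassID()) {
    //         this->setTransformedBounds(rect, viewMatrix,
    //                                    coverageAA ? HasAABloat::kYes : HasAABloat::kNo,
    //                                    IsZeroArea::kNo);
    //     }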

    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }

private:
    void joinBounds(const GrOp& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    virtual CombineResult onCombineIfPossible(GrOp*, const GrCaps&) {
        return CombineResult::kCannotCombine;
    }

    virtual void onPrepare(GrOpFlushState*) = 0;
    // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
    // Otherwise, this op's bounds.
    virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;

    static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
        uint32_t id = (*idCounter)++;
        if (id == 0) {
            SK_ABORT("This should never wrap as it should only be called once for each GrOp "
                     "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsZeroArea::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalOpID = 0,
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag                     = 0x1,
        kZeroArea_BoundsFlag                    = 0x2,
        SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
    };

    std::unique_ptr<GrOp>               fNextInChain;
    GrOp*                               fPrevInChain = nullptr;
    const uint16_t                      fClassID;
    uint16_t                            fBoundsFlags;

    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
    mutable uint32_t                    fUniqueID = SK_InvalidUniqueID;
    SkRect                              fBounds;

    static std::atomic<uint32_t> gCurrOpUniqueID;
    static std::atomic<uint32_t> gCurrOpClassID;
};

#endif