/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOp_DEFINED
#define GrOp_DEFINED

#include "../private/SkAtomics.h"
#include "GrGpuResource.h"
#include "GrNonAtomicRef.h"
#include "GrXferProcessor.h"
#include "SkMatrix.h"
#include "SkRect.h"
#include "SkString.h"

#include <new>

class GrCaps;
class GrGpuCommandBuffer;
class GrOpFlushState;
class GrRenderTargetOpList;

/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged using combineIfPossible. When two ops merge, one
 * takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both of the original ops.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, e.g. whether or not the GrOp is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrOp prior to geometry generation.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
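
// Illustrative sketch (not Skia API): how a hypothetical subclass might implement the
// merging contract described above. MyRectOp, fColor, and fRects are assumed names.
//
//     bool MyRectOp::onCombineIfPossible(GrOp* t, const GrCaps&) {
//         MyRectOp* that = t->cast<MyRectOp>();  // safe: combineIfPossible() already
//                                                // checked that the class ids match
//         if (fColor != that->fColor) {
//             return false;  // incompatible state; leave the ops separate
//         }
//         // Take the union of the data. "that" is left empty; "this" now draws for both.
//         fRects.insert(fRects.end(), that->fRects.begin(), that->fRects.end());
//         this->joinBounds(*that);  // merged bounds must cover both ops' geometry
//         return true;
//     }
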
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
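
// Usage sketch for DEFINE_OP_CLASS_ID with a hypothetical subclass (MyOp is illustrative;
// the pure virtuals it would also need to override are omitted):
//
//     class MyOp : public GrOp {
//     public:
//         DEFINE_OP_CLASS_ID
//
//         MyOp() : GrOp(ClassID()) {}  // all instances of a subclass share one class id
//     };
//
// Each subclass receives a distinct id the first time its ClassID() runs, which is what
// lets combineIfPossible() cheaply reject ops of different subclasses before the virtual call.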

class GrOp : private SkNoncopyable {
public:
    GrOp(uint32_t classID);
    virtual ~GrOp();

    virtual const char* name() const = 0;

    bool combineIfPossible(GrOp* that, const GrCaps& caps) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return this->onCombineIfPossible(that, caps);
    }

    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    void setClippedBounds(const SkRect& clippedBounds) {
        fBounds = clippedBounds;
        // The clipped bounds already incorporate any effect of the bounds flags.
        fBoundsFlags = 0;
    }

    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

    void* operator new(size_t size);
    void operator delete(void* target);

    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for safely down-casting to a GrOp subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }
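
    // e.g., a down-cast guarded by the class id (MyOp is a hypothetical subclass):
    //
    //     if (op->classID() == MyOp::ClassID()) {
    //         MyOp* my = op->cast<MyOp>();  // the SkASSERT above checks this in debug builds
    //     }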

    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalOpID == fUniqueID) {
            fUniqueID = GenOpID();
        }
        return fUniqueID;
    }

    /**
     * This is called to notify the op that it has been recorded into a GrOpList. Ops can use this
     * to begin preparations for the flush of the op list. Note that the op still may either be
     * combined into another op or have another op combined into it via combineIfPossible() after
     * this call is made.
     */
    virtual void wasRecorded(GrRenderTargetOpList*) {}

    /**
     * Called prior to executing. The op should perform any resource creation or data transfers
     * necessary before execute() is called.
     */
    void prepare(GrOpFlushState* state) { this->onPrepare(state); }

    /** Issues the op's commands to GrGpu. */
    void execute(GrOpFlushState* state) { this->onExecute(state); }

    /** Used for spewing information about ops when debugging. */
    virtual SkString dumpInfo() const {
        SkString string;
        string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                       fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
        return string;
    }
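
    // Overrides typically append their own state and then the base info, e.g. (sketch;
    // the subclass and its fields are hypothetical):
    //
    //     SkString MyOp::dumpInfo() const {
    //         SkString string;
    //         string.appendf("Color: 0x%08x, Rects: %zu\n", fColor, fRects.size());
    //         string.append(GrOp::dumpInfo());  // include the bounds line from above
    //         return string;
    //     }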

    virtual bool needsCommandBufferIsolation() const { return false; }

protected:
    /**
     * Indicates that the op will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat {
        kYes,
        kNo
    };
    /**
     * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
     * points).
     */
    enum class IsZeroArea {
        kYes,
        kNo
    };
    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsZeroArea zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }

    void joinBounds(const GrOp& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        return fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    void replaceBounds(const GrOp& that) {
        fBounds = that.fBounds;
        fBoundsFlags = that.fBoundsFlags;
    }

    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }

private:
    virtual bool onCombineIfPossible(GrOp*, const GrCaps& caps) = 0;

    virtual void onPrepare(GrOpFlushState*) = 0;
    virtual void onExecute(GrOpFlushState*) = 0;

    static uint32_t GenID(int32_t* idCounter) {
        // The atomic inc returns the old value not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrOp "
                   "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalOpID = 0,
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag                     = 0x1,
        kZeroArea_BoundsFlag                    = 0x2,
        SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
    };

    const uint16_t                      fClassID;
    uint16_t                            fBoundsFlags;

    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
    mutable uint32_t                    fUniqueID;
    SkRect                              fBounds;

    static int32_t                      gCurrOpUniqueID;
    static int32_t                      gCurrOpClassID;
};

#endif