• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOp_DEFINED
#define GrOp_DEFINED

#include "../private/SkAtomics.h"
#include "GrGpuResource.h"
#include "GrNonAtomicRef.h"
#include "GrXferProcessor.h"
#include "SkMatrix.h"
#include "SkRect.h"
#include "SkString.h"

#include <functional>
#include <new>

class GrCaps;
class GrGpuCommandBuffer;
class GrOpFlushState;
class GrRenderTargetOpList;

/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged using combineIfPossible. When two ops merge, one
 * takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both of the original ops.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, e.g. whether or not the GrOp is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrOp prior to geometry generation.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }

61 class GrOp : private SkNoncopyable {
62 public:
63     GrOp(uint32_t classID);
64     virtual ~GrOp();
65 
66     virtual const char* name() const = 0;
67 
68     typedef std::function<void(GrSurfaceProxy*)> VisitProxyFunc;
69 
visitProxies(const VisitProxyFunc &)70     virtual void visitProxies(const VisitProxyFunc&) const {
71         // This default implementation assumes the op has no proxies
72     }
73 
combineIfPossible(GrOp * that,const GrCaps & caps)74     bool combineIfPossible(GrOp* that, const GrCaps& caps) {
75         if (this->classID() != that->classID()) {
76             return false;
77         }
78 
79         return this->onCombineIfPossible(that, caps);
80     }
81 
bounds()82     const SkRect& bounds() const {
83         SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
84         return fBounds;
85     }
86 
setClippedBounds(const SkRect & clippedBounds)87     void setClippedBounds(const SkRect& clippedBounds) {
88         fBounds = clippedBounds;
89         // The clipped bounds already incorporate any effect of the bounds flags.
90         fBoundsFlags = 0;
91     }
92 
hasAABloat()93     bool hasAABloat() const {
94         SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
95         return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
96     }
97 
hasZeroArea()98     bool hasZeroArea() const {
99         SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
100         return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
101     }
102 
103     void* operator new(size_t size);
104     void operator delete(void* target);
105 
new(size_t size,void * placement)106     void* operator new(size_t size, void* placement) {
107         return ::operator new(size, placement);
108     }
delete(void * target,void * placement)109     void operator delete(void* target, void* placement) {
110         ::operator delete(target, placement);
111     }
112 
113     /**
114      * Helper for safely down-casting to a GrOp subclass
115      */
cast()116     template <typename T> const T& cast() const {
117         SkASSERT(T::ClassID() == this->classID());
118         return *static_cast<const T*>(this);
119     }
120 
cast()121     template <typename T> T* cast() {
122         SkASSERT(T::ClassID() == this->classID());
123         return static_cast<T*>(this);
124     }
125 
classID()126     uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
127 
128     // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
uniqueID()129     uint32_t uniqueID() const {
130         if (kIllegalOpID == fUniqueID) {
131             fUniqueID = GenOpID();
132         }
133         return fUniqueID;
134     }
135 
136     /**
137      * This is called to notify the op that it has been recorded into a GrOpList. Ops can use this
138      * to begin preparations for the flush of the op list. Note that the op still may either be
139      * combined into another op or have another op combined into it via combineIfPossible() after
140      * this call is made.
141      */
wasRecorded(GrRenderTargetOpList *)142     virtual void wasRecorded(GrRenderTargetOpList*) {}
143 
144     /**
145      * Called prior to executing. The op should perform any resource creation or data transfers
146      * necessary before execute() is called.
147      */
prepare(GrOpFlushState * state)148     void prepare(GrOpFlushState* state) { this->onPrepare(state); }
149 
150     /** Issues the op's commands to GrGpu. */
execute(GrOpFlushState * state)151     void execute(GrOpFlushState* state) { this->onExecute(state); }
152 
153     /** Used for spewing information about ops when debugging. */
dumpInfo()154     virtual SkString dumpInfo() const {
155         SkString string;
156         string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
157                        fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
158         return string;
159     }
160 
161 protected:
162     /**
163      * Indicates that the op will produce geometry that extends beyond its bounds for the
164      * purpose of ensuring that the fragment shader runs on partially covered pixels for
165      * non-MSAA antialiasing.
166      */
167     enum class HasAABloat : bool {
168         kNo = false,
169         kYes = true
170     };
171     /**
172      * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
173      * points).
174      */
175     enum class IsZeroArea : bool {
176         kNo = false,
177         kYes = true
178     };
179 
setBounds(const SkRect & newBounds,HasAABloat aabloat,IsZeroArea zeroArea)180     void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
181         fBounds = newBounds;
182         this->setBoundsFlags(aabloat, zeroArea);
183     }
setTransformedBounds(const SkRect & srcBounds,const SkMatrix & m,HasAABloat aabloat,IsZeroArea zeroArea)184     void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
185                               HasAABloat aabloat, IsZeroArea zeroArea) {
186         m.mapRect(&fBounds, srcBounds);
187         this->setBoundsFlags(aabloat, zeroArea);
188     }
makeFullScreen(GrSurfaceProxy * proxy)189     void makeFullScreen(GrSurfaceProxy* proxy) {
190         this->setBounds(SkRect::MakeIWH(proxy->width(), proxy->height()),
191                         HasAABloat::kNo, IsZeroArea::kNo);
192     }
193 
joinBounds(const GrOp & that)194     void joinBounds(const GrOp& that) {
195         if (that.hasAABloat()) {
196             fBoundsFlags |= kAABloat_BoundsFlag;
197         }
198         if (that.hasZeroArea()) {
199             fBoundsFlags |= kZeroArea_BoundsFlag;
200         }
201         return fBounds.joinPossiblyEmptyRect(that.fBounds);
202     }
203 
replaceBounds(const GrOp & that)204     void replaceBounds(const GrOp& that) {
205         fBounds = that.fBounds;
206         fBoundsFlags = that.fBoundsFlags;
207     }
208 
GenOpClassID()209     static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
210 
211 private:
212     virtual bool onCombineIfPossible(GrOp*, const GrCaps& caps) = 0;
213 
214     virtual void onPrepare(GrOpFlushState*) = 0;
215     virtual void onExecute(GrOpFlushState*) = 0;
216 
GenID(int32_t * idCounter)217     static uint32_t GenID(int32_t* idCounter) {
218         // The atomic inc returns the old value not the incremented value. So we add
219         // 1 to the returned value.
220         uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
221         if (!id) {
222             SK_ABORT("This should never wrap as it should only be called once for each GrOp "
223                    "subclass.");
224         }
225         return id;
226     }
227 
setBoundsFlags(HasAABloat aabloat,IsZeroArea zeroArea)228     void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
229         fBoundsFlags = 0;
230         fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
231         fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
232     }
233 
234     enum {
235         kIllegalOpID = 0,
236     };
237 
238     enum BoundsFlags {
239         kAABloat_BoundsFlag                     = 0x1,
240         kZeroArea_BoundsFlag                    = 0x2,
241         SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
242     };
243 
244     const uint16_t                      fClassID;
245     uint16_t                            fBoundsFlags;
246 
GenOpID()247     static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
248     mutable uint32_t                    fUniqueID;
249     SkRect                              fBounds;
250 
251     static int32_t                      gCurrOpUniqueID;
252     static int32_t                      gCurrOpClassID;
253 };

#endif // GrOp_DEFINED