// (Removed code-browser navigation chrome: Home / Line# / Scopes# / Navigate / Raw / Download)
1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrOp_DEFINED
9 #define GrOp_DEFINED
10 
11 #include "include/core/SkMatrix.h"
12 #include "include/core/SkRect.h"
13 #include "include/core/SkString.h"
14 #include "include/gpu/GrGpuResource.h"
15 #include "src/gpu/GrNonAtomicRef.h"
16 #include "src/gpu/GrTracing.h"
17 #include "src/gpu/GrXferProcessor.h"
18 #include <atomic>
19 #include <new>
20 
21 class GrCaps;
22 class GrGpuCommandBuffer;
23 class GrOpFlushState;
24 class GrRenderTargetOpList;
25 
/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both the original ops. When ops are chained each op maintains its own
 * data but they are linked in a list and the head op becomes responsible for executing the work for
 * the chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
47 #define GR_OP_SPEW 0
48 #if GR_OP_SPEW
49     #define GrOP_SPEW(code) code
50     #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
51 #else
52     #define GrOP_SPEW(code)
53     #define GrOP_INFO(...)
54 #endif
55 
56 // Print out op information at flush time
57 #define GR_FLUSH_TIME_OP_SPEW 0
58 
59 // A helper macro to generate a class static id
60 #define DEFINE_OP_CLASS_ID \
61     static uint32_t ClassID() { \
62         static uint32_t kClassID = GenOpClassID(); \
63         return kClassID; \
64     }
65 
66 class GrOp : private SkNoncopyable {
67 public:
68     virtual ~GrOp() = default;
69 
70     virtual const char* name() const = 0;
71 
72     using VisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipMapped)>;
73 
visitProxies(const VisitProxyFunc &)74     virtual void visitProxies(const VisitProxyFunc&) const {
75         // This default implementation assumes the op has no proxies
76     }
77 
78     enum class CombineResult {
79         /**
80          * The op that combineIfPossible was called on now represents its own work plus that of
81          * the passed op. The passed op should be destroyed without being flushed. Currently it
82          * is not legal to merge an op passed to combineIfPossible() the passed op is already in a
83          * chain (though the op on which combineIfPossible() was called may be).
84          */
85         kMerged,
86         /**
87          * The caller *may* (but is not required) to chain these ops together. If they are chained
88          * then prepare() and execute() will be called on the head op but not the other ops in the
89          * chain. The head op will prepare and execute on behalf of all the ops in the chain.
90          */
91         kMayChain,
92         /**
93          * The ops cannot be combined.
94          */
95         kCannotCombine
96     };
97 
98     CombineResult combineIfPossible(GrOp* that, const GrCaps& caps);
99 
bounds()100     const SkRect& bounds() const {
101         SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
102         return fBounds;
103     }
104 
setClippedBounds(const SkRect & clippedBounds)105     void setClippedBounds(const SkRect& clippedBounds) {
106         fBounds = clippedBounds;
107         // The clipped bounds already incorporate any effect of the bounds flags.
108         fBoundsFlags = 0;
109     }
110 
hasAABloat()111     bool hasAABloat() const {
112         SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
113         return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
114     }
115 
hasZeroArea()116     bool hasZeroArea() const {
117         SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
118         return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
119     }
120 
121 #ifdef SK_DEBUG
122     // All GrOp-derived classes should be allocated in and deleted from a GrMemoryPool
123     void* operator new(size_t size);
124     void operator delete(void* target);
125 
new(size_t size,void * placement)126     void* operator new(size_t size, void* placement) {
127         return ::operator new(size, placement);
128     }
delete(void * target,void * placement)129     void operator delete(void* target, void* placement) {
130         ::operator delete(target, placement);
131     }
132 #endif
133 
134     /**
135      * Helper for safely down-casting to a GrOp subclass
136      */
cast()137     template <typename T> const T& cast() const {
138         SkASSERT(T::ClassID() == this->classID());
139         return *static_cast<const T*>(this);
140     }
141 
cast()142     template <typename T> T* cast() {
143         SkASSERT(T::ClassID() == this->classID());
144         return static_cast<T*>(this);
145     }
146 
classID()147     uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
148 
149     // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
uniqueID()150     uint32_t uniqueID() const {
151         if (kIllegalOpID == fUniqueID) {
152             fUniqueID = GenOpID();
153         }
154         return fUniqueID;
155     }
156 
157     /**
158      * Called prior to executing. The op should perform any resource creation or data transfers
159      * necessary before execute() is called.
160      */
prepare(GrOpFlushState * state)161     void prepare(GrOpFlushState* state) { this->onPrepare(state); }
162 
163     /** Issues the op's commands to GrGpu. */
execute(GrOpFlushState * state,const SkRect & chainBounds)164     void execute(GrOpFlushState* state, const SkRect& chainBounds) {
165         TRACE_EVENT0("skia.gpu", name());
166         this->onExecute(state, chainBounds);
167     }
168 
169     /** Used for spewing information about ops when debugging. */
170 #ifdef SK_DEBUG
dumpInfo()171     virtual SkString dumpInfo() const {
172         SkString string;
173         string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
174                        fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
175         return string;
176     }
177 #else
dumpInfo()178     SkString dumpInfo() const { return SkString("<Op information unavailable>"); }
179 #endif
180 
181     /**
182      * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
183      * subclass. E.g.:
184      *     for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
185      *         // ...
186      *     }
187      */
188     template <typename OpSubclass = GrOp> class ChainRange {
189     private:
190         class Iter {
191         public:
Iter(const OpSubclass * head)192             explicit Iter(const OpSubclass* head) : fCurr(head) {}
193             inline Iter& operator++() {
194                 return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
195             }
196             const OpSubclass& operator*() const { return *fCurr; }
197             bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
198 
199         private:
200             const OpSubclass* fCurr;
201         };
202         const OpSubclass* fHead;
203 
204     public:
ChainRange(const OpSubclass * head)205         explicit ChainRange(const OpSubclass* head) : fHead(head) {}
begin()206         Iter begin() { return Iter(fHead); }
end()207         Iter end() { return Iter(nullptr); }
208     };
209 
210     /**
211      * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
212      * must be of the same subclass.
213      */
214     void chainConcat(std::unique_ptr<GrOp>);
215     /** Returns true if this is the head of a chain (including a length 1 chain). */
isChainHead()216     bool isChainHead() const { return !fPrevInChain; }
217     /** Returns true if this is the tail of a chain (including a length 1 chain). */
isChainTail()218     bool isChainTail() const { return !fNextInChain; }
219     /** The next op in the chain. */
nextInChain()220     GrOp* nextInChain() const { return fNextInChain.get(); }
221     /** The previous op in the chain. */
prevInChain()222     GrOp* prevInChain() const { return fPrevInChain; }
223     /**
224      * Cuts the chain after this op. The returned op is the op that was previously next in the
225      * chain or null if this was already a tail.
226      */
227     std::unique_ptr<GrOp> cutChain();
228     SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);
229 
230 #ifdef SK_DEBUG
validate()231     virtual void validate() const {}
232 #endif
233 
getGrOpTag()234     GrGpuResourceTag getGrOpTag() const { return fGrOpTag; }
235 
setGrOpTag(const GrGpuResourceTag tag)236     void setGrOpTag(const GrGpuResourceTag tag) { fGrOpTag = tag; }
237 
238 protected:
239     GrOp(uint32_t classID);
240 
241     /**
242      * Indicates that the op will produce geometry that extends beyond its bounds for the
243      * purpose of ensuring that the fragment shader runs on partially covered pixels for
244      * non-MSAA antialiasing.
245      */
246     enum class HasAABloat : bool {
247         kNo = false,
248         kYes = true
249     };
250     /**
251      * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
252      * points).
253      */
254     enum class IsZeroArea : bool {
255         kNo = false,
256         kYes = true
257     };
258 
setBounds(const SkRect & newBounds,HasAABloat aabloat,IsZeroArea zeroArea)259     void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
260         fBounds = newBounds;
261         this->setBoundsFlags(aabloat, zeroArea);
262     }
setTransformedBounds(const SkRect & srcBounds,const SkMatrix & m,HasAABloat aabloat,IsZeroArea zeroArea)263     void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
264                               HasAABloat aabloat, IsZeroArea zeroArea) {
265         m.mapRect(&fBounds, srcBounds);
266         this->setBoundsFlags(aabloat, zeroArea);
267     }
makeFullScreen(GrSurfaceProxy * proxy)268     void makeFullScreen(GrSurfaceProxy* proxy) {
269         this->setBounds(SkRect::MakeIWH(proxy->width(), proxy->height()),
270                         HasAABloat::kNo, IsZeroArea::kNo);
271     }
272 
GenOpClassID()273     static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
274 
275 private:
joinBounds(const GrOp & that)276     void joinBounds(const GrOp& that) {
277         if (that.hasAABloat()) {
278             fBoundsFlags |= kAABloat_BoundsFlag;
279         }
280         if (that.hasZeroArea()) {
281             fBoundsFlags |= kZeroArea_BoundsFlag;
282         }
283         return fBounds.joinPossiblyEmptyRect(that.fBounds);
284     }
285 
onCombineIfPossible(GrOp *,const GrCaps &)286     virtual CombineResult onCombineIfPossible(GrOp*, const GrCaps&) {
287         return CombineResult::kCannotCombine;
288     }
289 
290     virtual void onPrepare(GrOpFlushState*) = 0;
291     // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
292     // Otherwise, this op's bounds.
293     virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
294 
GenID(std::atomic<uint32_t> * idCounter)295     static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
296         uint32_t id = (*idCounter)++;
297         if (id == 0) {
298             SK_ABORT("This should never wrap as it should only be called once for each GrOp "
299                    "subclass.");
300         }
301         return id;
302     }
303 
setBoundsFlags(HasAABloat aabloat,IsZeroArea zeroArea)304     void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
305         fBoundsFlags = 0;
306         fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
307         fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
308     }
309 
310     enum {
311         kIllegalOpID = 0,
312     };
313 
314     enum BoundsFlags {
315         kAABloat_BoundsFlag                     = 0x1,
316         kZeroArea_BoundsFlag                    = 0x2,
317         SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
318     };
319 
320     std::unique_ptr<GrOp>               fNextInChain;
321     GrOp*                               fPrevInChain = nullptr;
322     const uint16_t                      fClassID;
323     uint16_t                            fBoundsFlags;
324 
325     GrGpuResourceTag fGrOpTag;
326 
GenOpID()327     static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
328     mutable uint32_t                    fUniqueID = SK_InvalidUniqueID;
329     SkRect                              fBounds;
330 
331     static std::atomic<uint32_t> gCurrOpUniqueID;
332     static std::atomic<uint32_t> gCurrOpClassID;
333 };
334 
335 #endif
336