/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOp_DEFINED
#define GrOp_DEFINED

#include "include/core/SkMatrix.h"
#include "include/core/SkRect.h"
#include "include/core/SkString.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/gpu/GrGpuResource.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrXferProcessor.h"
#include <atomic>
#include <new>

class GrAppliedClip;
class GrCaps;
class GrDstProxyView;
class GrOpFlushState;
class GrOpsRenderPass;
class GrPaint;

/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both the original ops. When ops are chained each op maintains its own
 * data but they are linked in a list and the head op becomes responsible for executing the work for
 * the chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }

70 class GrOp : private SkNoncopyable {
71 public:
72     using Owner = std::unique_ptr<GrOp>;
73 
74     template<typename Op, typename... Args>
Make(GrRecordingContext * context,Args &&...args)75     static Owner Make(GrRecordingContext* context, Args&&... args) {
76         return Owner{new Op(std::forward<Args>(args)...)};
77     }
78 
79     template<typename Op, typename... Args>
80     static Owner MakeWithProcessorSet(
81             GrRecordingContext* context, const SkPMColor4f& color,
82             GrPaint&& paint, Args&&... args);
83 
84     template<typename Op, typename... Args>
MakeWithExtraMemory(GrRecordingContext * context,size_t extraSize,Args &&...args)85     static Owner MakeWithExtraMemory(
86             GrRecordingContext* context, size_t extraSize, Args&&... args) {
87         void* bytes = ::operator new(sizeof(Op) + extraSize);
88         return Owner{new (bytes) Op(std::forward<Args>(args)...)};
89     }
90 
91     virtual ~GrOp() = default;
92 
93     virtual const char* name() const = 0;
94 
visitProxies(const GrVisitProxyFunc &)95     virtual void visitProxies(const GrVisitProxyFunc&) const {
96         // This default implementation assumes the op has no proxies
97     }
98 
99     enum class CombineResult {
100         /**
101          * The op that combineIfPossible was called on now represents its own work plus that of
102          * the passed op. The passed op should be destroyed without being flushed. Currently it
103          * is not legal to merge an op passed to combineIfPossible() the passed op is already in a
104          * chain (though the op on which combineIfPossible() was called may be).
105          */
106         kMerged,
107         /**
108          * The caller *may* (but is not required) to chain these ops together. If they are chained
109          * then prepare() and execute() will be called on the head op but not the other ops in the
110          * chain. The head op will prepare and execute on behalf of all the ops in the chain.
111          */
112         kMayChain,
113         /**
114          * The ops cannot be combined.
115          */
116         kCannotCombine
117     };
118 
119     // The arenas are the same as what was available when the op was created.
120     CombineResult combineIfPossible(GrOp* that, SkArenaAlloc* alloc, const GrCaps& caps);
121 
bounds()122     const SkRect& bounds() const {
123         SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
124         return fBounds;
125     }
126 
setClippedBounds(const SkRect & clippedBounds)127     void setClippedBounds(const SkRect& clippedBounds) {
128         fBounds = clippedBounds;
129         // The clipped bounds already incorporate any effect of the bounds flags.
130         fBoundsFlags = 0;
131     }
132 
hasAABloat()133     bool hasAABloat() const {
134         SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
135         return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
136     }
137 
hasZeroArea()138     bool hasZeroArea() const {
139         SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
140         return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
141     }
142 
delete(void * p)143     void operator delete(void* p) { ::operator delete(p); }
144 
145     /**
146      * Helper for safely down-casting to a GrOp subclass
147      */
cast()148     template <typename T> const T& cast() const {
149         SkASSERT(T::ClassID() == this->classID());
150         return *static_cast<const T*>(this);
151     }
152 
cast()153     template <typename T> T* cast() {
154         SkASSERT(T::ClassID() == this->classID());
155         return static_cast<T*>(this);
156     }
157 
classID()158     uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
159 
160     // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
uniqueID()161     uint32_t uniqueID() const {
162         if (kIllegalOpID == fUniqueID) {
163             fUniqueID = GenOpID();
164         }
165         return fUniqueID;
166     }
167 
168     /**
169      * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
170      * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
171      * ahead of time and when it has not been called).
172      */
prePrepare(GrRecordingContext * context,const GrSurfaceProxyView & dstView,GrAppliedClip * clip,const GrDstProxyView & dstProxyView,GrXferBarrierFlags renderPassXferBarriers,GrLoadOp colorLoadOp)173     void prePrepare(GrRecordingContext* context, const GrSurfaceProxyView& dstView,
174                     GrAppliedClip* clip, const GrDstProxyView& dstProxyView,
175                     GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp) {
176         TRACE_EVENT0("skia.gpu", name());
177         this->onPrePrepare(context, dstView, clip, dstProxyView, renderPassXferBarriers,
178                            colorLoadOp);
179     }
180 
181     /**
182      * Called prior to executing. The op should perform any resource creation or data transfers
183      * necessary before execute() is called.
184      */
prepare(GrOpFlushState * state)185     void prepare(GrOpFlushState* state) {
186         TRACE_EVENT0("skia.gpu", name());
187         this->onPrepare(state);
188     }
189 
190     /** Issues the op's commands to GrGpu. */
execute(GrOpFlushState * state,const SkRect & chainBounds)191     void execute(GrOpFlushState* state, const SkRect& chainBounds) {
192         TRACE_EVENT0("skia.gpu", name());
193         this->onExecute(state, chainBounds);
194     }
195 
196     /** Used for spewing information about ops when debugging. */
197 #if GR_TEST_UTILS
dumpInfo()198     virtual SkString dumpInfo() const final {
199         return SkStringPrintf("%s\nOpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
200                               this->onDumpInfo().c_str(), fBounds.fLeft, fBounds.fTop,
201                               fBounds.fRight, fBounds.fBottom);
202     }
203 #endif
204 
205     /**
206      * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
207      * subclass. E.g.:
208      *     for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
209      *         // ...
210      *     }
211      */
212     template <typename OpSubclass = GrOp> class ChainRange {
213     private:
214         class Iter {
215         public:
Iter(const OpSubclass * head)216             explicit Iter(const OpSubclass* head) : fCurr(head) {}
217             inline Iter& operator++() {
218                 return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
219             }
220             const OpSubclass& operator*() const { return *fCurr; }
221             bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
222 
223         private:
224             const OpSubclass* fCurr;
225         };
226         const OpSubclass* fHead;
227 
228     public:
ChainRange(const OpSubclass * head)229         explicit ChainRange(const OpSubclass* head) : fHead(head) {}
begin()230         Iter begin() { return Iter(fHead); }
end()231         Iter end() { return Iter(nullptr); }
232     };
233 
234     /**
235      * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
236      * must be of the same subclass.
237      */
238     void chainConcat(GrOp::Owner);
239     /** Returns true if this is the head of a chain (including a length 1 chain). */
isChainHead()240     bool isChainHead() const { return !fPrevInChain; }
241     /** Returns true if this is the tail of a chain (including a length 1 chain). */
isChainTail()242     bool isChainTail() const { return !fNextInChain; }
243     /** The next op in the chain. */
nextInChain()244     GrOp* nextInChain() const { return fNextInChain.get(); }
245     /** The previous op in the chain. */
prevInChain()246     GrOp* prevInChain() const { return fPrevInChain; }
247     /**
248      * Cuts the chain after this op. The returned op is the op that was previously next in the
249      * chain or null if this was already a tail.
250      */
251     GrOp::Owner cutChain();
252     SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);
253 
254 #ifdef SK_DEBUG
validate()255     virtual void validate() const {}
256 #endif
257 
getGrOpTag()258     const GrGpuResourceTag& getGrOpTag() const { return fGrOpTag; }
259 
setGrOpTag(const GrGpuResourceTag & tag)260     void setGrOpTag(const GrGpuResourceTag& tag) { fGrOpTag = tag; }
261 
262 protected:
263     GrOp(uint32_t classID);
264 
265     /**
266      * Indicates that the op will produce geometry that extends beyond its bounds for the
267      * purpose of ensuring that the fragment shader runs on partially covered pixels for
268      * non-MSAA antialiasing.
269      */
270     enum class HasAABloat : bool {
271         kNo = false,
272         kYes = true
273     };
274     /**
275      * Indicates that the geometry being drawn in a hairline stroke. A point that is drawn in device
276      * space is also considered a hairline.
277      */
278     enum class IsHairline : bool {
279         kNo = false,
280         kYes = true
281     };
282 
setBounds(const SkRect & newBounds,HasAABloat aabloat,IsHairline zeroArea)283     void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
284         fBounds = newBounds;
285         this->setBoundsFlags(aabloat, zeroArea);
286     }
setTransformedBounds(const SkRect & srcBounds,const SkMatrix & m,HasAABloat aabloat,IsHairline zeroArea)287     void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
288                               HasAABloat aabloat, IsHairline zeroArea) {
289         m.mapRect(&fBounds, srcBounds);
290         this->setBoundsFlags(aabloat, zeroArea);
291     }
makeFullScreen(GrSurfaceProxy * proxy)292     void makeFullScreen(GrSurfaceProxy* proxy) {
293         this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo);
294     }
295 
GenOpClassID()296     static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
297 
298 private:
joinBounds(const GrOp & that)299     void joinBounds(const GrOp& that) {
300         if (that.hasAABloat()) {
301             fBoundsFlags |= kAABloat_BoundsFlag;
302         }
303         if (that.hasZeroArea()) {
304             fBoundsFlags |= kZeroArea_BoundsFlag;
305         }
306         return fBounds.joinPossiblyEmptyRect(that.fBounds);
307     }
308 
onCombineIfPossible(GrOp *,SkArenaAlloc *,const GrCaps &)309     virtual CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) {
310         return CombineResult::kCannotCombine;
311     }
312 
313     // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two?
314     virtual void onPrePrepare(GrRecordingContext*,
315                               const GrSurfaceProxyView& writeView,
316                               GrAppliedClip*,
317                               const GrDstProxyView&,
318                               GrXferBarrierFlags renderPassXferBarriers,
319                               GrLoadOp colorLoadOp) = 0;
320     virtual void onPrepare(GrOpFlushState*) = 0;
321     // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
322     // Otherwise, this op's bounds.
323     virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
324 #if GR_TEST_UTILS
onDumpInfo()325     virtual SkString onDumpInfo() const { return SkString(); }
326 #endif
327 
GenID(std::atomic<uint32_t> * idCounter)328     static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
329         uint32_t id = idCounter->fetch_add(1, std::memory_order_relaxed);
330         if (id == 0) {
331             SK_ABORT("This should never wrap as it should only be called once for each GrOp "
332                      "subclass.");
333         }
334         return id;
335     }
336 
setBoundsFlags(HasAABloat aabloat,IsHairline zeroArea)337     void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
338         fBoundsFlags = 0;
339         fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
340         fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
341     }
342 
343     enum {
344         kIllegalOpID = 0,
345     };
346 
347     enum BoundsFlags {
348         kAABloat_BoundsFlag                     = 0x1,
349         kZeroArea_BoundsFlag                    = 0x2,
350         SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
351     };
352 
353     Owner                               fNextInChain{nullptr};
354     GrOp*                               fPrevInChain = nullptr;
355     const uint16_t                      fClassID;
356     uint16_t                            fBoundsFlags;
357 
358     GrGpuResourceTag fGrOpTag;
359 
GenOpID()360     static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
361     mutable uint32_t                    fUniqueID = SK_InvalidUniqueID;
362     SkRect                              fBounds;
363 
364     static std::atomic<uint32_t> gCurrOpUniqueID;
365     static std::atomic<uint32_t> gCurrOpClassID;
366 };

#endif