• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7 
8 #ifndef GrOp_DEFINED
9 #define GrOp_DEFINED
10 
11 #include "include/core/SkMatrix.h"
12 #include "include/core/SkRect.h"
13 #include "include/core/SkString.h"
14 #include "include/gpu/GrRecordingContext.h"
15 #include "src/gpu/GrGpuResource.h"
16 #include "src/gpu/GrMemoryPool.h"
17 #include "src/gpu/GrRecordingContextPriv.h"
18 #include "src/gpu/GrTracing.h"
19 #include "src/gpu/GrXferProcessor.h"
20 #include <atomic>
21 #include <new>
22 
23 class GrAppliedClip;
24 class GrCaps;
25 class GrOpFlushState;
26 class GrOpsRenderPass;
27 class GrPaint;
28 
/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
 * and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes responsible
 * for drawing the data from both the original ops. When ops are chained each op maintains its own
 * data but they are linked in a list and the head op becomes responsible for executing the work for
 * the chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
 * in turn depend upon the clip.
 */
// Compile-time switch for verbose op logging. Set to 1 to have GrOP_SPEW(code)
// emit its argument and GrOP_INFO(...) forward to SkDebugf; at 0 both expand to
// nothing, so call sites cost nothing in normal builds.
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id. Each GrOp subclass places this in
// its declaration; the function-local static means GenOpClassID() runs once per
// subclass, giving every subclass a distinct, stable ID.
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
68 
class GrOp : private SkNoncopyable {
public:
    // Ops are heap-allocated and uniquely owned. Destruction goes through the
    // custom operator delete below, which pairs with the global operator new
    // used by Make/MakeWithExtraMemory.
    using Owner = std::unique_ptr<GrOp>;

    /** Allocates and constructs an Op subclass, forwarding args to its constructor. */
    template<typename Op, typename... Args>
    static Owner Make(GrRecordingContext* context, Args&&... args) {
        return Owner{new Op(std::forward<Args>(args)...)};
    }

    // Like Make, but additionally wires a GrPaint/color into the op.
    // Declared only; the definition is not visible in this header view.
    template<typename Op, typename... Args>
    static Owner MakeWithProcessorSet(
            GrRecordingContext* context, const SkPMColor4f& color,
            GrPaint&& paint, Args&&... args);

    /**
     * Allocates sizeof(Op) + extraSize bytes in one block and placement-constructs
     * the op at the front, leaving extraSize trailing bytes for the op's own use
     * (e.g. inline vertex/instance data).
     */
    template<typename Op, typename... Args>
    static Owner MakeWithExtraMemory(
            GrRecordingContext* context, size_t extraSize, Args&&... args) {
        void* bytes = ::operator new(sizeof(Op) + extraSize);
        return Owner{new (bytes) Op(std::forward<Args>(args)...)};
    }

    virtual ~GrOp() = default;

    /** Human-readable op type name, used for tracing and debug output. */
    virtual const char* name() const = 0;

    using VisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipmapped)>;

    virtual void visitProxies(const VisitProxyFunc&) const {
        // This default implementation assumes the op has no proxies
    }

    enum class CombineResult {
        /**
         * The op that combineIfPossible was called on now represents its own work plus that of
         * the passed op. The passed op should be destroyed without being flushed. Currently it
         * is not legal to merge an op passed to combineIfPossible() when the passed op is
         * already in a chain (though the op on which combineIfPossible() was called may be).
         */
        kMerged,
        /**
         * The caller may (but is not required to) chain these ops together. If they are chained
         * then prepare() and execute() will be called on the head op but not the other ops in the
         * chain. The head op will prepare and execute on behalf of all the ops in the chain.
         */
        kMayChain,
        /**
         * The ops cannot be combined.
         */
        kCannotCombine
    };

    // The arenas are the same as what was available when the op was created.
    CombineResult combineIfPossible(GrOp* that, SkArenaAlloc* alloc, const GrCaps& caps);

    /** Device-space bounds. Must have been set (setBounds et al.) before querying. */
    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    void setClippedBounds(const SkRect& clippedBounds) {
        fBounds = clippedBounds;
        // The clipped bounds already incorporate any effect of the bounds flags.
        fBoundsFlags = 0;
    }

    /** True if the op's geometry may extend beyond fBounds due to AA bloat. */
    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    /** True if the op draws zero-area geometry (hairlines/points). */
    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

    // Pairs with the global operator new / placement new used in Make and
    // MakeWithExtraMemory above, so over-allocated ops are released correctly.
    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Helper for safely down-casting to a GrOp subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalOpID == fUniqueID) {
            fUniqueID = GenOpID();
        }
        return fUniqueID;
    }

    /**
     * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
     * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
     * ahead of time and when it has not been called).
     */
    void prePrepare(GrRecordingContext* context, const GrSurfaceProxyView& dstView,
                    GrAppliedClip* clip, const GrXferProcessor::DstProxyView& dstProxyView,
                    GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp) {
        TRACE_EVENT0("skia.gpu", name());
        this->onPrePrepare(context, dstView, clip, dstProxyView, renderPassXferBarriers,
                           colorLoadOp);
    }

    /**
     * Called prior to executing. The op should perform any resource creation or data transfers
     * necessary before execute() is called.
     */
    void prepare(GrOpFlushState* state) {
        TRACE_EVENT0("skia.gpu", name());
        this->onPrepare(state);
    }

    /** Issues the op's commands to GrGpu. */
    void execute(GrOpFlushState* state, const SkRect& chainBounds) {
        TRACE_EVENT0("skia.gpu", name());
        this->onExecute(state, chainBounds);
    }

    /** Used for spewing information about ops when debugging. */
#if GR_TEST_UTILS
    // final: subclasses customize via onDumpInfo(); the bounds suffix is appended here.
    virtual SkString dumpInfo() const final {
        return SkStringPrintf("%s\nOpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
                              this->onDumpInfo().c_str(), fBounds.fLeft, fBounds.fTop,
                              fBounds.fRight, fBounds.fBottom);
    }
#endif

    /**
     * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
     * subclass. E.g.:
     *     for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
     *         // ...
     *     }
     */
    template <typename OpSubclass = GrOp> class ChainRange {
    private:
        class Iter {
        public:
            explicit Iter(const OpSubclass* head) : fCurr(head) {}
            inline Iter& operator++() {
                return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
            }
            const OpSubclass& operator*() const { return *fCurr; }
            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }

        private:
            const OpSubclass* fCurr;
        };
        const OpSubclass* fHead;

    public:
        explicit ChainRange(const OpSubclass* head) : fHead(head) {}
        Iter begin() { return Iter(fHead); }
        Iter end() { return Iter(nullptr); }
    };

    /**
     * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
     * must be of the same subclass.
     */
    void chainConcat(GrOp::Owner);
    /** Returns true if this is the head of a chain (including a length 1 chain). */
    bool isChainHead() const { return !fPrevInChain; }
    /** Returns true if this is the tail of a chain (including a length 1 chain). */
    bool isChainTail() const { return !fNextInChain; }
    /** The next op in the chain. */
    GrOp* nextInChain() const { return fNextInChain.get(); }
    /** The previous op in the chain. */
    GrOp* prevInChain() const { return fPrevInChain; }
    /**
     * Cuts the chain after this op. The returned op is the op that was previously next in the
     * chain or null if this was already a tail.
     */
    GrOp::Owner cutChain();
    SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);

#ifdef SK_DEBUG
    virtual void validate() const {}
#endif

protected:
    // classID comes from the subclass's DEFINE_OP_CLASS_ID-generated ClassID().
    GrOp(uint32_t classID);

    /**
     * Indicates that the op will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat : bool {
        kNo = false,
        kYes = true
    };
    /**
     * Indicates that the geometry being drawn is a hairline stroke. A point that is drawn in
     * device space is also considered a hairline.
     */
    enum class IsHairline : bool {
        kNo = false,
        kYes = true
    };

    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    // Sets bounds to srcBounds mapped through matrix m.
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsHairline zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }
    // Sets bounds to cover the entire target proxy; full-screen ops have no AA
    // bloat and are not hairlines.
    void makeFullScreen(GrSurfaceProxy* proxy) {
        this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo);
    }

    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }

private:
    // Unions that's bounds/flags into this op's (used when merging/chaining ops).
    void joinBounds(const GrOp& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        // joinPossiblyEmptyRect returns void; 'return' is just shorthand here.
        return fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    virtual CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) {
        return CombineResult::kCannotCombine;
    }

    // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two?
    virtual void onPrePrepare(GrRecordingContext*,
                              const GrSurfaceProxyView& writeView,
                              GrAppliedClip*,
                              const GrXferProcessor::DstProxyView&,
                              GrXferBarrierFlags renderPassXferBarriers,
                              GrLoadOp colorLoadOp) = 0;
    virtual void onPrepare(GrOpFlushState*) = 0;
    // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
    // Otherwise, this op's bounds.
    virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
#if GR_TEST_UTILS
    virtual SkString onDumpInfo() const { return SkString(); }
#endif

    // Thread-safe monotonically increasing ID generator; 0 is reserved as the
    // illegal/unset ID, so wrapping back to 0 aborts.
    static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
        uint32_t id = idCounter->fetch_add(1, std::memory_order_relaxed);
        if (id == 0) {
            SK_ABORT("This should never wrap as it should only be called once for each GrOp "
                     "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalOpID = 0,
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag                     = 0x1,
        kZeroArea_BoundsFlag                    = 0x2,
        // Debug-only sentinel: set by the ctor (presumably) until setBounds runs — the
        // SkASSERTs above check it. TODO confirm against GrOp's out-of-line ctor.
        SkDEBUGCODE(kUninitialized_BoundsFlag   = 0x4)
    };

    // Intrusive doubly-linked chain: owning forward pointer, raw back pointer.
    Owner                               fNextInChain{nullptr};
    GrOp*                               fPrevInChain = nullptr;
    // Class ID is stored in 16 bits to pack with fBoundsFlags (IDs start near 0).
    const uint16_t                      fClassID;
    uint16_t                            fBoundsFlags;

    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
    // Lazily assigned by uniqueID(); mutable so the const accessor can fill it in.
    mutable uint32_t                    fUniqueID = SK_InvalidUniqueID;
    SkRect                              fBounds;

    static std::atomic<uint32_t> gCurrOpUniqueID;
    static std::atomic<uint32_t> gCurrOpClassID;
};
362 
363 #endif
364