/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOp_DEFINED
#define GrOp_DEFINED

#include "include/core/SkMatrix.h"
#include "include/core/SkRect.h"
#include "include/core/SkString.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/gpu/GrGpuResource.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrXferProcessor.h"
#include <atomic>
#include <new>

class GrAppliedClip;
class GrCaps;
class GrDstProxyView;
class GrOpFlushState;
class GrOpsRenderPass;
class GrPaint;

/**
 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry when flushing. This gives
 * GrOp subclasses complete freedom to decide how/when to combine in order to produce fewer draw
 * calls and minimize state changes.
 *
 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
 * one takes on the union of the data and the other is left empty. The merged op becomes
 * responsible for drawing the data from both of the original ops. When ops are chained, each op
 * maintains its own data but they are linked in a list and the head op becomes responsible for
 * executing the work for the chain.
 *
 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
 * it must be the case that any op that can chain with A will either merge or chain with any op
 * that can chain to B.
 *
 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
 * The bounds are used in determining which clip elements must be applied and thus the bounds
 * cannot in turn depend upon the clip.
 */
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
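// A minimal sketch of how a subclass typically uses DEFINE_OP_CLASS_ID: the macro supplies the
// static ClassID() that is passed to the GrOp constructor and checked by cast<>(). "MyRectOp" is
// a hypothetical op used only for illustration, not one defined in this codebase:
//
//     class MyRectOp final : public GrOp {
//     public:
//         DEFINE_OP_CLASS_ID
//
//         MyRectOp(const SkRect& rect) : GrOp(ClassID()), fRect(rect) {}
//         const char* name() const override { return "MyRectOp"; }
//         // onPrePrepare/onPrepare/onExecute overrides elided.
//
//     private:
//         SkRect fRect;
//     };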
class GrOp : private SkNoncopyable {
public:
    using Owner = std::unique_ptr<GrOp>;

    template<typename Op, typename... Args>
    static Owner Make(GrRecordingContext* context, Args&&... args) {
        return Owner{new Op(std::forward<Args>(args)...)};
    }

    template<typename Op, typename... Args>
    static Owner MakeWithProcessorSet(
            GrRecordingContext* context, const SkPMColor4f& color,
            GrPaint&& paint, Args&&... args);

    template<typename Op, typename... Args>
    static Owner MakeWithExtraMemory(
            GrRecordingContext* context, size_t extraSize, Args&&... args) {
        void* bytes = ::operator new(sizeof(Op) + extraSize);
        return Owner{new (bytes) Op(std::forward<Args>(args)...)};
    }
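    // A hedged usage sketch: callers create ops through these factories rather than with new
    // directly, so ownership is always expressed as GrOp::Owner. "MyRectOp" is the same
    // hypothetical subclass sketched above the class:
    //
    //     GrOp::Owner op = GrOp::Make<MyRectOp>(context, SkRect::MakeWH(10, 10));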
    virtual ~GrOp() = default;

    virtual const char* name() const = 0;

    virtual void visitProxies(const GrVisitProxyFunc&) const {
        // This default implementation assumes the op has no proxies
    }

    enum class CombineResult {
        /**
         * The op that combineIfPossible was called on now represents its own work plus that of
         * the passed op. The passed op should be destroyed without being flushed. Currently it
         * is not legal to merge an op passed to combineIfPossible() if the passed op is already
         * in a chain (though the op on which combineIfPossible() was called may be).
         */
        kMerged,
        /**
         * The caller *may* (but is not required to) chain these ops together. If they are
         * chained then prepare() and execute() will be called on the head op but not the other
         * ops in the chain. The head op will prepare and execute on behalf of all the ops in
         * the chain.
         */
        kMayChain,
        /**
         * The ops cannot be combined.
         */
        kCannotCombine
    };

    // The arenas are the same as what was available when the op was created.
    CombineResult combineIfPossible(GrOp* that, SkArenaAlloc* alloc, const GrCaps& caps);

    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    void setClippedBounds(const SkRect& clippedBounds) {
        fBounds = clippedBounds;
        // The clipped bounds already incorporate any effect of the bounds flags.
        fBoundsFlags = 0;
    }

    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
    }

    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Helper for safely down-casting to a GrOp subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalOpID == fUniqueID) {
            fUniqueID = GenOpID();
        }
        return fUniqueID;
    }

    /**
     * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
     * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
     * ahead of time and when it has not been called).
     */
    void prePrepare(GrRecordingContext* context, const GrSurfaceProxyView& dstView,
                    GrAppliedClip* clip, const GrDstProxyView& dstProxyView,
                    GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp) {
        TRACE_EVENT0("skia.gpu", name());
        this->onPrePrepare(context, dstView, clip, dstProxyView, renderPassXferBarriers,
                           colorLoadOp);
    }

    /**
     * Called prior to executing. The op should perform any resource creation or data transfers
     * necessary before execute() is called.
     */
    void prepare(GrOpFlushState* state) {
#ifdef SKIA_OHOS
        HITRACE_OHOS_NAME_FMT_LEVEL(DebugTraceLevel::DETAIL, "prepare: %s", name());
#else
        TRACE_EVENT0("skia.gpu", name());
#endif
        this->onPrepare(state);
    }

    /** Issues the op's commands to GrGpu. */
    void execute(GrOpFlushState* state, const SkRect& chainBounds) {
#ifdef SKIA_OHOS
        HITRACE_OHOS_NAME_FMT_LEVEL(DebugTraceLevel::DETAIL, "execute: %s", name());
#else
        TRACE_EVENT0("skia.gpu", name());
#endif
        this->onExecute(state, chainBounds);
    }
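    // A hedged sketch of the order in which flush-time code drives these entry points; the loops
    // are illustrative only, not the literal flush implementation:
    //
    //     // Optionally, ahead of flush (after sorting):
    //     //     op->prePrepare(context, writeView, clip, dstProxyView, barriers, loadOp);
    //     for (const GrOp::Owner& op : ops) { op->prepare(flushState); }
    //     for (const GrOp::Owner& op : ops) { op->execute(flushState, chainBounds); }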
#ifdef SK_ENABLE_STENCIL_CULLING_OHOS
    virtual bool isStencilCullingOp() {
        return false;
    }

    bool fShouldDisableStencilCulling = false;
#endif

    /** Used for spewing information about ops when debugging. */
#if GR_TEST_UTILS
    virtual SkString dumpInfo() const final {
        return SkStringPrintf("%s\nOpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
                              this->onDumpInfo().c_str(), fBounds.fLeft, fBounds.fTop,
                              fBounds.fRight, fBounds.fBottom);
    }
#endif

    /**
     * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
     * subclass. E.g.:
     *     for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
     *         // ...
     *     }
     */
    template <typename OpSubclass = GrOp> class ChainRange {
    private:
        class Iter {
        public:
            explicit Iter(const OpSubclass* head) : fCurr(head) {}
            inline Iter& operator++() {
                return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
            }
            const OpSubclass& operator*() const { return *fCurr; }
            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }

        private:
            const OpSubclass* fCurr;
        };
        const OpSubclass* fHead;

    public:
        explicit ChainRange(const OpSubclass* head) : fHead(head) {}
        Iter begin() { return Iter(fHead); }
        Iter end() { return Iter(nullptr); }
    };

    /**
     * Concatenates two op chains. This op must be a tail and the passed op must be a head. The
     * ops must be of the same subclass.
     */
    void chainConcat(GrOp::Owner);
    /** Returns true if this is the head of a chain (including a length 1 chain). */
    bool isChainHead() const { return !fPrevInChain; }
    /** Returns true if this is the tail of a chain (including a length 1 chain). */
    bool isChainTail() const { return !fNextInChain; }
    /** The next op in the chain. */
    GrOp* nextInChain() const { return fNextInChain.get(); }
    /** The previous op in the chain. */
    GrOp* prevInChain() const { return fPrevInChain; }
    /**
     * Cuts the chain after this op. The returned op is the op that was previously next in the
     * chain or null if this was already a tail.
     */
    GrOp::Owner cutChain();
    SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);

#ifdef SK_DEBUG
    virtual void validate() const {}
#endif

    const GrGpuResourceTag& getGrOpTag() const { return fGrOpTag; }

    void setGrOpTag(const GrGpuResourceTag& tag) { fGrOpTag = tag; }

protected:
    GrOp(uint32_t classID);

    /**
     * Indicates that the op will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat : bool {
        kNo = false,
        kYes = true
    };
    /**
     * Indicates that the geometry being drawn is a hairline stroke. A point that is drawn in
     * device space is also considered a hairline.
     */
    enum class IsHairline : bool {
        kNo = false,
        kYes = true
    };

    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsHairline zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }
    void makeFullScreen(GrSurfaceProxy* proxy) {
        this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo);
    }
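    // A hedged sketch of how a subclass constructor typically publishes its device-space bounds
    // via these helpers; "MyRectOp" and its arguments are hypothetical:
    //
    //     MyRectOp::MyRectOp(const SkMatrix& viewMatrix, const SkRect& rect)
    //             : GrOp(ClassID()) {
    //         // Map the source rect to device space up front; HasAABloat::kYes would signal
    //         // that AA geometry may extend slightly beyond the mapped rect.
    //         this->setTransformedBounds(rect, viewMatrix, HasAABloat::kYes, IsHairline::kNo);
    //     }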
    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }

private:
    void joinBounds(const GrOp& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        return fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    virtual CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) {
        return CombineResult::kCannotCombine;
    }

    // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two?
    virtual void onPrePrepare(GrRecordingContext*,
                              const GrSurfaceProxyView& writeView,
                              GrAppliedClip*,
                              const GrDstProxyView&,
                              GrXferBarrierFlags renderPassXferBarriers,
                              GrLoadOp colorLoadOp) = 0;
    virtual void onPrepare(GrOpFlushState*) = 0;
    // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
    // Otherwise, this op's bounds.
    virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
#if GR_TEST_UTILS
    virtual SkString onDumpInfo() const { return SkString(); }
#endif

    static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
        uint32_t id = idCounter->fetch_add(1, std::memory_order_relaxed);
        if (id == 0) {
            SK_ABORT("This should never wrap as it should only be called once for each GrOp "
                     "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalOpID = 0,
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag = 0x1,
        kZeroArea_BoundsFlag = 0x2,
        SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
    };

    Owner fNextInChain{nullptr};
    GrOp* fPrevInChain = nullptr;
    const uint16_t fClassID;
    uint16_t fBoundsFlags;

    GrGpuResourceTag fGrOpTag;

    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
    mutable uint32_t fUniqueID = SK_InvalidUniqueID;
    SkRect fBounds;

    static std::atomic<uint32_t> gCurrOpUniqueID;
    static std::atomic<uint32_t> gCurrOpClassID;
};

#endif