/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrDeinstantiateProxyTracker.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

class GrGpu;
class GrGpuCommandBuffer;
class GrGpuRTCommandBuffer;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    void doUpload(GrDeferredTextureUploadFn&);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(
            const GrOp* op, const SkRect& chainBounds, GrProcessorSet&&,
            GrPipeline::InputFlags = GrPipeline::InputFlags::kNone,
            const GrUserStencilSettings* = &GrUserStencilSettings::kUnused);

    GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
    // Helper function used by ops that are only called via RenderTargetOpLists.
    GrGpuRTCommandBuffer* rtCommandBuffer();
    void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        GrSurfaceOrigin origin() const { return fProxy->origin(); }
        GrRenderTarget* renderTarget() const { return fProxy->peekRenderTarget(); }

        GrOp* fOp;
        // TODO: do we still need the dst proxy here?
        GrRenderTargetProxy* fProxy;
        GrAppliedClip* fAppliedClip;
        GrSwizzle fOutputSwizzle;
        GrXferProcessor::DstProxy fDstProxy;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkASSERT(fOpArgs->fOp);
        return *fOpArgs;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;

    /** Overrides of GrMeshDrawOp::Target. */
    void recordDraw(
            sk_sp<const GrGeometryProcessor>, const GrMesh[], int meshCnt,
            const GrPipeline::FixedDynamicState*, const GrPipeline::DynamicStateArrays*) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    GrRenderTargetProxy* proxy() const final { return fOpArgs->fProxy; }
    const GrAppliedClip* appliedClip() final { return fOpArgs->fAppliedClip; }
    GrAppliedClip detachAppliedClip() final;
    const GrXferProcessor::DstProxy& dstProxy() const final { return fOpArgs->fDstProxy; }
    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* glyphCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager is required (and
    // permissible).
    GrAtlasManager* atlasManager() const final;

    GrDeinstantiateProxyTracker* deinstantiateProxyTracker() { return &fDeinstantiateProxyTracker; }

    /** GrMeshDrawOp::Target override. */
    SkArenaAlloc* allocator() override { return &fArena; }

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the GrGpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        sk_sp<const GrGeometryProcessor> fGeometryProcessor;
        const GrPipeline::FixedDynamicState* fFixedDynamicState;
        const GrPipeline::DynamicStateArrays* fDynamicStateArrays;
        const GrMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAlloc fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrGpuCommandBuffer* fCommandBuffer = nullptr;

    // Variables used to track where we are in lists as ops are executed.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;

    // Used to track the proxies that need to be deinstantiated after we finish a flush.
    GrDeinstantiateProxyTracker fDeinstantiateProxyTracker;
};

#endif
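
// A minimal sketch (not part of the original header, hence guarded out) of the calling pattern
// documented above: each op prepares against the flush state, preExecuteDraws() runs once after
// all preparation, and execution then visits the ops in the same order they were prepared. The
// helper name, the OpArgs array, and the surrounding plumbing are hypothetical; only the
// GrOpFlushState calls themselves come from the declarations in this file.
#if 0
void ExampleFlushMeshDrawOps(GrOpFlushState* flushState,
                             GrOpFlushState::OpArgs* opArgs, int opCount,
                             GrGpuCommandBuffer* commandBuffer) {
    // Preparation pass: while an op's args are installed, that op records its geometry and
    // uploads through the GrMeshDrawOp::Target interface this class implements
    // (makeVertexSpace(), recordDraw(), addInlineUpload(), ...).
    for (int i = 0; i < opCount; ++i) {
        flushState->setOpArgs(&opArgs[i]);
        // opArgs[i].fOp prepares its draws here.
        flushState->setOpArgs(nullptr);
    }

    // Per the contract above: called after every op has had a chance to prepare and before any
    // draw is executed.
    flushState->preExecuteDraws();

    // Execution pass: same op order as the preparation pass, with a command buffer installed for
    // the duration.
    flushState->setCommandBuffer(commandBuffer);
    for (int i = 0; i < opCount; ++i) {
        flushState->setOpArgs(&opArgs[i]);
        // A mesh-draw op's execution ultimately routes through
        // executeDrawsAndUploadsForMeshDrawOp(opArgs[i].fOp, ...).
        flushState->setOpArgs(nullptr);
    }
    flushState->setCommandBuffer(nullptr);

    // Release per-flush storage (vertex/index pools, arena, iteration state). The destructor
    // also calls reset().
    flushState->reset();
}
#endif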