/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    // Releases all per-flush state (see reset()) before the members are destroyed.
    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*);

    // The render pass ops are currently being executed into. Set/cleared externally via
    // setOpsRenderPass(); null outside of execution.
    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    /** Drops all accumulated per-flush state (draws, uploads, pooled buffer data). */
    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, GrSurfaceProxyView* surfaceView, GrAppliedClip* appliedClip,
                        const GrXferProcessor::DstProxyView& dstProxyView)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView->asRenderTargetProxy())
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView) {
            // The view must wrap a render target; fRenderTargetProxy caches that proxy.
            SkASSERT(surfaceView->asRenderTargetProxy());
        }

        GrSurfaceOrigin origin() const { return fSurfaceView->origin(); }
        GrSwizzle outputSwizzle() const { return fSurfaceView->swizzle(); }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView* view() const { return fSurfaceView; }
        GrRenderTargetProxy* proxy() const { return fRenderTargetProxy; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrXferProcessor::DstProxyView& dstProxyView() const { return fDstProxyView; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp* fOp;
        GrSurfaceProxyView* fSurfaceView;
        GrRenderTargetProxy* fRenderTargetProxy;
        GrAppliedClip* fAppliedClip;
        GrXferProcessor::DstProxyView fDstProxyView;  // TODO: do we still need the dst proxy here?
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    // Args for the op currently using the flush state; asserts that setOpArgs() was called.
    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;

    /** Overrides of GrMeshDrawOp::Target. */
    void recordDraw(const GrGeometryProcessor*, const GrMesh[], int meshCnt,
                    const GrPipeline::FixedDynamicState*,
                    const GrPipeline::DynamicStateArrays*, GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    // The following accessors forward to the OpArgs of the op currently using the flush state.
    const GrSurfaceProxyView* view() const { return this->drawOpArgs().view(); }
    GrRenderTargetProxy* proxy() const final { return this->drawOpArgs().proxy(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    GrAppliedClip detachAppliedClip() final;
    const GrXferProcessor::DstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }
    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* glyphCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager is required (and
    // permissible).
    GrAtlasManager* atlasManager() const final;

    /** GrMeshDrawOp::Target override. */
    SkArenaAlloc* allocator() override { return &fArena; }

private:
    // An upload that must be executed inline, just before the draw holding fUploadBeforeToken.
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to setup
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation or appears on
        // the stack (for CCPR). In either case this object does not need to manage its
        // lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        const GrPipeline::FixedDynamicState* fFixedDynamicState = nullptr;
        const GrPipeline::DynamicStateArrays* fDynamicStateArrays = nullptr;
        const GrMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAlloc fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state or null if
    // an op is not currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each GrOpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif