/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/base/SkArenaAlloc.h"
#include "src/base/SkArenaAllocList.h"
#include "src/gpu/ganesh/GrAppliedClip.h"
#include "src/gpu/ganesh/GrBufferAllocPool.h"
#include "src/gpu/ganesh/GrDeferredUpload.h"
#include "src/gpu/ganesh/GrMeshDrawTarget.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/GrRenderTargetProxy.h"
#include "src/gpu/ganesh/GrSurfaceProxyView.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, skgpu::TokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*, const GrUserStencilSettings*);
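
    // For orientation, a hedged sketch of how a flush might drive these entry points; the real
    // call sites live in the OpsTask code, and flushState, ops, chainBounds, pipeline, and
    // stencil below are hypothetical locals:
    //
    //   for (GrOp* op : ops) { op->prepare(&flushState); }  // each op records draws/uploads
    //   flushState.preExecuteDraws();                       // after prepare, before execution
    //   for (GrOp* op : ops) {                              // same order as preparation
    //       flushState.executeDrawsAndUploadsForMeshDrawOp(op, chainBounds, pipeline, stencil);
    //   }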

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
                        GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
                , fUsesMSAASurface(usesMSAASurface)
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView)
                , fRenderPassXferBarriers(renderPassXferBarriers)
                , fColorLoadOp(colorLoadOp) {
            SkASSERT(surfaceView.asRenderTargetProxy());
        }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
        bool usesMSAASurface() const { return fUsesMSAASurface; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrDstProxyView& dstProxyView() const { return fDstProxyView; }
        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
        GrLoadOp colorLoadOp() const { return fColorLoadOp; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp*                         fOp;
        const GrSurfaceProxyView&     fSurfaceView;
        GrRenderTargetProxy*          fRenderTargetProxy;
        bool                          fUsesMSAASurface;
        GrAppliedClip*                fAppliedClip;
        GrDstProxyView                fDstProxyView;   // TODO: do we still need the dst proxy here?
        GrXferBarrierFlags            fRenderPassXferBarriers;
        GrLoadOp                      fColorLoadOp;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const skgpu::TokenTracker* tokenTracker() final { return fTokenTracker; }
    skgpu::AtlasToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    skgpu::AtlasToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
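
    // A hedged sketch of deferring an upload from an op's prepare step, given a
    // GrDeferredUploadTarget* target (the lambda parameter type is assumed to follow
    // GrDeferredUpload.h; the body is a placeholder):
    //
    //   skgpu::AtlasToken token = target->addInlineUpload(
    //           [](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //               // issue writePixels(...) calls here; an inline upload runs between
    //               // draws, ordered by the returned token
    //           });
    //   // addASAPUpload() instead requests the upload happen as early as possible in the flush.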

    /** Overrides of GrMeshDrawTarget. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                               size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
                                                             sk_sp<const GrBuffer>* buffer,
                                                             size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
    void putBackIndexedIndirectDraws(int drawCount) final {
        fDrawIndirectPool.putBackIndexed(drawCount);
    }
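
    // A minimal sketch of an op's prepare step using the allocation API above, assuming a
    // hypothetical MyVertex type whose layout matches the op's GrGeometryProcessor attributes
    // and a GrMeshDrawTarget* target:
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int baseVertex = 0;
    //   auto* verts = static_cast<MyVertex*>(
    //           target->makeVertexSpace(sizeof(MyVertex), 4, &vertexBuffer, &baseVertex));
    //   if (!verts) { return; }  // allocation can fail; skip recording the draw
    //   // ... write four vertices, then recordDraw() with a mesh referencing them ...
    //   // Space reserved but not used can be returned via putBackVertices()/putBackIndices().
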
    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrDstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }

    GrXferBarrierFlags renderPassBarriers() const final {
        return this->drawOpArgs().renderPassBarriers();
    }

    GrLoadOp colorLoadOp() const final {
        return this->drawOpArgs().colorLoadOp();
    }

    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrThreadSafeCache* threadSafeCache() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    sktext::gpu::StrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // SmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::v1::SmallPathAtlasMgr* smallPathAtlasManager() const final;
#endif

    /** GrMeshDrawTarget override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
    // a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy& singleGeomProcTexture,
                      const GrPipeline& pipeline) {
        SkASSERT(geomProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
        this->bindTextures(geomProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);
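
    // Taken together, the helpers above keep an op's execution step compact. A hedged sketch,
    // where fProgramInfo, fGeomProcTexture, and fMesh are hypothetical members of some op:
    //
    //   flushState->bindPipelineAndScissorClip(*fProgramInfo, this->bounds());
    //   flushState->bindTextures(fProgramInfo->geomProc(), *fGeomProcTexture,
    //                            fProgramInfo->pipeline());
    //   flushState->drawMesh(*fMesh);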

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy* const geomProcTextures[],
                      const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, skgpu::AtlasToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        skgpu::AtlasToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation. This object does
        // not need to manage its lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };
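
    // For illustration: a single recordDraw() call that passes two meshes and one geometry
    // processor should land in one Draw with fMeshCnt == 2, letting the backend bind the shared
    // program state once and then issue both mesh draws (a reading of this struct, not a
    // separate API).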

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    skgpu::AtlasToken fBaseDrawToken = skgpu::AtlasToken::InvalidToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each OpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    skgpu::TokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif