/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrMeshDrawTarget.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;
/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
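/*
 * Illustrative sketch (not part of the original header), assuming a caller
 * shaped like OpsTask; variable names here are hypothetical. Each op first
 * prepares its draws against this object acting as a GrMeshDrawTarget, then,
 * once a GrOpsRenderPass is bound, the recorded draws and inline uploads are
 * replayed in preparation order:
 *
 *   GrOpFlushState flushState(gpu, resourceProvider, tokenTracker);
 *   // prepare phase: each op records draws/uploads (see setOpArgs() below)
 *   flushState.preExecuteDraws();
 *   flushState.setOpsRenderPass(renderPass);
 *   // execute phase, in the same order the ops were prepared:
 *   flushState.executeDrawsAndUploadsForMeshDrawOp(op, chainBounds, pipeline,
 *                                                  stencilSettings);
 *   flushState.reset();
 */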
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*, const GrUserStencilSettings*);

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
                        GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
                , fUsesMSAASurface(usesMSAASurface)
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView)
                , fRenderPassXferBarriers(renderPassXferBarriers)
                , fColorLoadOp(colorLoadOp) {
            SkASSERT(surfaceView.asRenderTargetProxy());
        }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
        bool usesMSAASurface() const { return fUsesMSAASurface; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrDstProxyView& dstProxyView() const { return fDstProxyView; }
        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
        GrLoadOp colorLoadOp() const { return fColorLoadOp; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp*                         fOp;
        const GrSurfaceProxyView&     fSurfaceView;
        GrRenderTargetProxy*          fRenderTargetProxy;
        bool                          fUsesMSAASurface;
        GrAppliedClip*                fAppliedClip;
        GrDstProxyView                fDstProxyView;   // TODO: do we still need the dst proxy here?
        GrXferBarrierFlags            fRenderPassXferBarriers;
        GrLoadOp                      fColorLoadOp;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }
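
    // Illustrative only: the owning OpsTask typically stack-allocates an OpArgs
    // for the op about to run, points the flush state at it for the duration of
    // that op's work, and then clears it (surrounding variables hypothetical):
    //
    //   GrOpFlushState::OpArgs opArgs(op, surfaceView, usesMSAASurface, clip,
    //                                 dstProxyView, renderPassXferBarriers,
    //                                 colorLoadOp);
    //   flushState->setOpArgs(&opArgs);
    //   op->prepare(flushState);
    //   flushState->setOpArgs(nullptr);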

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
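
    // Illustrative only: an op recording an inline upload during its prepare
    // step might pass a lambda; the write-pixels callback it receives is the
    // function type declared in GrDeferredUpload.h (shown schematically here).
    //
    //   GrDeferredUploadToken token = target->addInlineUpload(
    //           [](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //               // call writePixels(...) to copy CPU data into the texture
    //           });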

    /** Overrides of GrMeshDrawTarget. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
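
    // Illustrative only: a mesh-draw op reserving vertex storage during its
    // prepare step (the stride, count, and the write into `verts` are the
    // op's own; names hypothetical):
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int baseVertex = 0;
    //   void* verts = target->makeVertexSpace(vertexStride, vertexCount,
    //                                         &vertexBuffer, &baseVertex);
    //   // Fill `verts`, then reference vertexBuffer/baseVertex in a
    //   // GrSimpleMesh handed to recordDraw().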
    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                               size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
                                                             sk_sp<const GrBuffer>* buffer,
                                                             size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
    void putBackIndexedIndirectDraws(int drawCount) final {
        fDrawIndirectPool.putBackIndexed(drawCount);
    }
    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrDstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }

    GrXferBarrierFlags renderPassBarriers() const final {
        return this->drawOpArgs().renderPassBarriers();
    }

    GrLoadOp colorLoadOp() const final {
        return this->drawOpArgs().colorLoadOp();
    }

    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrThreadSafeCache* threadSafeCache() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // SmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
    skgpu::v1::SmallPathAtlasMgr* smallPathAtlasManager() const final;

    /** GrMeshDrawTarget override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
    // a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy& singleGeomProcTexture,
                      const GrPipeline& pipeline) {
        SkASSERT(geomProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
        this->bindTextures(geomProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy* const geomProcTextures[],
                      const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }
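
    // Illustrative only: a typical execute sequence through these pass-through
    // helpers (buffers, counts, and index bounds hypothetical):
    //
    //   flushState->bindPipelineAndScissorClip(programInfo, op->bounds());
    //   flushState->bindTextures(geomProc, proxies, programInfo.pipeline());
    //   flushState->bindBuffers(std::move(indexBuffer), nullptr,
    //                           std::move(vertexBuffer));
    //   flushState->drawIndexed(indexCount, /*baseIndex=*/0, minIndexValue,
    //                           maxIndexValue, baseVertex);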

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation. This object does
        // not need to manage its lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each OpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif