/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrMeshDrawTarget.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
public:
    // The CpuBufferCache argument may either be null or a cache of CPU allocations of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);
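
    // A minimal construction sketch (illustrative, not from this file; assumes the caller
    // owns "gpu", "resourceProvider", and "tokenTracker" for the duration of the flush, and
    // that GrBufferAllocPool::CpuBufferCache::Make() takes the max number of buffers to cache):
    //
    //   auto cpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    //   GrOpFlushState flushState(gpu, resourceProvider, tokenTracker,
    //                             std::move(cpuBufferCache));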

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);
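
    // A hedged example of a deferred upload (illustrative; assumes the callback signature
    // declared in GrDeferredUpload.h, and "proxy", "rect", "pixels", and "rowBytes" are
    // hypothetical captures owned by the op):
    //
    //   GrDeferredTextureUploadFn upload =
    //           [=](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //               writePixels(proxy, rect, GrColorType::kRGBA_8888, pixels, rowBytes);
    //           };
    //   flushState->doUpload(upload);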

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*, const GrUserStencilSettings*);

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

#ifdef SUPPORT_OPAQUE_OPTIMIZATION
    void setOpaqueRegion(uint32_t opaqueRegionCount, const SkIRect* region) {
        fOpsRenderPass->setOpaqueRegion(opaqueRegionCount, region);
    }
#endif

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
                        GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
                , fUsesMSAASurface(usesMSAASurface)
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView)
                , fRenderPassXferBarriers(renderPassXferBarriers)
                , fColorLoadOp(colorLoadOp) {
            SkASSERT(surfaceView.asRenderTargetProxy());
        }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
        bool usesMSAASurface() const { return fUsesMSAASurface; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrDstProxyView& dstProxyView() const { return fDstProxyView; }
        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
        GrLoadOp colorLoadOp() const { return fColorLoadOp; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp*                         fOp;
        const GrSurfaceProxyView&     fSurfaceView;
        GrRenderTargetProxy*          fRenderTargetProxy;
        bool                          fUsesMSAASurface;
        GrAppliedClip*                fAppliedClip;
        GrDstProxyView                fDstProxyView;   // TODO: do we still need the dst proxy here?
        GrXferBarrierFlags            fRenderPassXferBarriers;
        GrLoadOp                      fColorLoadOp;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }
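
    // A hedged sketch of how a flush typically brackets an op's execution with OpArgs
    // (illustrative; "op", "surfaceView", "clip", etc. are assumed to come from the
    // OpsTask being flushed):
    //
    //   GrOpFlushState::OpArgs opArgs(op, surfaceView, usesMSAASurface, clip, dstProxyView,
    //                                 renderPassXferBarriers, colorLoadOp);
    //   flushState->setOpArgs(&opArgs);
    //   op->execute(flushState, chainBounds);
    //   flushState->setOpArgs(nullptr);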

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;

    /** Overrides of GrMeshDrawTarget. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
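
    // A minimal onPrepare()-style sketch of reserving vertex data through this target
    // (illustrative; "kVertexStride" and "kVertexCount" are hypothetical values chosen
    // by the op):
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int baseVertex = 0;
    //   void* verts = target->makeVertexSpace(kVertexStride, kVertexCount,
    //                                         &vertexBuffer, &baseVertex);
    //   if (!verts) {
    //       return;  // allocation failed; skip recording the draw
    //   }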
    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                               size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
                                                             sk_sp<const GrBuffer>* buffer,
                                                             size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
    void putBackIndexedIndirectDraws(int drawCount) final {
        fDrawIndirectPool.putBackIndexed(drawCount);
    }
    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrDstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }

    GrXferBarrierFlags renderPassBarriers() const final {
        return this->drawOpArgs().renderPassBarriers();
    }

    GrLoadOp colorLoadOp() const final {
        return this->drawOpArgs().colorLoadOp();
    }

    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrThreadSafeCache* threadSafeCache() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // SmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
    skgpu::v1::SmallPathAtlasMgr* smallPathAtlasManager() const final;

    /** GrMeshDrawTarget override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
    // a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }
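
    // A hedged onExecute()-style sketch of the usual call sequence (illustrative;
    // "fProgramInfo", "fGeomProcTexture", and "fMesh" are hypothetical op members):
    //
    //   flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
    //   flushState->bindTextures(fProgramInfo->geomProc(), *fGeomProcTexture,
    //                            fProgramInfo->pipeline());
    //   flushState->drawMesh(*fMesh);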

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy& singleGeomProcTexture,
                      const GrPipeline& pipeline) {
        SkASSERT(geomProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
        this->bindTextures(geomProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy* const geomProcTextures[],
                      const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }
#ifdef SK_ENABLE_STENCIL_CULLING_OHOS
    bool fDisableStencilCulling = false;
    bool fHasStencilCullingOp = false;
#endif

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation. This object does
        // not need to manage its lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state, or null if
    // no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each OpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif