/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
    // vertexSpace and indexSpace may either be null or an allocation of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*, const GrUserStencilSettings*);
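
    // A sketch of the intended sequencing, inferred from the comments above (an editorial
    // summary, not verbatim Skia documentation):
    //
    //   1. During preparation, each op reserves geometry and records draws through the
    //      GrMeshDrawOp::Target overrides below (makeVertexSpace(), recordDraw(), ...).
    //   2. preExecuteDraws() runs once, after all ops have prepared.
    //   3. executeDrawsAndUploadsForMeshDrawOp() runs per op, in the same order the ops
    //      prepared, interleaving any uploads registered via addInlineUpload().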

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
                        GrAppliedClip* appliedClip,
                        const GrXferProcessor::DstProxyView& dstProxyView,
                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
                , fUsesMSAASurface(usesMSAASurface)
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView)
                , fRenderPassXferBarriers(renderPassXferBarriers)
                , fColorLoadOp(colorLoadOp) {
            SkASSERT(surfaceView.asRenderTargetProxy());
        }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
        bool usesMSAASurface() const { return fUsesMSAASurface; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrXferProcessor::DstProxyView& dstProxyView() const { return fDstProxyView; }
        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
        GrLoadOp colorLoadOp() const { return fColorLoadOp; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp*                         fOp;
        const GrSurfaceProxyView&     fSurfaceView;
        GrRenderTargetProxy*          fRenderTargetProxy;
        bool                          fUsesMSAASurface;
        GrAppliedClip*                fAppliedClip;
        GrXferProcessor::DstProxyView fDstProxyView;   // TODO: do we still need the dst proxy here?
        GrXferBarrierFlags            fRenderPassXferBarriers;
        GrLoadOp                      fColorLoadOp;
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;

    /** Overrides of GrMeshDrawOp::Target. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
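    // A minimal sketch of how an op's prepare step might use the allocation methods above.
    // Only the signatures come from this header; the vertex layout and counts are
    // hypothetical, and 'target' stands for the GrMeshDrawOp::Target (i.e. this object):
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int firstVertex;
    //   void* verts = target->makeVertexSpace(sizeof(SkPoint), 4, &vertexBuffer, &firstVertex);
    //   if (!verts) {
    //       return;  // allocation failed; skip the draw rather than dereference null
    //   }
    //   // ... write 4 vertices into 'verts', then record them with recordDraw().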
    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                               size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
                                                             sk_sp<const GrBuffer>* buffer,
                                                             size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
    void putBackIndexedIndirectDraws(int drawCount) final {
        fDrawIndirectPool.putBackIndexed(drawCount);
    }
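    // Note the intended pairing, sketched from the signatures in this header (not a
    // verbatim Skia recipe): reserve commands during prepare with makeDrawIndirectSpace(),
    // which hands back the buffer and offset alongside the writer, then replay them at
    // execution time with drawIndirect(buffer.get(), offset, drawCount) below.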
    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrXferProcessor::DstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }

    GrXferBarrierFlags renderPassBarriers() const final {
        return this->drawOpArgs().renderPassBarriers();
    }

    GrLoadOp colorLoadOp() const final {
        return this->drawOpArgs().colorLoadOp();
    }

    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrThreadSafeCache* threadSafeCache() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // GrSmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
    GrSmallPathAtlasMgr* smallPathAtlasManager() const final;

    /** GrMeshDrawOp::Target override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
    // a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy& singleGeomProcTexture,
                      const GrPipeline& pipeline) {
        SkASSERT(geomProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
        this->bindTextures(geomProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);
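
    // Taken together, a typical op's execute step might look like the following sketch.
    // The op and its members are hypothetical and the accessor names are illustrative;
    // only the flushState calls are the methods declared above:
    //
    //   void HypotheticalOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
    //       flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
    //       flushState->bindTextures(fProgramInfo->geomProc(), *fTextureProxy,
    //                                fProgramInfo->pipeline());
    //       flushState->drawMesh(*fMesh);
    //   }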

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrGeometryProcessor& geomProc,
                      const GrSurfaceProxy* const geomProcTextures[],
                      const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation or appears on
        // the stack (for CCPR). In either case this object does not need to manage its
        // lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();
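    // (A note on the implicit token scheme: tokens are issued sequentially, so the i-th
    // draw in fDraws is presumably associated with fBaseDrawToken advanced i times; that
    // is what lets an InlineUpload's fUploadBeforeToken be compared against the current
    // draw's token during execution.)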

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each GrOpsTask will set it to point to an
    // array of proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif