• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "GrAppliedClip.h"
#include "GrBufferAllocPool.h"
#include "GrDeferredUpload.h"
#include "GrDeinstantiateProxyTracker.h"
#include "GrRenderTargetProxy.h"
#include "SkArenaAlloc.h"
#include "SkArenaAllocList.h"
#include "ops/GrMeshDrawOp.h"

class GrGpu;
class GrGpuCommandBuffer;
class GrGpuRTCommandBuffer;
class GrResourceProvider;
26 /** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
27 class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
28 public:
29     // vertexSpace and indexSpace may either be null or an alloation of size
30     // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
31     // vertices/indices when a buffer larger than kDefaultBufferSize is required.
32     GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*, void* vertexSpace,
33                    void* indexSpace);
34 
~GrOpFlushState()35     ~GrOpFlushState() final { this->reset(); }
36 
37     /** This is called after each op has a chance to prepare its draws and before the draws are
38         executed. */
39     void preExecuteDraws();
40 
41     void doUpload(GrDeferredTextureUploadFn&);
42 
43     /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
44     void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& opBounds);
45 
commandBuffer()46     GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
47     // Helper function used by Ops that are only called via RenderTargetOpLists
48     GrGpuRTCommandBuffer* rtCommandBuffer();
setCommandBuffer(GrGpuCommandBuffer * buffer)49     void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
50 
gpu()51     GrGpu* gpu() { return fGpu; }
52 
53     void reset();
54 
55     /** Additional data required on a per-op basis when executing GrOps. */
56     struct OpArgs {
originOpArgs57         GrSurfaceOrigin origin() const { return fProxy->origin(); }
renderTargetOpArgs58         GrRenderTarget* renderTarget() const { return fProxy->peekRenderTarget(); }
59 
60         GrOp* fOp;
61         // TODO: do we still need the dst proxy here?
62         GrRenderTargetProxy* fProxy;
63         GrAppliedClip* fAppliedClip;
64         GrXferProcessor::DstProxy fDstProxy;
65     };
66 
setOpArgs(OpArgs * opArgs)67     void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }
68 
drawOpArgs()69     const OpArgs& drawOpArgs() const {
70         SkASSERT(fOpArgs);
71         SkASSERT(fOpArgs->fOp);
72         return *fOpArgs;
73     }
74 
75     /** Overrides of GrDeferredUploadTarget. */
76 
tokenTracker()77     const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
78     GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
79     GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
80 
81     /** Overrides of GrMeshDrawOp::Target. */
82     void draw(sk_sp<const GrGeometryProcessor>,
83               const GrPipeline*,
84               const GrPipeline::FixedDynamicState*,
85               const GrPipeline::DynamicStateArrays*,
86               const GrMesh[],
87               int meshCnt) final;
88     void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
89                           int* startVertex) final;
90     uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
91     void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
92                                  sk_sp<const GrBuffer>*, int* startVertex,
93                                  int* actualVertexCount) final;
94     uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
95                                     sk_sp<const GrBuffer>*, int* startIndex,
96                                     int* actualIndexCount) final;
97     void putBackIndices(int indexCount) final;
98     void putBackVertices(int vertices, size_t vertexStride) final;
proxy()99     GrRenderTargetProxy* proxy() const final { return fOpArgs->fProxy; }
100     GrAppliedClip detachAppliedClip() final;
dstProxy()101     const GrXferProcessor::DstProxy& dstProxy() const final { return fOpArgs->fDstProxy; }
deferredUploadTarget()102     GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
103     const GrCaps& caps() const final;
resourceProvider()104     GrResourceProvider* resourceProvider() const final { return fResourceProvider; }
105 
106     GrStrikeCache* glyphCache() const final;
107 
108     // At this point we know we're flushing so full access to the GrAtlasManager is required (and
109     // permissible).
110     GrAtlasManager* atlasManager() const final;
111 
deinstantiateProxyTracker()112     GrDeinstantiateProxyTracker* deinstantiateProxyTracker() { return &fDeinstantiateProxyTracker; }
113 
114 private:
115     /** GrMeshDrawOp::Target override. */
pipelineArena()116     SkArenaAlloc* pipelineArena() override { return &fArena; }
117 
118     struct InlineUpload {
InlineUploadInlineUpload119         InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
120                 : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
121         GrDeferredTextureUploadFn fUpload;
122         GrDeferredUploadToken fUploadBeforeToken;
123     };
124 
125     // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
126     // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
127     // that share a geometry processor into a Draw is that it allows the Gpu object to setup
128     // the shared state once and then issue draws for each mesh.
129     struct Draw {
130         ~Draw();
131         sk_sp<const GrGeometryProcessor> fGeometryProcessor;
132         const GrPipeline* fPipeline = nullptr;
133         const GrPipeline::FixedDynamicState* fFixedDynamicState;
134         const GrPipeline::DynamicStateArrays* fDynamicStateArrays;
135         const GrMesh* fMeshes = nullptr;
136         const GrOp* fOp = nullptr;
137         int fMeshCnt = 0;
138     };
139 
140     // Storage for ops' pipelines, draws, and inline uploads.
141     SkArenaAlloc fArena{sizeof(GrPipeline) * 100};
142 
143     // Store vertex and index data on behalf of ops that are flushed.
144     GrVertexBufferAllocPool fVertexPool;
145     GrIndexBufferAllocPool fIndexPool;
146 
147     // Data stored on behalf of the ops being flushed.
148     SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
149     SkArenaAllocList<InlineUpload> fInlineUploads;
150     SkArenaAllocList<Draw> fDraws;
151 
152     // All draws we store have an implicit draw token. This is the draw token for the first draw
153     // in fDraws.
154     GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();
155 
156     // Info about the op that is currently preparing or executing using the flush state or null if
157     // an op is not currently preparing of executing.
158     OpArgs* fOpArgs = nullptr;
159 
160     GrGpu* fGpu;
161     GrResourceProvider* fResourceProvider;
162     GrTokenTracker* fTokenTracker;
163     GrGpuCommandBuffer* fCommandBuffer = nullptr;
164 
165     // Variables that are used to track where we are in lists as ops are executed
166     SkArenaAllocList<Draw>::Iter fCurrDraw;
167     SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
168 
169     // Used to track the proxies that need to be deinstantiated after we finish a flush
170     GrDeinstantiateProxyTracker fDeinstantiateProxyTracker;
171 };

#endif