1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GLInstancedRendering.h"
9
10 #include "GrResourceProvider.h"
11 #include "gl/GrGLGpu.h"
12 #include "instanced/InstanceProcessor.h"
13
14 #define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
15
16 namespace gr_instanced {
17
/**
 * GL-specific specialization of InstancedOp. Adds the per-flush bookkeeping
 * GLInstancedRendering needs to map this op onto the flush-wide instance and
 * draw-indirect buffers.
 */
class GLInstancedOp final : public InstancedOp {
public:
    DEFINE_OP_CLASS_ID

    GLInstancedOp(GLOpAllocator* alloc, GrPaint&& paint)
            : INHERITED(ClassID(), std::move(paint), alloc) {
    }

    // Total number of GL draw commands this op emits: one for its first geometry
    // plus one for each subsequent change of geometry within its draw list.
    int numGLCommands() const { return 1 + fNumChangesInGeometry; }

private:
    // First instance index for this op when GL base-instance support is absent
    // (0 otherwise). Assigned by GLInstancedRendering::onBeginFlush.
    int fEmulatedBaseInstance;
    // Index of this op's first command within the flush-wide command array.
    // Assigned by GLInstancedRendering::onBeginFlush.
    int fGLDrawCmdsIdx;

    // GLInstancedRendering reads and writes the fields above during flush.
    friend class GLInstancedRendering;

    typedef InstancedOp INHERITED;
};
35
CheckSupport(const GrGLCaps & glCaps)36 GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
37 // This method is only intended to be used for initializing fInstancedSupport in the caps.
38 SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
39 if (!glCaps.vertexArrayObjectSupport() ||
40 (!glCaps.drawIndirectSupport() && !glCaps.drawInstancedSupport())) {
41 return GrCaps::InstancedSupport::kNone;
42 }
43 return InstanceProcessor::CheckSupport(*glCaps.shaderCaps(), glCaps);
44 }
45
// The VAO and command-info array are created lazily in onBeginFlush; the
// attribs-buffer id starts invalid so the first flushInstanceAttribs call
// always (re)binds the instance attributes.
GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
    : INHERITED(gpu)
    , fVertexArrayID(0)
    , fGLDrawCmdsInfo(0)
    , fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
    // Callers must have verified instanced support before constructing us.
    SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
}
53
~GLInstancedRendering()54 GLInstancedRendering::~GLInstancedRendering() {
55 if (fVertexArrayID) {
56 GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
57 this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
58 }
59 }
60
// Convenience downcast: this renderer is only ever constructed with a GrGLGpu
// (see the constructor assert), so the static_cast is safe.
inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}
64
makeOp(GrPaint && paint)65 std::unique_ptr<InstancedOp> GLOpAllocator::makeOp(GrPaint&& paint) {
66 return std::unique_ptr<InstancedOp>(new GLInstancedOp(this, std::move(paint)));
67 }
68
onBeginFlush(GrResourceProvider * rp)69 void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
70 // Count what there is to draw.
71 OpList::Iter iter;
72 iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
73 int numGLInstances = 0;
74 int numGLDrawCmds = 0;
75 while (InstancedOp* o = iter.get()) {
76 GLInstancedOp* op = (GLInstancedOp*) o;
77 iter.next();
78
79 numGLInstances += op->fNumDraws;
80 numGLDrawCmds += op->numGLCommands();
81 }
82 if (!numGLDrawCmds) {
83 return;
84 }
85 SkASSERT(numGLInstances);
86
87 // Lazily create a vertex array object.
88 if (!fVertexArrayID) {
89 GL_CALL(GenVertexArrays(1, &fVertexArrayID));
90 if (!fVertexArrayID) {
91 return;
92 }
93 this->glGpu()->bindVertexArray(fVertexArrayID);
94
95 // Attach our index buffer to the vertex array.
96 SkASSERT(!this->indexBuffer()->isCPUBacked());
97 GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
98 static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));
99
100 // Set up the non-instanced attribs.
101 this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
102 GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
103 GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
104 sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
105 GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
106 GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
107 (void*) offsetof(ShapeVertex, fAttrs)));
108
109 SkASSERT(fInstanceAttribsBufferUniqueId.isInvalid());
110 }
111
112 // Create and map instance and draw-indirect buffers.
113 SkASSERT(!fInstanceBuffer);
114 fInstanceBuffer.reset(
115 rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
116 kDynamic_GrAccessPattern,
117 GrResourceProvider::kNoPendingIO_Flag |
118 GrResourceProvider::kRequireGpuMemory_Flag));
119 if (!fInstanceBuffer) {
120 return;
121 }
122
123 SkASSERT(!fDrawIndirectBuffer);
124 if (this->glGpu()->glCaps().drawIndirectSupport()) {
125 fDrawIndirectBuffer.reset(
126 rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
127 kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
128 GrResourceProvider::kNoPendingIO_Flag |
129 GrResourceProvider::kRequireGpuMemory_Flag));
130 if (!fDrawIndirectBuffer) {
131 return;
132 }
133 }
134
135 Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
136 SkASSERT(glMappedInstances);
137 int glInstancesIdx = 0;
138
139 GrGLDrawElementsIndirectCommand* glMappedCmds = nullptr;
140 int glDrawCmdsIdx = 0;
141 if (fDrawIndirectBuffer) {
142 glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
143 SkASSERT(glMappedCmds);
144 }
145
146 bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
147 SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);
148
149 SkASSERT(!fGLDrawCmdsInfo);
150 if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) {
151 fGLDrawCmdsInfo.reset(numGLDrawCmds);
152 }
153
154 // Generate the instance and draw-indirect buffer contents based on the tracked ops.
155 iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
156 while (InstancedOp* o = iter.get()) {
157 GLInstancedOp* op = static_cast<GLInstancedOp*>(o);
158 iter.next();
159
160 op->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
161 op->fGLDrawCmdsIdx = glDrawCmdsIdx;
162
163 const InstancedOp::Draw* draw = op->fHeadDraw;
164 SkASSERT(draw);
165 do {
166 int instanceCount = 0;
167 IndexRange geometry = draw->fGeometry;
168 SkASSERT(!geometry.isEmpty());
169
170 do {
171 glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
172 draw = draw->fNext;
173 } while (draw && draw->fGeometry == geometry);
174
175 if (fDrawIndirectBuffer) {
176 GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
177 glCmd.fCount = geometry.fCount;
178 glCmd.fInstanceCount = instanceCount;
179 glCmd.fFirstIndex = geometry.fStart;
180 glCmd.fBaseVertex = 0;
181 glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
182 }
183
184 if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) {
185 GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
186 cmdInfo.fGeometry = geometry;
187 cmdInfo.fInstanceCount = instanceCount;
188 }
189
190 glInstancesIdx += instanceCount;
191 ++glDrawCmdsIdx;
192 } while (draw);
193 }
194
195 SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
196 if (fDrawIndirectBuffer) {
197 fDrawIndirectBuffer->unmap();
198 }
199
200 SkASSERT(glInstancesIdx == numGLInstances);
201 fInstanceBuffer->unmap();
202 }
203
// Issues the GL draw calls for one op, using the buffers built in onBeginFlush.
// Prefers a single MultiDrawElementsIndirect when the caps allow it; otherwise
// falls back to one draw per command, emulating base instances (when
// unsupported) by re-pointing the instance attribs before each draw.
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const InstancedOp* baseOp) {
    // Neither buffer nor CPU command info present means onBeginFlush bailed early.
    if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc, false)) {
        return;
    }

    if (fDrawIndirectBuffer) {
        this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
    }

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLInstancedOp* op = static_cast<const GLInstancedOp*>(baseOp);
    int numCommands = op->numGLCommands();

#if GR_GL_LOG_INSTANCED_OPS
    // Debug dump: "<instanceCount> * <geometry name>" per command in this op.
    SkASSERT(fGLDrawCmdsInfo);
    SkDebugf("Instanced op: [");
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = op->fGLDrawCmdsIdx + i;
        SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
    }
    SkDebugf("]\n");
#else
    // When not logging, the CPU-side info exists iff we must emulate base instances.
    SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
#endif

    // Fast path: one multi-draw covering all of this op's commands.
    if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
        SkASSERT(fDrawIndirectBuffer);
        int glCmdsIdx = op->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(op->fEmulatedBaseInstance);
        // The pointer arithmetic yields a byte offset into the bound
        // GL_DRAW_INDIRECT_BUFFER, not a dereferenceable address.
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
        return;
    }

    // Slow path: one draw call per command.
    int emulatedBaseInstance = op->fEmulatedBaseInstance;
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = op->fGLDrawCmdsIdx + i;
        this->flushInstanceAttribs(emulatedBaseInstance);
        if (fDrawIndirectBuffer) {
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
        } else {
            const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
            GL_CALL(DrawElementsInstanced(GR_GL_TRIANGLES, cmdInfo.fGeometry.fCount,
                                          GR_GL_UNSIGNED_BYTE,
                                          (GrGLubyte*) nullptr + cmdInfo.fGeometry.fStart,
                                          cmdInfo.fInstanceCount));
        }
        if (!glCaps.baseInstanceSupport()) {
            // Advance the emulated base so the next draw reads the next run of
            // instances from the buffer.
            const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
            emulatedBaseInstance += cmdInfo.fInstanceCount;
        }
    }
}
264
// Binds the per-instance vertex attributes, offset to the given base instance.
// Skipped entirely when both the instance buffer and base instance match what
// was last flushed; otherwise every instanced attrib pointer is re-specified
// with divisor 1 (advance once per instance).
void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->uniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        // Byte offset of the base instance within the buffer, expressed as a
        // fake pointer so member offsets (&offsetInBuffer->fInfo etc.) land on
        // the right fields.
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
        GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));

        // Shape matrix attrib: a 2x3 matrix passed as two vec3 rows.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));

        // Color attrib: normalized unsigned bytes.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
        GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
        GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));

        // Remember what's bound so redundant re-specification can be skipped.
        fInstanceAttribsBufferUniqueId = fInstanceBuffer->uniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}
308
onEndFlush()309 void GLInstancedRendering::onEndFlush() {
310 fInstanceBuffer.reset();
311 fDrawIndirectBuffer.reset();
312 fGLDrawCmdsInfo.reset(0);
313 }
314
onResetGpuResources(ResetType resetType)315 void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
316 if (fVertexArrayID && ResetType::kDestroy == resetType) {
317 GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
318 this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
319 }
320 fVertexArrayID = 0;
321 fInstanceBuffer.reset();
322 fDrawIndirectBuffer.reset();
323 fInstanceAttribsBufferUniqueId.makeInvalid();
324 }
325
326 }
327