/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCFiller.h"

#include "GrCaps.h"
#include "GrGpuCommandBuffer.h"
#include "GrOnFlushResourceProvider.h"
#include "GrOpFlushState.h"
#include "SkMathPriv.h"
#include "SkPath.h"
#include "SkPathPriv.h"
#include "SkPoint.h"
#include <stdlib.h>

using TriPointInstance = GrCCCoverageProcessor::TriPointInstance;
using QuadPointInstance = GrCCCoverageProcessor::QuadPointInstance;

GrCCFiller::GrCCFiller(int numPaths, int numSkPoints, int numSkVerbs, int numConicWeights)
        : fGeometry(numSkPoints, numSkVerbs, numConicWeights)
        , fPathInfos(numPaths)
        , fScissorSubBatches(numPaths)
        , fTotalPrimitiveCounts{PrimitiveTallies(), PrimitiveTallies()} {
    // Batches decide what to draw by looking where the previous one ended. Define initial batches
    // that "end" at the beginning of the data. These will not be drawn, but will only be read by
    // the first actual batch.
    fScissorSubBatches.push_back() = {PrimitiveTallies(), SkIRect::MakeEmpty()};
    fBatches.push_back() = {PrimitiveTallies(), fScissorSubBatches.count(), PrimitiveTallies()};
}

void GrCCFiller::parseDeviceSpaceFill(const SkPath& path, const SkPoint* deviceSpacePts,
                                      GrScissorTest scissorTest, const SkIRect& clippedDevIBounds,
                                      const SkIVector& devToAtlasOffset) {
    SkASSERT(!fInstanceBuffer); // Can't call after prepareToDraw().
    SkASSERT(!path.isEmpty());

    int currPathPointsIdx = fGeometry.points().count();
    int currPathVerbsIdx = fGeometry.verbs().count();
    PrimitiveTallies currPathPrimitiveCounts = PrimitiveTallies();

    fGeometry.beginPath();

    const float* conicWeights = SkPathPriv::ConicWeightData(path);
    int ptsIdx = 0;
    int conicWeightsIdx = 0;
    bool insideContour = false;

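    // ptsIdx tracks the next unconsumed point. Segment verbs below pass &deviceSpacePts[ptsIdx - 1]
    // so the geometry also receives the current point (the previous verb's endpoint) as the
    // segment's starting point.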
    for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
        switch (verb) {
            case SkPath::kMove_Verb:
                if (insideContour) {
                    currPathPrimitiveCounts += fGeometry.endContour();
                }
                fGeometry.beginContour(deviceSpacePts[ptsIdx]);
                ++ptsIdx;
                insideContour = true;
                continue;
            case SkPath::kClose_Verb:
                if (insideContour) {
                    currPathPrimitiveCounts += fGeometry.endContour();
                }
                insideContour = false;
                continue;
            case SkPath::kLine_Verb:
                fGeometry.lineTo(&deviceSpacePts[ptsIdx - 1]);
                ++ptsIdx;
                continue;
            case SkPath::kQuad_Verb:
                fGeometry.quadraticTo(&deviceSpacePts[ptsIdx - 1]);
                ptsIdx += 2;
                continue;
            case SkPath::kCubic_Verb:
                fGeometry.cubicTo(&deviceSpacePts[ptsIdx - 1]);
                ptsIdx += 3;
                continue;
            case SkPath::kConic_Verb:
                fGeometry.conicTo(&deviceSpacePts[ptsIdx - 1], conicWeights[conicWeightsIdx]);
                ptsIdx += 2;
                ++conicWeightsIdx;
                continue;
            default:
                SK_ABORT("Unexpected path verb.");
        }
    }
    SkASSERT(ptsIdx == path.countPoints());
    SkASSERT(conicWeightsIdx == SkPathPriv::ConicWeightCnt(path));

    if (insideContour) {
        currPathPrimitiveCounts += fGeometry.endContour();
    }

    fPathInfos.emplace_back(scissorTest, devToAtlasOffset);

    // Tessellate fans from very large and/or simple paths, in order to reduce overdraw.
    int numVerbs = fGeometry.verbs().count() - currPathVerbsIdx - 1;
    int64_t tessellationWork = (int64_t)numVerbs * (32 - SkCLZ(numVerbs)); // N log N.
    int64_t fanningWork = (int64_t)clippedDevIBounds.height() * clippedDevIBounds.width();
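    // (Illustrative arithmetic: a path with 100 verbs gives tessellationWork ~= 100 * 7 = 700, so
    // the tessellated fan is preferred once the clipped bounds cover more than roughly
    // 700 * 50*50 + 100*100 ~= 1.76 million pixels, i.e. an area of about 1330x1330.)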
    if (tessellationWork * (50*50) + (100*100) < fanningWork) { // Don't tessellate under 100x100.
        fPathInfos.back().tessellateFan(fGeometry, currPathVerbsIdx, currPathPointsIdx,
                                        clippedDevIBounds, &currPathPrimitiveCounts);
    }

    fTotalPrimitiveCounts[(int)scissorTest] += currPathPrimitiveCounts;

    if (GrScissorTest::kEnabled == scissorTest) {
        fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled],
                                          clippedDevIBounds.makeOffset(devToAtlasOffset.fX,
                                                                       devToAtlasOffset.fY)};
    }
}

void GrCCFiller::PathInfo::tessellateFan(const GrCCFillGeometry& geometry, int verbsIdx,
                                         int ptsIdx, const SkIRect& clippedDevIBounds,
                                         PrimitiveTallies* newTriangleCounts) {
    using Verb = GrCCFillGeometry::Verb;
    SkASSERT(-1 == fFanTessellationCount);
    SkASSERT(!fFanTessellation);

    const SkTArray<Verb, true>& verbs = geometry.verbs();
    const SkTArray<SkPoint, true>& pts = geometry.points();

    newTriangleCounts->fTriangles =
            newTriangleCounts->fWeightedTriangles = 0;

    // Build an SkPath of the Redbook fan. We use "winding" fill type right now because we are
    // producing a coverage count, and must fill in every region that has non-zero wind. The
    // path processor will convert coverage count to the appropriate fill type later.
    SkPath fan;
    fan.setFillType(SkPath::kWinding_FillType);
    SkASSERT(Verb::kBeginPath == verbs[verbsIdx]);
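    // Note: the curve verbs below contribute only their endpoints to the fan; the curves'
    // interior coverage is accounted for separately by the quadratic/cubic/conic instances.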
    for (int i = verbsIdx + 1; i < verbs.count(); ++i) {
        switch (verbs[i]) {
            case Verb::kBeginPath:
                SK_ABORT("Invalid GrCCFillGeometry");
                continue;

            case Verb::kBeginContour:
                fan.moveTo(pts[ptsIdx++]);
                continue;

            case Verb::kLineTo:
                fan.lineTo(pts[ptsIdx++]);
                continue;

            case Verb::kMonotonicQuadraticTo:
            case Verb::kMonotonicConicTo:
                fan.lineTo(pts[ptsIdx + 1]);
                ptsIdx += 2;
                continue;

            case Verb::kMonotonicCubicTo:
                fan.lineTo(pts[ptsIdx + 2]);
                ptsIdx += 3;
                continue;

            case Verb::kEndClosedContour:
            case Verb::kEndOpenContour:
                fan.close();
                continue;
        }
    }

    GrTessellator::WindingVertex* vertices = nullptr;
    fFanTessellationCount =
            GrTessellator::PathToVertices(fan, std::numeric_limits<float>::infinity(),
                                          SkRect::Make(clippedDevIBounds), &vertices);
    if (fFanTessellationCount <= 0) {
        SkASSERT(0 == fFanTessellationCount);
        SkASSERT(nullptr == vertices);
        return;
    }

    SkASSERT(0 == fFanTessellationCount % 3);
    for (int i = 0; i < fFanTessellationCount; i += 3) {
        int tessWinding = vertices[i].fWinding;
        SkASSERT(tessWinding == vertices[i + 1].fWinding);
        SkASSERT(tessWinding == vertices[i + 2].fWinding);

        // Ensure this triangle's points actually wind in the same direction as tessWinding.
        // CCPR shaders use the sign of wind to determine which direction to bloat, so even for
        // "wound" triangles the winding sign and point ordering need to agree.
        float ax = vertices[i].fPos.fX - vertices[i + 1].fPos.fX;
        float ay = vertices[i].fPos.fY - vertices[i + 1].fPos.fY;
        float bx = vertices[i].fPos.fX - vertices[i + 2].fPos.fX;
        float by = vertices[i].fPos.fY - vertices[i + 2].fPos.fY;
        float wind = ax*by - ay*bx;
        if ((wind > 0) != (-tessWinding > 0)) { // Tessellator has opposite winding sense.
            std::swap(vertices[i + 1].fPos, vertices[i + 2].fPos);
        }

        if (1 == abs(tessWinding)) {
            ++newTriangleCounts->fTriangles;
        } else {
            ++newTriangleCounts->fWeightedTriangles;
        }
    }

    fFanTessellation.reset(vertices);
}

GrCCFiller::BatchID GrCCFiller::closeCurrentBatch() {
    SkASSERT(!fInstanceBuffer);
    SkASSERT(!fBatches.empty());

    const auto& lastBatch = fBatches.back();
    int maxMeshes = 1 + fScissorSubBatches.count() - lastBatch.fEndScissorSubBatchIdx;
    fMaxMeshesPerDraw = SkTMax(fMaxMeshesPerDraw, maxMeshes);

    const auto& lastScissorSubBatch = fScissorSubBatches[lastBatch.fEndScissorSubBatchIdx - 1];
    PrimitiveTallies batchTotalCounts = fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled] -
                                        lastBatch.fEndNonScissorIndices;
    batchTotalCounts += fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled] -
                        lastScissorSubBatch.fEndPrimitiveIndices;
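    // batchTotalCounts is therefore the number of primitives (scissored and non-scissored) added
    // since the previous batch was closed.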

    // This will invalidate lastBatch.
    fBatches.push_back() = {
        fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled],
        fScissorSubBatches.count(),
        batchTotalCounts
    };
    return fBatches.count() - 1;
}

// Emits a contour's triangle fan.
//
// Classic Redbook fanning would be the triangles: [0 1 2], [0 2 3], ..., [0 n-2 n-1].
//
// This function emits the triangle: [0 n/3 n*2/3], and then recurses on all three sides. The
// advantage to this approach is that for a convex-ish contour, it generates larger triangles.
// Classic fanning tends to generate long, skinny triangles, which are expensive to draw since they
// have a longer perimeter to rasterize and antialias.
//
// The indices array indexes the fan's points (think: glDrawElements), and must have at least log3
// elements past the end for this method to use as scratch space.
//
// Returns the next triangle instance after the final one emitted.
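// For example, with indexCount == 9 this emits the triangle [0 3 6], then recursively fans the
// index ranges [0..3], [3..6], and [6..9]; for the last range it temporarily writes index 0 into
// the scratch slot just past the end so that the sub-fan closes back to the starting point.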
static TriPointInstance* emit_recursive_fan(const SkTArray<SkPoint, true>& pts,
                                            SkTArray<int32_t, true>& indices, int firstIndex,
                                            int indexCount, const Sk2f& devToAtlasOffset,
                                            TriPointInstance out[]) {
    if (indexCount < 3) {
        return out;
    }

    int32_t oneThirdCount = indexCount / 3;
    int32_t twoThirdsCount = (2 * indexCount) / 3;
    out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
               pts[indices[firstIndex + twoThirdsCount]], devToAtlasOffset);

    out = emit_recursive_fan(pts, indices, firstIndex, oneThirdCount + 1, devToAtlasOffset, out);
    out = emit_recursive_fan(pts, indices, firstIndex + oneThirdCount,
                             twoThirdsCount - oneThirdCount + 1, devToAtlasOffset, out);

    int endIndex = firstIndex + indexCount;
    int32_t oldValue = indices[endIndex];
    indices[endIndex] = indices[firstIndex];
    out = emit_recursive_fan(pts, indices, firstIndex + twoThirdsCount,
                             indexCount - twoThirdsCount + 1, devToAtlasOffset, out);
    indices[endIndex] = oldValue;

    return out;
}

static void emit_tessellated_fan(const GrTessellator::WindingVertex* vertices, int numVertices,
                                 const Sk2f& devToAtlasOffset,
                                 TriPointInstance* triPointInstanceData,
                                 QuadPointInstance* quadPointInstanceData,
                                 GrCCFillGeometry::PrimitiveTallies* indices) {
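    // Triangles the tessellator wound exactly once become ordinary triangle instances; triangles
    // wound multiple times become "weighted" instances that carry the winding count along.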
    for (int i = 0; i < numVertices; i += 3) {
        if (1 == abs(vertices[i].fWinding)) {
            triPointInstanceData[indices->fTriangles++].set(vertices[i].fPos, vertices[i + 1].fPos,
                                                            vertices[i + 2].fPos, devToAtlasOffset);
        } else {
            quadPointInstanceData[indices->fWeightedTriangles++].setW(
                    vertices[i].fPos, vertices[i+1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
                    static_cast<float>(abs(vertices[i].fWinding)));
        }
    }
}

bool GrCCFiller::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
    using Verb = GrCCFillGeometry::Verb;
    SkASSERT(!fInstanceBuffer);
    SkASSERT(fBatches.back().fEndNonScissorIndices == // Call closeCurrentBatch().
             fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled]);
    SkASSERT(fBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());

    // Here we build a single instance buffer to share with every internal batch.
    //
    // CCPR processes several different primitive types (triangles, weighted triangles, quadratics,
    // cubics, conics). Each primitive type is further divided into instances that require a
    // scissor and those that don't, leaving us with an independent instance array to build for the
    // GPU for every (primitive type, scissor state) pair.
    //
    // Rather than place each instance array in its own GPU buffer, we allocate a single
    // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
    // our draw calls to direct the GPU to the applicable elements within a given array.
    //
    // We already know how big to make each of these arrays from fTotalPrimitiveCounts, so layout
    // is straightforward. Start with triangles and quadratics. They both view the instance buffer
    // as an array of TriPointInstance[], so we can begin at zero and lay them out one after the
    // other.
    fBaseInstances[0].fTriangles = 0;
    fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
                                   fTotalPrimitiveCounts[0].fTriangles;
    fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
                                    fTotalPrimitiveCounts[1].fTriangles;
    fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
                                    fTotalPrimitiveCounts[0].fQuadratics;
    int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;

    // Wound triangles and cubics both view the same instance buffer as an array of
    // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
    // them on the first index that will not overwrite previous TriPointInstance data.
    int quadBaseIdx =
            GR_CT_DIV_ROUND_UP(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
    fBaseInstances[0].fWeightedTriangles = quadBaseIdx;
    fBaseInstances[1].fWeightedTriangles = fBaseInstances[0].fWeightedTriangles +
                                           fTotalPrimitiveCounts[0].fWeightedTriangles;
    fBaseInstances[0].fCubics = fBaseInstances[1].fWeightedTriangles +
                                fTotalPrimitiveCounts[1].fWeightedTriangles;
    fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
    fBaseInstances[0].fConics = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;
    fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
    int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;
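    // Resulting layout of the shared buffer, by instance index ([0] = scissor test disabled,
    // [1] = scissor test enabled), as an illustrative summary of the arithmetic above:
    //
    //   viewed as TriPointInstance[] : | tris[0] | tris[1] | quads[0] | quads[1] |
    //   viewed as QuadPointInstance[]: | wound tris[0] | wound tris[1] | cubics[0] | cubics[1] |
    //                                  | conics[0] | conics[1] |
    //
    // quadBaseIdx is the first QuadPointInstance index whose storage lies entirely past triEndIdx.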

    fInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
                                            quadEndIdx * sizeof(QuadPointInstance));
    if (!fInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR fill instance buffer.\n");
        return false;
    }

    TriPointInstance* triPointInstanceData = static_cast<TriPointInstance*>(fInstanceBuffer->map());
    QuadPointInstance* quadPointInstanceData =
            reinterpret_cast<QuadPointInstance*>(triPointInstanceData);
    SkASSERT(quadPointInstanceData);

    PathInfo* nextPathInfo = fPathInfos.begin();
    Sk2f devToAtlasOffset;
    PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
    PrimitiveTallies* currIndices = nullptr;
    SkSTArray<256, int32_t, true> currFan;
    bool currFanIsTessellated = false;

    const SkTArray<SkPoint, true>& pts = fGeometry.points();
    int ptsIdx = -1;
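    // ptsIdx starts at -1 so it always names the most recently consumed point; each curve verb
    // below reads its control points starting at pts[ptsIdx], the previous segment's endpoint,
    // and then advances ptsIdx past the newly consumed points.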
    int nextConicWeightIdx = 0;

    // Expand the ccpr verbs into GPU instance buffers.
    for (Verb verb : fGeometry.verbs()) {
        switch (verb) {
            case Verb::kBeginPath:
                SkASSERT(currFan.empty());
                currIndices = &instanceIndices[(int)nextPathInfo->scissorTest()];
                devToAtlasOffset = Sk2f(static_cast<float>(nextPathInfo->devToAtlasOffset().fX),
                                        static_cast<float>(nextPathInfo->devToAtlasOffset().fY));
                currFanIsTessellated = nextPathInfo->hasFanTessellation();
                if (currFanIsTessellated) {
                    emit_tessellated_fan(nextPathInfo->fanTessellation(),
                                         nextPathInfo->fanTessellationCount(), devToAtlasOffset,
                                         triPointInstanceData, quadPointInstanceData, currIndices);
                }
                ++nextPathInfo;
                continue;

            case Verb::kBeginContour:
                SkASSERT(currFan.empty());
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kLineTo:
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicQuadraticTo:
                triPointInstanceData[currIndices->fQuadratics++].set(&pts[ptsIdx],
                                                                     devToAtlasOffset);
                ptsIdx += 2;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicCubicTo:
                quadPointInstanceData[currIndices->fCubics++].set(&pts[ptsIdx], devToAtlasOffset[0],
                                                                  devToAtlasOffset[1]);
                ptsIdx += 3;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicConicTo:
                quadPointInstanceData[currIndices->fConics++].setW(
                        &pts[ptsIdx], devToAtlasOffset,
                        fGeometry.getConicWeight(nextConicWeightIdx));
                ptsIdx += 2;
                ++nextConicWeightIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kEndClosedContour: // endPt == startPt.
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.pop_back();
                }
                // fallthru.
            case Verb::kEndOpenContour: // endPt != startPt.
                SkASSERT(!currFanIsTessellated || currFan.empty());
                if (!currFanIsTessellated && currFan.count() >= 3) {
                    int fanSize = currFan.count();
                    // Reserve space for emit_recursive_fan. Technically this can grow to
                    // fanSize + log3(fanSize), but we approximate with log2.
                    currFan.push_back_n(SkNextLog2(fanSize));
                    SkDEBUGCODE(TriPointInstance* end =)
                            emit_recursive_fan(pts, currFan, 0, fanSize, devToAtlasOffset,
                                               triPointInstanceData + currIndices->fTriangles);
                    currIndices->fTriangles += fanSize - 2;
                    SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
                }
                currFan.reset();
                continue;
        }
    }

    fInstanceBuffer->unmap();

    SkASSERT(nextPathInfo == fPathInfos.end());
    SkASSERT(ptsIdx == pts.count() - 1);
    SkASSERT(instanceIndices[0].fTriangles == fBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles == fBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == fBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[1].fQuadratics == triEndIdx);
    SkASSERT(instanceIndices[0].fWeightedTriangles == fBaseInstances[1].fWeightedTriangles);
    SkASSERT(instanceIndices[1].fWeightedTriangles == fBaseInstances[0].fCubics);
    SkASSERT(instanceIndices[0].fCubics == fBaseInstances[1].fCubics);
    SkASSERT(instanceIndices[1].fCubics == fBaseInstances[0].fConics);
    SkASSERT(instanceIndices[0].fConics == fBaseInstances[1].fConics);
    SkASSERT(instanceIndices[1].fConics == quadEndIdx);

    fMeshesScratchBuffer.reserve(fMaxMeshesPerDraw);
    fScissorRectScratchBuffer.reserve(fMaxMeshesPerDraw);

    return true;
}

void GrCCFiller::drawFills(GrOpFlushState* flushState, BatchID batchID,
                           const SkIRect& drawBounds) const {
    using PrimitiveType = GrCCCoverageProcessor::PrimitiveType;

    SkASSERT(fInstanceBuffer);

    const PrimitiveTallies& batchTotalCounts = fBatches[batchID].fTotalPrimitiveCounts;

    GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus);
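    // The atlas accumulates signed coverage counts, so every primitive draw below blends
    // additively (kPlus) on top of what is already there.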

    if (batchTotalCounts.fTriangles) {
        this->drawPrimitives(flushState, pipeline, batchID, PrimitiveType::kTriangles,
                             &PrimitiveTallies::fTriangles, drawBounds);
    }

    if (batchTotalCounts.fWeightedTriangles) {
        this->drawPrimitives(flushState, pipeline, batchID, PrimitiveType::kWeightedTriangles,
                             &PrimitiveTallies::fWeightedTriangles, drawBounds);
    }

    if (batchTotalCounts.fQuadratics) {
        this->drawPrimitives(flushState, pipeline, batchID, PrimitiveType::kQuadratics,
                             &PrimitiveTallies::fQuadratics, drawBounds);
    }

    if (batchTotalCounts.fCubics) {
        this->drawPrimitives(flushState, pipeline, batchID, PrimitiveType::kCubics,
                             &PrimitiveTallies::fCubics, drawBounds);
    }

    if (batchTotalCounts.fConics) {
        this->drawPrimitives(flushState, pipeline, batchID, PrimitiveType::kConics,
                             &PrimitiveTallies::fConics, drawBounds);
    }
}

void GrCCFiller::drawPrimitives(GrOpFlushState* flushState, const GrPipeline& pipeline,
                                BatchID batchID, GrCCCoverageProcessor::PrimitiveType primitiveType,
                                int PrimitiveTallies::*instanceType,
                                const SkIRect& drawBounds) const {
    SkASSERT(pipeline.isScissorEnabled());

    // Don't call reset(), as that also resets the reserve count.
    fMeshesScratchBuffer.pop_back_n(fMeshesScratchBuffer.count());
    fScissorRectScratchBuffer.pop_back_n(fScissorRectScratchBuffer.count());

    GrCCCoverageProcessor proc(flushState->resourceProvider(), primitiveType);

    SkASSERT(batchID > 0);
    SkASSERT(batchID < fBatches.count());
    const Batch& previousBatch = fBatches[batchID - 1];
    const Batch& batch = fBatches[batchID];
    SkDEBUGCODE(int totalInstanceCount = 0);

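    // Non-scissored instances for this batch occupy one contiguous run of the instance buffer:
    // everything between the previous batch's end index and this batch's end index. They get a
    // single mesh whose scissor is simply the full drawBounds.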
    if (int instanceCount = batch.fEndNonScissorIndices.*instanceType -
                            previousBatch.fEndNonScissorIndices.*instanceType) {
        SkASSERT(instanceCount > 0);
        int baseInstance = fBaseInstances[(int)GrScissorTest::kDisabled].*instanceType +
                           previousBatch.fEndNonScissorIndices.*instanceType;
        proc.appendMesh(fInstanceBuffer, instanceCount, baseInstance, &fMeshesScratchBuffer);
        fScissorRectScratchBuffer.push_back().setXYWH(0, 0, drawBounds.width(),
                                                      drawBounds.height());
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    SkASSERT(previousBatch.fEndScissorSubBatchIdx > 0);
    SkASSERT(batch.fEndScissorSubBatchIdx <= fScissorSubBatches.count());
    int baseScissorInstance = fBaseInstances[(int)GrScissorTest::kEnabled].*instanceType;
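    // Scissored instances are appended one sub-batch (i.e. one scissored path) at a time, since
    // each one needs its own scissor rect in the atlas.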
    for (int i = previousBatch.fEndScissorSubBatchIdx; i < batch.fEndScissorSubBatchIdx; ++i) {
        const ScissorSubBatch& previousSubBatch = fScissorSubBatches[i - 1];
        const ScissorSubBatch& scissorSubBatch = fScissorSubBatches[i];
        int startIndex = previousSubBatch.fEndPrimitiveIndices.*instanceType;
        int instanceCount = scissorSubBatch.fEndPrimitiveIndices.*instanceType - startIndex;
        if (!instanceCount) {
            continue;
        }
        SkASSERT(instanceCount > 0);
        proc.appendMesh(fInstanceBuffer, instanceCount, baseScissorInstance + startIndex,
                        &fMeshesScratchBuffer);
        fScissorRectScratchBuffer.push_back() = scissorSubBatch.fScissor;
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    SkASSERT(fMeshesScratchBuffer.count() == fScissorRectScratchBuffer.count());
    SkASSERT(fMeshesScratchBuffer.count() <= fMaxMeshesPerDraw);
    SkASSERT(totalInstanceCount == batch.fTotalPrimitiveCounts.*instanceType);

    if (!fMeshesScratchBuffer.empty()) {
        proc.draw(flushState, pipeline, fScissorRectScratchBuffer.begin(),
                  fMeshesScratchBuffer.begin(), fMeshesScratchBuffer.count(),
                  SkRect::Make(drawBounds));
    }
}