/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrPath.h"
#include "GrPoint.h"
#include "GrRenderTarget.h"
#include "GrTemplates.h"
#include "GrTexture.h"
#include "GrVertexBuffer.h"

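// GrInOrderDrawBuffer records draws, state changes, clip changes, clears, and
// surface copies as command tokens in fCmds (with per-command payload arrays)
// and replays them in order on the target GrGpu when flush() is called.
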
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(NULL != vertexPool);
    SkASSERT(NULL != indexPool);

    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
    // This must be called before the GrDrawTarget destructor.
    this->releaseGeometry();
    fDstGpu->unref();
}

////////////////////////////////////////////////////////////////////////////////

namespace {
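// Computes the device-space bounding rect of vertexCount vertices. Assumes the
// position is the first attribute of each vertex and that vertices are packed
// with a stride of vertexSize bytes.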
void get_vertex_bounds(const void* vertices,
                       size_t vertexSize,
                       int vertexCount,
                       SkRect* bounds) {
    SkASSERT(vertexSize >= sizeof(GrPoint));
    SkASSERT(vertexCount > 0);
    const GrPoint* point = static_cast<const GrPoint*>(vertices);
    bounds->fLeft = bounds->fRight = point->fX;
    bounds->fTop = bounds->fBottom = point->fY;
    for (int i = 1; i < vertexCount; ++i) {
        point = reinterpret_cast<const GrPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
        bounds->growToInclude(point->fX, point->fY);
    }
}
}  // namespace


namespace {

extern const GrVertexAttrib kRectPosColorUVAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint)+sizeof(GrColor),
                                                  kLocalCoord_GrVertexAttribBinding},
};

extern const GrVertexAttrib kRectPosUVAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding},
};

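// Selects the vertex layout for a rect draw: position always, optionally
// followed by a per-vertex color and/or local (texture) coordinates. The byte
// offset of each optional attribute is written to *colorOffset / *localOffset,
// or -1 if the attribute is absent.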
static void set_vertex_attributes(GrDrawState* drawState,
                                  bool hasColor, bool hasUVs,
                                  int* colorOffset, int* localOffset) {
    *colorOffset = -1;
    *localOffset = -1;

    // Using per-vertex colors allows batching across colors. (A lot of rects in a row differing
    // only in color is a common occurrence in tables). However, having per-vertex colors disables
    // blending optimizations because we don't know if the color will be solid or not. These
    // optimizations help determine whether coverage and color can be blended correctly when
    // dual-source blending isn't available. This comes into play when there is coverage. If colors
    // were a stage it could take a hint that every vertex's color will be opaque.
    if (hasColor && hasUVs) {
        *colorOffset = sizeof(GrPoint);
        *localOffset = sizeof(GrPoint) + sizeof(GrColor);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(3);
    } else if (hasColor) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosColorUVAttribs>(2);
    } else if (hasUVs) {
        *localOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<kRectPosUVAttribs>(2);
    } else {
        drawState->setVertexAttribs<kRectPosUVAttribs>(1);
    }
}

}  // namespace

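// Draws a rect from four fan-ordered vertices indexed as two triangles via the
// context's quad index buffer. Positions are transformed to device space up
// front so consecutive rect draws can batch even across view-matrix changes.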
void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkMatrix* matrix,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState::AutoColorRestore acr;

    GrDrawState* drawState = this->drawState();

    GrColor color = drawState->getColor();

    int colorOffset, localOffset;
    set_vertex_attributes(drawState,
                          this->caps()->dualSourceBlendingSupport() || drawState->hasSolidCoverage(),
                          NULL != localRect,
                          &colorOffset, &localOffset);
    if (colorOffset >= 0) {
        // We set the draw state's color to white here. This is done so that any batching performed
        // in our subclass's onDraw() won't get a false from GrDrawState::op== due to a color
        // mismatch. TODO: Once vertex layout is owned by GrDrawState it should skip comparing the
        // constant color in its op== when the kColor layout bit is set and then we can remove
        // this.
        acr.set(drawState, 0xFFFFFFFF);
    }

    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes.
    SkMatrix combinedMatrix;
    if (NULL != matrix) {
        combinedMatrix = *matrix;
    } else {
        combinedMatrix.reset();
    }
    combinedMatrix.postConcat(drawState->getViewMatrix());
    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vsize = drawState->getVertexSize();

    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vsize);
    combinedMatrix.mapPointsWithStride(geo.positions(), vsize, 4);

    SkRect devBounds;
    // Since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vsize, 4, &devBounds);

    if (localOffset >= 0) {
        GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(geo.vertices()) + localOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vsize);
        if (NULL != localMatrix) {
            localMatrix->mapPointsWithStride(coords, vsize, 4);
        }
    }

    if (colorOffset >= 0) {
        GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + colorOffset);
        for (int i = 0; i < 4; ++i) {
            *vertColor = color;
            vertColor = (GrColor*) ((intptr_t) vertColor + vsize);
        }
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr above is valid
    SkASSERT(this->drawState() == drawState);
}

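// Conservatively tests whether devBounds lies entirely inside the current
// clip. For rect clips the answer is cached in fClipProxy; the proxy is
// extended past render-target edges so clips coincident with the viewport
// don't yield false negatives.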
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fClipProxy. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::Make(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}

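// Tries to fold info's instances into the most recently recorded draw rather
// than recording a new one. Returns the number of instances concatenated
// (possibly 0). Only draws sharing the same pool VB, the same IB, and the same
// per-instance geometry are eligible.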
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // We only attempt to concat when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check that the previous command is a compatible draw that uses the same VB from the pool
    // and the same IB.
    if (kDraw_Cmd != fCmds.back()) {
        return 0;
    }

    DrawRecord* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->fVertexBuffer != vertexBuffer ||
        draw->fIndexBuffer != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = GrMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexSize();
    poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);
    return instancesToConcat;
}

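// RAII helper: set() disables the clip bit on a draw state whose clip check is
// known to be unnecessary, and the destructor re-enables it.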
class AutoClipReenable {
public:
    AutoClipReenable() : fDrawState(NULL) {}
    ~AutoClipReenable() {
        if (NULL != fDrawState) {
            fDrawState->enableState(GrDrawState::kClip_StateBit);
        }
    }
    void set(GrDrawState* drawState) {
        if (drawState->isClipState()) {
            fDrawState = drawState;
            drawState->disableState(GrDrawState::kClip_StateBit);
        }
    }
private:
    GrDrawState* fDrawState;
};

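// Records a draw. First the clip bit is dropped if quickInsideClip() proves
// clipping unnecessary, then any pending clip/state changes are recorded, and
// finally the draw either concatenates onto the previous instanced draw or is
// appended as a new DrawRecord that refs its vertex/index buffers.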
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    if (drawState.isClipState() &&
        NULL != info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
        this->recordClip();
    }
    if (this->needsNewState()) {
        this->recordState();
    }

    DrawRecord* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            draw = this->recordDraw(info);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = this->recordDraw(info);
    }

    switch (this->getGeomSrc().fVertexSrc) {
        case kBuffer_GeometrySrcType:
            draw->fVertexBuffer = this->getGeomSrc().fVertexBuffer;
            break;
        case kReserved_GeometrySrcType: // fallthrough
        case kArray_GeometrySrcType: {
            size_t vertexBytes = (info.vertexCount() + info.startVertex()) *
                                 drawState.getVertexSize();
            poolState.fUsedPoolVertexBytes = GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
            draw->fVertexBuffer = poolState.fPoolVertexBuffer;
            draw->adjustStartVertex(poolState.fPoolStartVertex);
            break;
        }
        default:
            GrCrash("unknown geom src type");
    }
    draw->fVertexBuffer->ref();

    if (info.isIndexed()) {
        switch (this->getGeomSrc().fIndexSrc) {
            case kBuffer_GeometrySrcType:
                draw->fIndexBuffer = this->getGeomSrc().fIndexBuffer;
                break;
            case kReserved_GeometrySrcType: // fallthrough
            case kArray_GeometrySrcType: {
                size_t indexBytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
                poolState.fUsedPoolIndexBytes = GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
                draw->fIndexBuffer = poolState.fPoolIndexBuffer;
                draw->adjustStartIndex(poolState.fPoolStartIndex);
                break;
            }
            default:
                GrCrash("unknown geom src type");
        }
        draw->fIndexBuffer->ref();
    } else {
        draw->fIndexBuffer = NULL;
    }
}

GrInOrderDrawBuffer::StencilPath::StencilPath() {}
GrInOrderDrawBuffer::DrawPath::DrawPath() {}

void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path stenciling?
    if (this->needsNewState()) {
        this->recordState();
    }
    StencilPath* sp = this->recordStencilPath();
    sp->fPath.reset(path);
    path->ref();
    sp->fFill = fill;
}

void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
                                     SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
    if (this->needsNewClip()) {
        this->recordClip();
    }
    // TODO: Only compare the subset of GrDrawState relevant to path covering?
    if (this->needsNewState()) {
        this->recordState();
    }
    DrawPath* cp = this->recordDrawPath();
    cp->fPath.reset(path);
    path->ref();
    cp->fFill = fill;
    if (NULL != dstCopy) {
        cp->fDstCopy = *dstCopy;
    }
}

void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
                                bool canIgnoreRect, GrRenderTarget* renderTarget) {
    SkIRect r;
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        SkASSERT(NULL != renderTarget);
    }
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = this->recordClear();
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fCanIgnoreRect = canIgnoreRect;
    clr->fRenderTarget = renderTarget;
    renderTarget->ref();
}

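// Discards all recorded commands, unrefs the buffers held by recorded draws,
// and returns both geometry pools to their initial state.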
void GrInOrderDrawBuffer::reset() {
    SkASSERT(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();
    int numDraws = fDraws.count();
    for (int d = 0; d < numDraws; ++d) {
        // we always have a VB, but not always an IB
        SkASSERT(NULL != fDraws[d].fVertexBuffer);
        fDraws[d].fVertexBuffer->unref();
        SkSafeUnref(fDraws[d].fIndexBuffer);
    }
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fDrawPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fClipOrigins.reset();
    fCopySurfaces.reset();
    fClipSet = true;
}

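// Replays every recorded command on fDstGpu in recording order, then resets
// the buffer. fFlushing guards against reentrant flushes.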
void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    fVertexPool.unlock();
    fIndexPool.unlock();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    GrDrawState playbackState;
    GrDrawState* prevDrawState = fDstGpu->drawState();
    prevDrawState->ref();
    fDstGpu->setDrawState(&playbackState);

    GrClipData clipData;

    int currState = 0;
    int currClip = 0;
    int currClear = 0;
    int currDraw = 0;
    int currStencilPath = 0;
    int currDrawPath = 0;
    int currCopySurface = 0;

    for (int c = 0; c < numCmds; ++c) {
        switch (fCmds[c]) {
            case kDraw_Cmd: {
                const DrawRecord& draw = fDraws[currDraw];
                fDstGpu->setVertexSourceToBuffer(draw.fVertexBuffer);
                if (draw.isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(draw.fIndexBuffer);
                }
                fDstGpu->executeDraw(draw);

                ++currDraw;
                break;
            }
            case kStencilPath_Cmd: {
                const StencilPath& sp = fStencilPaths[currStencilPath];
                fDstGpu->stencilPath(sp.fPath.get(), sp.fFill);
                ++currStencilPath;
                break;
            }
            case kDrawPath_Cmd: {
                const DrawPath& cp = fDrawPaths[currDrawPath];
                fDstGpu->executeDrawPath(cp.fPath.get(), cp.fFill,
                                         NULL != cp.fDstCopy.texture() ? &cp.fDstCopy : NULL);
                ++currDrawPath;
                break;
            }
            case kSetState_Cmd:
                fStates[currState].restoreTo(&playbackState);
                ++currState;
                break;
            case kSetClip_Cmd:
                clipData.fClipStack = &fClips[currClip];
                clipData.fOrigin = fClipOrigins[currClip];
                fDstGpu->setClip(&clipData);
                ++currClip;
                break;
            case kClear_Cmd:
                fDstGpu->clear(&fClears[currClear].fRect,
                               fClears[currClear].fColor,
                               fClears[currClear].fCanIgnoreRect,
                               fClears[currClear].fRenderTarget);
                ++currClear;
                break;
            case kCopySurface_Cmd:
                fDstGpu->copySurface(fCopySurfaces[currCopySurface].fDst.get(),
                                     fCopySurfaces[currCopySurface].fSrc.get(),
                                     fCopySurfaces[currCopySurface].fSrcRect,
                                     fCopySurfaces[currCopySurface].fDstPoint);
                ++currCopySurface;
                break;
        }
    }
    // we should have consumed all the states, clips, etc.
    SkASSERT(fStates.count() == currState);
    SkASSERT(fClips.count() == currClip);
    SkASSERT(fClipOrigins.count() == currClip);
    SkASSERT(fClears.count() == currClear);
    SkASSERT(fDraws.count() == currDraw);
    SkASSERT(fCopySurfaces.count() == currCopySurface);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}

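// Records the copy only if fDstGpu will be able to perform it at flush time;
// otherwise records nothing and returns false.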
bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
                                        GrSurface* src,
                                        const SkIRect& srcRect,
                                        const SkIPoint& dstPoint) {
    if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
        CopySurface* cs = this->recordCopySurface();
        cs->fDst.reset(SkRef(dst));
        cs->fSrc.reset(SkRef(src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return true;
    } else {
        return false;
    }
}

bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}

void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}

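// Called before the client reserves geometry. Flushes the pending commands
// when it is safe to do so and geometryHints() recommends it, so the pools can
// be recycled instead of growing.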
void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
                                                         int indexCount) {
    // We use geometryHints() to know whether to flush the draw buffer. We
    // can't flush if we are inside an unbalanced pushGeometrySource.
    // Moreover, flushing blows away vertex and index data that was
    // previously reserved. So if the vertex or index data is pulled from
    // reserved space and won't be released by this request then we can't
    // flush.
    bool insideGeoPush = fGeoPoolStateStack.count() > 1;

    bool unreleasedVertexSpace =
        !vertexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;

    bool unreleasedIndexSpace =
        !indexCount &&
        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;

    // we don't want to finalize any reserved geom on the target since
    // we don't know that the client has finished writing to it.
    bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();

    int vcount = vertexCount;
    int icount = indexCount;

    if (!insideGeoPush &&
        !unreleasedVertexSpace &&
        !unreleasedIndexSpace &&
        !targetHasReservedGeom &&
        this->geometryHints(&vcount, &icount)) {

        this->flush();
    }
}

bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
                                        int* indexCount) const {
    // we will recommend a flush if the data could fit in a single
    // preallocated buffer but none are left and it can't fit
    // in the current buffer (which may not be prealloced).
    bool flush = false;
    if (NULL != indexCount) {
        int32_t currIndices = fIndexPool.currentBufferIndices();
        if (*indexCount > currIndices &&
            (!fIndexPool.preallocatedBuffersRemaining() &&
             *indexCount <= fIndexPool.preallocatedBufferIndices())) {

            flush = true;
        }
        *indexCount = currIndices;
    }
    if (NULL != vertexCount) {
        size_t vertexSize = this->getDrawState().getVertexSize();
        int32_t currVertices = fVertexPool.currentBufferVertices(vertexSize);
        if (*vertexCount > currVertices &&
            (!fVertexPool.preallocatedBuffersRemaining() &&
             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexSize))) {

            flush = true;
        }
        *vertexCount = currVertices;
    }
    return flush;
}

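// The onReserve* methods hand out space from the vertex/index pools. The pool
// reports which physical buffer and starting element the space landed in;
// that is stashed in the pool state so recorded draws can later be rebased
// onto the real buffers (see onDraw()).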
bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
                                               int vertexCount,
                                               void** vertices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(vertexCount > 0);
    SkASSERT(NULL != vertices);
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);

    *vertices = fVertexPool.makeSpace(vertexSize,
                                      vertexCount,
                                      &poolState.fPoolVertexBuffer,
                                      &poolState.fPoolStartVertex);
    return NULL != *vertices;
}

bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(indexCount > 0);
    SkASSERT(NULL != indices);
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);

    *indices = fIndexPool.makeSpace(indexCount,
                                    &poolState.fPoolIndexBuffer,
                                    &poolState.fPoolStartIndex);
    return NULL != *indices;
}

void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
    size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}

void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at
    // the tail.
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}

void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
                                                   int vertexCount) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}

void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handle it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}

void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handle it
    // by copying their array into reserved space.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}

void GrInOrderDrawBuffer::geometrySourceWillPush() {
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
}

void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restoredState) {
    SkASSERT(fGeoPoolStateStack.count() > 1);
    fGeoPoolStateStack.pop_back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    // We have to assume that any slack we had in our vertex/index data
    // is now unreleasable because data may have been appended later in the
    // pool.
    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
        kArray_GeometrySrcType == restoredState.fVertexSrc) {
        poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
    }
    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
        kArray_GeometrySrcType == restoredState.fIndexSrc) {
        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
                                        restoredState.fIndexCount;
    }
}

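// A new state (or clip) command is recorded only when it differs from the last
// one recorded, so redundant SetState/SetClip commands are elided.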
bool GrInOrderDrawBuffer::needsNewState() const {
    return fStates.empty() || !fStates.back().isEqual(this->getDrawState());
}

bool GrInOrderDrawBuffer::needsNewClip() const {
    SkASSERT(fClips.count() == fClipOrigins.count());
    if (this->getDrawState().isClipState()) {
        if (fClipSet &&
            (fClips.empty() ||
             fClips.back() != *this->getClip()->fClipStack ||
             fClipOrigins.back() != this->getClip()->fOrigin)) {
            return true;
        }
    }
    return false;
}

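// Each record* helper appends its command token to fCmds and pushes a matching
// payload entry onto the corresponding array; flush() consumes the tokens and
// payloads in lockstep.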
void GrInOrderDrawBuffer::recordClip() {
    fClips.push_back() = *this->getClip()->fClipStack;
    fClipOrigins.push_back() = this->getClip()->fOrigin;
    fClipSet = false;
    fCmds.push_back(kSetClip_Cmd);
}

void GrInOrderDrawBuffer::recordState() {
    fStates.push_back().saveFrom(this->getDrawState());
    fCmds.push_back(kSetState_Cmd);
}

GrInOrderDrawBuffer::DrawRecord* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info) {
    fCmds.push_back(kDraw_Cmd);
    return &fDraws.push_back(info);
}

GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath() {
    fCmds.push_back(kStencilPath_Cmd);
    return &fStencilPaths.push_back();
}

GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath() {
    fCmds.push_back(kDrawPath_Cmd);
    return &fDrawPaths.push_back();
}

GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear() {
    fCmds.push_back(kClear_Cmd);
    return &fClears.push_back();
}

GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface() {
    fCmds.push_back(kCopySurface_Cmd);
    return &fCopySurfaces.push_back();
}

void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
    INHERITED::clipWillBeSet(newClipData);
    fClipSet = true;
    fClipProxyState = kUnknown_ClipProxyState;
}