1
2 /*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
10 #include "GrContext.h"
11
12 #include "effects/GrConvolutionEffect.h"
13 #include "effects/GrSingleTextureEffect.h"
14 #include "effects/GrConfigConversionEffect.h"
15
16 #include "GrBufferAllocPool.h"
17 #include "GrGpu.h"
18 #include "GrIndexBuffer.h"
19 #include "GrInOrderDrawBuffer.h"
20 #include "GrPathRenderer.h"
21 #include "GrPathUtils.h"
22 #include "GrResourceCache.h"
23 #include "GrSoftwarePathRenderer.h"
24 #include "GrStencilBuffer.h"
25 #include "GrTextStrike.h"
26 #include "SkStrokeRec.h"
27 #include "SkTLazy.h"
28 #include "SkTLS.h"
29 #include "SkTrace.h"
30
31 SK_DEFINE_INST_COUNT(GrContext)
32 SK_DEFINE_INST_COUNT(GrDrawState)
33
34 // It can be useful to set this to kNo_BufferedDraw to test whether a bug is caused by using the
35 // InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
36 // debugging easier.
37 #define DEFAULT_BUFFERING (GR_DISABLE_DRAW_BUFFERING ? kNo_BufferedDraw : kYes_BufferedDraw)
38
39 #define MAX_BLUR_SIGMA 4.0f
40
41 // When we're using coverage AA but the blend is incompatible (given gpu
42 // limitations) should we disable AA or draw wrong?
43 #define DISABLE_COVERAGE_AA_FOR_BLEND 1
44
45 #if GR_DEBUG
46 // change this to a 1 to see notifications when partial coverage fails
47 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
48 #else
49 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
50 #endif
51
52 static const size_t MAX_TEXTURE_CACHE_COUNT = 2048;
53 static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024;
54
55 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
56 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
57
58 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
59 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
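// For reference: (1 << 15) is a 32KB vertex buffer and (1 << 11) a 2KB index
// buffer, with four of each preallocated for the draw buffer's pools.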
60
61 #define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
62
63 GrContext* GrContext::Create(GrBackend backend, GrBackendContext context) {
64 GrContext* ctx = NULL;
65 GrGpu* fGpu = GrGpu::Create(backend, context);
66 if (NULL != fGpu) {
67 ctx = SkNEW_ARGS(GrContext, (fGpu));
68 fGpu->unref();
69 }
70 return ctx;
71 }
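// Illustrative usage only (not part of this file): for a GL backend, creation
// typically looks roughly like the sketch below, though the exact interface
// setup depends on the platform glue and build configuration (assumption):
//
//     const GrGLInterface* iface = GrGLCreateNativeInterface();
//     GrContext* context = GrContext::Create(kOpenGL_GrBackend,
//                                            (GrBackendContext) iface);
//
// Note that Create() unrefs the GrGpu it made before returning, so the caller
// only has the returned context itself to manage.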
72
73 namespace {
74 void* CreateThreadInstanceCount() {
75 return SkNEW_ARGS(int, (0));
76 }
77 void DeleteThreadInstanceCount(void* v) {
78 delete reinterpret_cast<int*>(v);
79 }
80 #define THREAD_INSTANCE_COUNT \
81 (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, \
82 DeleteThreadInstanceCount)))
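// THREAD_INSTANCE_COUNT expands to an int kept in thread-local storage via
// SkTLS, so the count reported by GetThreadInstanceCount() is per-thread
// rather than global.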
83
84 }
85
86 int GrContext::GetThreadInstanceCount() {
87 return THREAD_INSTANCE_COUNT;
88 }
89
90 GrContext::~GrContext() {
91 for (int i = 0; i < fCleanUpData.count(); ++i) {
92 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
93 }
94
95 this->flush();
96
97 // Since the gpu can hold scratch textures, give it a chance to let go
98 // of them before freeing the texture cache
99 fGpu->purgeResources();
100
101 delete fTextureCache;
102 fTextureCache = NULL;
103 delete fFontCache;
104 delete fDrawBuffer;
105 delete fDrawBufferVBAllocPool;
106 delete fDrawBufferIBAllocPool;
107
108 fAARectRenderer->unref();
109
110 fGpu->unref();
111 GrSafeUnref(fPathRendererChain);
112 GrSafeUnref(fSoftwarePathRenderer);
113 fDrawState->unref();
114
115 --THREAD_INSTANCE_COUNT;
116 }
117
118 void GrContext::contextLost() {
119 contextDestroyed();
120 this->setupDrawBuffer();
121 }
122
123 void GrContext::contextDestroyed() {
124 // abandon first so destructors
125 // don't try to free the resources in the API.
126 fGpu->abandonResources();
127
128 // a path renderer may be holding onto resources that
129 // are now unusable
130 GrSafeSetNull(fPathRendererChain);
131 GrSafeSetNull(fSoftwarePathRenderer);
132
133 delete fDrawBuffer;
134 fDrawBuffer = NULL;
135
136 delete fDrawBufferVBAllocPool;
137 fDrawBufferVBAllocPool = NULL;
138
139 delete fDrawBufferIBAllocPool;
140 fDrawBufferIBAllocPool = NULL;
141
142 fAARectRenderer->reset();
143
144 fTextureCache->purgeAllUnlocked();
145 fFontCache->freeAll();
146 fGpu->markContextDirty();
147 }
148
149 void GrContext::resetContext() {
150 fGpu->markContextDirty();
151 }
152
153 void GrContext::freeGpuResources() {
154 this->flush();
155
156 fGpu->purgeResources();
157
158 fAARectRenderer->reset();
159
160 fTextureCache->purgeAllUnlocked();
161 fFontCache->freeAll();
162 // a path renderer may be holding onto resources
163 GrSafeSetNull(fPathRendererChain);
164 GrSafeSetNull(fSoftwarePathRenderer);
165 }
166
167 size_t GrContext::getGpuTextureCacheBytes() const {
168 return fTextureCache->getCachedResourceBytes();
169 }
170
171 ////////////////////////////////////////////////////////////////////////////////
172
173 namespace {
174
175 void scale_rect(SkRect* rect, float xScale, float yScale) {
176 rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale));
177 rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale));
178 rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale));
179 rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale));
180 }
181
182 float adjust_sigma(float sigma, int *scaleFactor, int *radius) {
183 *scaleFactor = 1;
184 while (sigma > MAX_BLUR_SIGMA) {
185 *scaleFactor *= 2;
186 sigma *= 0.5f;
187 }
188 *radius = static_cast<int>(ceilf(sigma * 3.0f));
189 GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius);
190 return sigma;
191 }
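// Worked example (illustration only): adjust_sigma(10.0f, ...) halves sigma
// twice (10 -> 5 -> 2.5), leaving *scaleFactor == 4 and
// *radius == ceil(2.5 * 3) == 8. The caller is then expected to downscale the
// image by scaleFactor before convolving, which is what keeps the kernel
// radius within GrConvolutionEffect::kMaxKernelRadius.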
192
193 void convolve_gaussian(GrDrawTarget* target,
194 GrTexture* texture,
195 const SkRect& rect,
196 float sigma,
197 int radius,
198 Gr1DKernelEffect::Direction direction) {
199 GrRenderTarget* rt = target->drawState()->getRenderTarget();
200 GrDrawTarget::AutoStateRestore asr(target, GrDrawTarget::kReset_ASRInit);
201 GrDrawState* drawState = target->drawState();
202 drawState->setRenderTarget(rt);
203 SkAutoTUnref<GrEffectRef> conv(GrConvolutionEffect::CreateGaussian(texture,
204 direction,
205 radius,
206 sigma));
207 drawState->setEffect(0, conv);
208 target->drawSimpleRect(rect, NULL);
209 }
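// Note: a 2D Gaussian is separable, so a full blur presumably applies
// convolve_gaussian twice, once with Gr1DKernelEffect::kX_Direction and once
// with kY_Direction, rendering into intermediate targets between passes.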
210
211 }
212
213 ////////////////////////////////////////////////////////////////////////////////
214
215 GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
216 const GrCacheID& cacheID,
217 const GrTextureParams* params) {
218 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
219 GrResource* resource = fTextureCache->find(resourceKey);
220 SkSafeRef(resource);
221 return static_cast<GrTexture*>(resource);
222 }
223
224 bool GrContext::isTextureInCache(const GrTextureDesc& desc,
225 const GrCacheID& cacheID,
226 const GrTextureParams* params) const {
227 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
228 return fTextureCache->hasKey(resourceKey);
229 }
230
231 void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
232 ASSERT_OWNED_RESOURCE(sb);
233
234 GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
235 sb->height(),
236 sb->numSamples());
237 fTextureCache->addResource(resourceKey, sb);
238 }
239
240 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
241 int sampleCnt) {
242 GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
243 height,
244 sampleCnt);
245 GrResource* resource = fTextureCache->find(resourceKey);
246 return static_cast<GrStencilBuffer*>(resource);
247 }
248
249 static void stretchImage(void* dst,
250 int dstW,
251 int dstH,
252 void* src,
253 int srcW,
254 int srcH,
255 int bpp) {
256 GrFixed dx = (srcW << 16) / dstW;
257 GrFixed dy = (srcH << 16) / dstH;
258
259 GrFixed y = dy >> 1;
260
261 int dstXLimit = dstW*bpp;
262 for (int j = 0; j < dstH; ++j) {
263 GrFixed x = dx >> 1;
264 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
265 void* dstRow = (uint8_t*)dst + j*dstW*bpp;
266 for (int i = 0; i < dstXLimit; i += bpp) {
267 memcpy((uint8_t*) dstRow + i,
268 (uint8_t*) srcRow + (x>>16)*bpp,
269 bpp);
270 x += dx;
271 }
272 y += dy;
273 }
274 }
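// stretchImage() above is a plain nearest-neighbor resize: dx/dy are 16.16
// fixed-point source steps, and starting x/y at dx>>1 / dy>>1 samples each
// destination pixel near the center of its source interval. No filtering is
// performed (see the TODO in createResizedTexture below).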
275
276 // The desired texture is NPOT and tiled but that isn't supported by
277 // the current hardware. Resize the texture to be a POT
278 GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
279 const GrCacheID& cacheID,
280 void* srcData,
281 size_t rowBytes,
282 bool needsFiltering) {
283 SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
284 if (NULL == clampedTexture) {
285 clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
286
287 if (NULL == clampedTexture) {
288 return NULL;
289 }
290 }
291
292 GrTextureDesc rtDesc = desc;
293 rtDesc.fFlags = rtDesc.fFlags |
294 kRenderTarget_GrTextureFlagBit |
295 kNoStencil_GrTextureFlagBit;
296 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64));
297 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
298
299 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
300
301 if (NULL != texture) {
302 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
303 GrDrawState* drawState = fGpu->drawState();
304 drawState->setRenderTarget(texture->asRenderTarget());
305
306 // if filtering is not desired then we want to ensure all
307 // texels in the resampled image are copies of texels from
308 // the original.
309 GrTextureParams params(SkShader::kClamp_TileMode, needsFiltering);
310 drawState->createTextureEffect(0, clampedTexture, SkMatrix::I(), params);
311
312 static const GrVertexLayout layout = GrDrawState::StageTexCoordVertexLayoutBit(0,0);
313 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
314
315 if (arg.succeeded()) {
316 GrPoint* verts = (GrPoint*) arg.vertices();
317 verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
318 verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
319 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
320 }
321 texture->releaseRenderTarget();
322 } else {
323 // TODO: Our CPU stretch doesn't filter. But we create separate
324 // stretched textures when the texture params is either filtered or
325 // not. Either implement filtered stretch blit on CPU or just create
326 // one when FBO case fails.
327
328 rtDesc.fFlags = kNone_GrTextureFlags;
329 // no longer need to clamp at min RT size.
330 rtDesc.fWidth = GrNextPow2(desc.fWidth);
331 rtDesc.fHeight = GrNextPow2(desc.fHeight);
332 int bpp = GrBytesPerPixel(desc.fConfig);
333 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
334 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
335 srcData, desc.fWidth, desc.fHeight, bpp);
336
337 size_t stretchedRowBytes = rtDesc.fWidth * bpp;
338
339 SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
340 GrAssert(NULL != texture);
341 }
342
343 return texture;
344 }
345
346 GrTexture* GrContext::createTexture(const GrTextureParams* params,
347 const GrTextureDesc& desc,
348 const GrCacheID& cacheID,
349 void* srcData,
350 size_t rowBytes) {
351 SK_TRACE_EVENT0("GrContext::createTexture");
352
353 #if GR_DUMP_TEXTURE_UPLOAD
354 GrPrintf("GrContext::createTexture[%d %d]\n", desc.fWidth, desc.fHeight);
355 #endif
356
357 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
358
359 GrTexture* texture;
360 if (GrTexture::NeedsResizing(resourceKey)) {
361 texture = this->createResizedTexture(desc, cacheID,
362 srcData, rowBytes,
363 GrTexture::NeedsFiltering(resourceKey));
364 } else {
365 texture = fGpu->createTexture(desc, srcData, rowBytes);
366 }
367
368 if (NULL != texture) {
369 fTextureCache->addResource(resourceKey, texture);
370 }
371
372 return texture;
373 }
374
375 GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
376 GrTextureDesc desc = inDesc;
377
378 GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) ||
379 !(desc.fFlags & kNoStencil_GrTextureFlagBit));
380
381 if (kApprox_ScratchTexMatch == match) {
382 // bin by pow2 with a reasonable min
383 static const int MIN_SIZE = 256;
384 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
385 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
386 }
387
388 GrResource* resource = NULL;
389 int origWidth = desc.fWidth;
390 int origHeight = desc.fHeight;
391 bool doubledW = false;
392 bool doubledH = false;
393
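// For an approximate match the loop below tries keys in this order: the
// request as-is, then without kNoStencil (if it was set), then with the width
// doubled, then with the height doubled (width restored). Only if all of
// those miss do we fall through and create a new scratch texture.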
394 do {
395 GrResourceKey key = GrTexture::ComputeScratchKey(desc);
396 // Ensure we have exclusive access to the texture so future 'find' calls don't return it
397 resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
398 if (NULL != resource) {
399 resource->ref();
400 break;
401 }
402 if (kExact_ScratchTexMatch == match) {
403 break;
404 }
405 // We had a cache miss and we are in approx mode, relax the fit of the flags... then try
406 // doubling width... then the height.
407
408 // We no longer try to reuse textures that were previously used as render targets in
409 // situations where no RT is needed; doing otherwise can confuse the video driver and
410 // cause significant performance problems in some cases.
411 if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
412 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
413 } else if (!doubledW) {
414 desc.fFlags = inDesc.fFlags;
415 desc.fWidth *= 2;
416 doubledW = true;
417 } else if (!doubledH) {
418 desc.fFlags = inDesc.fFlags;
419 desc.fWidth = origWidth;
420 desc.fHeight *= 2;
421 doubledH = true;
422 } else {
423 break;
424 }
425
426 } while (true);
427
428 if (NULL == resource) {
429 desc.fFlags = inDesc.fFlags;
430 desc.fWidth = origWidth;
431 desc.fHeight = origHeight;
432 GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
433 if (NULL != texture) {
434 GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
435 // Make the resource exclusive so future 'find' calls don't return it
436 fTextureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
437 resource = texture;
438 }
439 }
440
441 return static_cast<GrTexture*>(resource);
442 }
443
444 void GrContext::addExistingTextureToCache(GrTexture* texture) {
445
446 if (NULL == texture) {
447 return;
448 }
449
450 // This texture should already have a cache entry since it was once
451 // attached
452 GrAssert(NULL != texture->getCacheEntry());
453
454 // Conceptually, the cache entry is going to assume responsibility
455 // for the creation ref.
456 GrAssert(1 == texture->getRefCnt());
457
458 // Since this texture came from an AutoScratchTexture it should
459 // still be in the exclusive pile
460 fTextureCache->makeNonExclusive(texture->getCacheEntry());
461
462 this->purgeCache();
463 }
464
465
466 void GrContext::unlockScratchTexture(GrTexture* texture) {
467 ASSERT_OWNED_RESOURCE(texture);
468 GrAssert(NULL != texture->getCacheEntry());
469
470 // If this is a scratch texture we detached it from the cache
471 // while it was locked (to avoid two callers simultaneously getting
472 // the same texture).
473 if (texture->getCacheEntry()->key().isScratch()) {
474 fTextureCache->makeNonExclusive(texture->getCacheEntry());
475 }
476
477 this->purgeCache();
478 }
479
480 void GrContext::purgeCache() {
481 if (NULL != fTextureCache) {
482 fTextureCache->purgeAsNeeded();
483 }
484 }
485
486 GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
487 void* srcData,
488 size_t rowBytes) {
489 GrTextureDesc descCopy = descIn;
490 return fGpu->createTexture(descCopy, srcData, rowBytes);
491 }
492
493 void GrContext::getTextureCacheLimits(int* maxTextures,
494 size_t* maxTextureBytes) const {
495 fTextureCache->getLimits(maxTextures, maxTextureBytes);
496 }
497
498 void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
499 fTextureCache->setLimits(maxTextures, maxTextureBytes);
500 }
501
502 int GrContext::getMaxTextureSize() const {
503 return fGpu->getCaps().maxTextureSize();
504 }
505
506 int GrContext::getMaxRenderTargetSize() const {
507 return fGpu->getCaps().maxRenderTargetSize();
508 }
509
510 ///////////////////////////////////////////////////////////////////////////////
511
512 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
513 return fGpu->wrapBackendTexture(desc);
514 }
515
516 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
517 return fGpu->wrapBackendRenderTarget(desc);
518 }
519
520 ///////////////////////////////////////////////////////////////////////////////
521
522 bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
523 int width, int height) const {
524 const GrDrawTarget::Caps& caps = fGpu->getCaps();
525 if (!caps.eightBitPaletteSupport()) {
526 return false;
527 }
528
529 bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
530
531 if (!isPow2) {
532 bool tiled = NULL != params && params->isTiled();
533 if (tiled && !caps.npotTextureTileSupport()) {
534 return false;
535 }
536 }
537 return true;
538 }
539
540 ////////////////////////////////////////////////////////////////////////////////
541
542 const GrClipData* GrContext::getClip() const {
543 return fGpu->getClip();
544 }
545
546 void GrContext::setClip(const GrClipData* clipData) {
547 fGpu->setClip(clipData);
548
549 fDrawState->setState(GrDrawState::kClip_StateBit,
550 clipData && clipData->fClipStack && !clipData->fClipStack->isWideOpen());
551 }
552
553 ////////////////////////////////////////////////////////////////////////////////
554
555 void GrContext::clear(const GrIRect* rect,
556 const GrColor color,
557 GrRenderTarget* target) {
558 this->prepareToDraw(NULL, DEFAULT_BUFFERING)->clear(rect, color, target);
559 }
560
561 void GrContext::drawPaint(const GrPaint& origPaint) {
562 // set rect to be big enough to fill the space, but not super-huge, so we
563 // don't overflow fixed-point implementations
564 GrRect r;
565 r.setLTRB(0, 0,
566 SkIntToScalar(getRenderTarget()->width()),
567 SkIntToScalar(getRenderTarget()->height()));
568 SkMatrix inverse;
569 SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
570 AutoMatrix am;
571
572 // We attempt to map r by the inverse matrix and draw that. mapRect will
573 // map the four corners and bound them with a new rect. This will not
574 // produce a correct result for some perspective matrices.
575 if (!this->getMatrix().hasPerspective()) {
576 if (!fDrawState->getViewInverse(&inverse)) {
577 GrPrintf("Could not invert matrix\n");
578 return;
579 }
580 inverse.mapRect(&r);
581 } else {
582 if (!am.setIdentity(this, paint.writable())) {
583 GrPrintf("Could not invert matrix\n");
584 return;
585 }
586 }
587 // by definition this fills the entire clip, no need for AA
588 if (paint->isAntiAlias()) {
589 paint.writable()->setAntiAlias(false);
590 }
591 this->drawRect(*paint, r);
592 }
593
594 ////////////////////////////////////////////////////////////////////////////////
595
596 namespace {
597 inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
598 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
599 }
600 }
601
602 ////////////////////////////////////////////////////////////////////////////////
603
604 /* create a triangle strip that strokes the specified rect. There are 8
605 unique vertices, but we repeat the last 2 to close up. Alternatively we
606 could use an indices array, and then only send 8 verts, but not sure that
607 would be faster.
608 */
609 static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
610 SkScalar width) {
611 const SkScalar rad = SkScalarHalf(width);
612 rect.sort();
613
614 verts[0].set(rect.fLeft + rad, rect.fTop + rad);
615 verts[1].set(rect.fLeft - rad, rect.fTop - rad);
616 verts[2].set(rect.fRight - rad, rect.fTop + rad);
617 verts[3].set(rect.fRight + rad, rect.fTop - rad);
618 verts[4].set(rect.fRight - rad, rect.fBottom - rad);
619 verts[5].set(rect.fRight + rad, rect.fBottom + rad);
620 verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
621 verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
622 verts[8] = verts[0];
623 verts[9] = verts[1];
624 }
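// Even-indexed verts above are the inner corners and odd-indexed verts the
// outer corners, walking around the rect, so consecutive triangles in the
// strip tile one quad per edge of the stroked border; repeating verts 0 and 1
// closes the loop.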
625
626 /**
627 * Returns true if the rect's edges are integer-aligned.
628 */
629 static bool isIRect(const GrRect& r) {
630 return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
631 SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
632 }
633
634 static bool apply_aa_to_rect(GrDrawTarget* target,
635 const GrRect& rect,
636 SkScalar width,
637 const SkMatrix* matrix,
638 SkMatrix* combinedMatrix,
639 GrRect* devRect,
640 bool* useVertexCoverage) {
641 // we use a simple coverage ramp to do aa on axis-aligned rects
642 // we check that the mapped rect will be axis-aligned and that it won't land
643 // exactly on integer coords (in which case no aa is needed).
644
645 // we are keeping around the "tweak the alpha" trick because
646 // it is our only hope for the fixed-pipe implementation.
647 // In a shader implementation we can give a separate coverage input
648 // TODO: remove this ugliness when we drop the fixed-pipe impl
649 *useVertexCoverage = false;
650 if (!target->canTweakAlphaForCoverage()) {
651 if (disable_coverage_aa_for_blend(target)) {
652 #if GR_DEBUG
653 //GrPrintf("Turning off AA to correctly apply blend.\n");
654 #endif
655 return false;
656 } else {
657 *useVertexCoverage = true;
658 }
659 }
660 const GrDrawState& drawState = target->getDrawState();
661 if (drawState.getRenderTarget()->isMultisampled()) {
662 return false;
663 }
664
665 if (0 == width && target->willUseHWAALines()) {
666 return false;
667 }
668
669 if (!drawState.getViewMatrix().preservesAxisAlignment()) {
670 return false;
671 }
672
673 if (NULL != matrix &&
674 !matrix->preservesAxisAlignment()) {
675 return false;
676 }
677
678 *combinedMatrix = drawState.getViewMatrix();
679 if (NULL != matrix) {
680 combinedMatrix->preConcat(*matrix);
681 GrAssert(combinedMatrix->preservesAxisAlignment());
682 }
683
684 combinedMatrix->mapRect(devRect, rect);
685 devRect->sort();
686
687 if (width < 0) {
688 return !isIRect(*devRect);
689 } else {
690 return true;
691 }
692 }
693
694 void GrContext::drawRect(const GrPaint& paint,
695 const GrRect& rect,
696 SkScalar width,
697 const SkMatrix* matrix) {
698 SK_TRACE_EVENT0("GrContext::drawRect");
699
700 GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
701 GrDrawState::AutoStageDisable atr(fDrawState);
702
703 GrRect devRect = rect;
704 SkMatrix combinedMatrix;
705 bool useVertexCoverage;
706 bool needAA = paint.isAntiAlias() &&
707 !this->getRenderTarget()->isMultisampled();
708 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
709 &combinedMatrix, &devRect,
710 &useVertexCoverage);
711
712 if (doAA) {
713 GrDrawState::AutoDeviceCoordDraw adcd(target->drawState());
714 if (!adcd.succeeded()) {
715 return;
716 }
717 if (width >= 0) {
718 GrVec strokeSize;
719 if (width > 0) {
720 strokeSize.set(width, width);
721 combinedMatrix.mapVectors(&strokeSize, 1);
722 strokeSize.setAbs(strokeSize);
723 } else {
724 strokeSize.set(SK_Scalar1, SK_Scalar1);
725 }
726 fAARectRenderer->strokeAARect(this->getGpu(), target, devRect,
727 strokeSize, useVertexCoverage);
728 } else {
729 fAARectRenderer->fillAARect(this->getGpu(), target,
730 devRect, useVertexCoverage);
731 }
732 return;
733 }
734
735 if (width >= 0) {
736 // TODO: consider making static vertex buffers for these cases.
737 // Hairline could be done by just adding closing vertex to
738 // unitSquareVertexBuffer()
739
740 static const int worstCaseVertCount = 10;
741 GrDrawTarget::AutoReleaseGeometry geo(target, 0, worstCaseVertCount, 0);
742
743 if (!geo.succeeded()) {
744 GrPrintf("Failed to get space for vertices!\n");
745 return;
746 }
747
748 GrPrimitiveType primType;
749 int vertCount;
750 GrPoint* vertex = geo.positions();
751
752 if (width > 0) {
753 vertCount = 10;
754 primType = kTriangleStrip_GrPrimitiveType;
755 setStrokeRectStrip(vertex, rect, width);
756 } else {
757 // hairline
758 vertCount = 5;
759 primType = kLineStrip_GrPrimitiveType;
760 vertex[0].set(rect.fLeft, rect.fTop);
761 vertex[1].set(rect.fRight, rect.fTop);
762 vertex[2].set(rect.fRight, rect.fBottom);
763 vertex[3].set(rect.fLeft, rect.fBottom);
764 vertex[4].set(rect.fLeft, rect.fTop);
765 }
766
767 GrDrawState::AutoViewMatrixRestore avmr;
768 if (NULL != matrix) {
769 GrDrawState* drawState = target->drawState();
770 avmr.set(drawState, *matrix);
771 }
772
773 target->drawNonIndexed(primType, 0, vertCount);
774 } else {
775 #if GR_STATIC_RECT_VB
776 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
777 if (NULL == sqVB) {
778 GrPrintf("Failed to create static rect vb.\n");
779 return;
780 }
781 target->setVertexSourceToBuffer(0, sqVB);
782 GrDrawState* drawState = target->drawState();
783 SkMatrix m;
784 m.setAll(rect.width(), 0, rect.fLeft,
785 0, rect.height(), rect.fTop,
786 0, 0, SkMatrix::I()[8]);
787
788 if (NULL != matrix) {
789 m.postConcat(*matrix);
790 }
791 GrDrawState::AutoViewMatrixRestore avmr(drawState, m);
792
793 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
794 #else
795 target->drawSimpleRect(rect, matrix);
796 #endif
797 }
798 }
799
800 void GrContext::drawRectToRect(const GrPaint& paint,
801 const GrRect& dstRect,
802 const GrRect& srcRect,
803 const SkMatrix* dstMatrix,
804 const SkMatrix* srcMatrix) {
805 SK_TRACE_EVENT0("GrContext::drawRectToRect");
806
807 // srcRect refers to paint's first color stage
808 if (!paint.isColorStageEnabled(0)) {
809 drawRect(paint, dstRect, -1, dstMatrix);
810 return;
811 }
812
813 GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
814
815 #if GR_STATIC_RECT_VB
816 GrDrawState::AutoStageDisable atr(fDrawState);
817 GrDrawState* drawState = target->drawState();
818
819 SkMatrix m;
820
821 m.setAll(dstRect.width(), 0, dstRect.fLeft,
822 0, dstRect.height(), dstRect.fTop,
823 0, 0, SkMatrix::I()[8]);
824 if (NULL != dstMatrix) {
825 m.postConcat(*dstMatrix);
826 }
827
828 // The first color stage's coords come from srcRect rather than applying a matrix to dstRect.
829 // We explicitly compute a matrix for that stage below, no need to adjust here.
830 static const uint32_t kExplicitCoordMask = 1 << GrPaint::kFirstColorStage;
831 GrDrawState::AutoViewMatrixRestore avmr(drawState, m, kExplicitCoordMask);
832
833 m.setAll(srcRect.width(), 0, srcRect.fLeft,
834 0, srcRect.height(), srcRect.fTop,
835 0, 0, SkMatrix::I()[8]);
836 if (NULL != srcMatrix) {
837 m.postConcat(*srcMatrix);
838 }
839
840 drawState->preConcatStageMatrices(kExplicitCoordMask, m);
841
842 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
843 if (NULL == sqVB) {
844 GrPrintf("Failed to create static rect vb.\n");
845 return;
846 }
847 target->setVertexSourceToBuffer(0, sqVB);
848 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
849 #else
850 GrDrawState::AutoStageDisable atr(fDrawState);
851
852 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
853 const SkMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
854 srcRects[0] = &srcRect;
855 srcMatrices[0] = srcMatrix;
856
857 target->drawRect(dstRect, dstMatrix, srcRects, srcMatrices);
858 #endif
859 }
860
861 void GrContext::drawVertices(const GrPaint& paint,
862 GrPrimitiveType primitiveType,
863 int vertexCount,
864 const GrPoint positions[],
865 const GrPoint texCoords[],
866 const GrColor colors[],
867 const uint16_t indices[],
868 int indexCount) {
869 SK_TRACE_EVENT0("GrContext::drawVertices");
870
871 GrDrawTarget::AutoReleaseGeometry geo;
872
873 GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
874 GrDrawState::AutoStageDisable atr(fDrawState);
875
876 GrVertexLayout layout = 0;
877 if (NULL != texCoords) {
878 layout |= GrDrawState::StageTexCoordVertexLayoutBit(0, 0);
879 }
880 if (NULL != colors) {
881 layout |= GrDrawState::kColor_VertexLayoutBit;
882 }
883 int vertexSize = GrDrawState::VertexSize(layout);
884
885 if (sizeof(GrPoint) != vertexSize) {
886 if (!geo.set(target, layout, vertexCount, 0)) {
887 GrPrintf("Failed to get space for vertices!\n");
888 return;
889 }
890 int texOffsets[GrDrawState::kMaxTexCoords];
891 int colorOffset;
892 GrDrawState::VertexSizeAndOffsetsByIdx(layout,
893 texOffsets,
894 &colorOffset,
895 NULL,
896 NULL);
897 void* curVertex = geo.vertices();
898
899 for (int i = 0; i < vertexCount; ++i) {
900 *((GrPoint*)curVertex) = positions[i];
901
902 if (texOffsets[0] > 0) {
903 *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
904 }
905 if (colorOffset > 0) {
906 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
907 }
908 curVertex = (void*)((intptr_t)curVertex + vertexSize);
909 }
910 } else {
911 target->setVertexSourceToArray(layout, positions, vertexCount);
912 }
913
914 // we don't currently apply offscreen AA to this path. Need improved
915 // management of GrDrawTarget's geometry to avoid copying points per-tile.
916
917 if (NULL != indices) {
918 target->setIndexSourceToArray(indices, indexCount);
919 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
920 } else {
921 target->drawNonIndexed(primitiveType, 0, vertexCount);
922 }
923 }
924
925 ///////////////////////////////////////////////////////////////////////////////
926 namespace {
927
928 struct CircleVertex {
929 GrPoint fPos;
930 GrPoint fCenter;
931 SkScalar fOuterRadius;
932 SkScalar fInnerRadius;
933 };
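// For circles (see internalDrawOval below) fOuterRadius/fInnerRadius are the
// outer and inner edge radii in device space; for axis-aligned ellipses
// fOuterRadius holds the x radius and fInnerRadius holds the x/y radius ratio,
// which the kEllipse edge type presumably uses to map the ellipse onto a
// circle in the shader.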
934
935 inline bool circleStaysCircle(const SkMatrix& m) {
936 return m.isSimilarity();
937 }
938
939 }
940
941 void GrContext::drawOval(const GrPaint& paint,
942 const GrRect& oval,
943 const SkStrokeRec& stroke) {
944
945 if (!canDrawOval(paint, oval, stroke)) {
946 SkPath path;
947 path.addOval(oval);
948 this->drawPath(paint, path, stroke);
949 return;
950 }
951
952 internalDrawOval(paint, oval, stroke);
953 }
954
955 bool GrContext::canDrawOval(const GrPaint& paint, const GrRect& oval, const SkStrokeRec& stroke) const {
956
957 if (!paint.isAntiAlias()) {
958 return false;
959 }
960
961 // we can draw circles in any style
962 bool isCircle = SkScalarNearlyEqual(oval.width(), oval.height())
963 && circleStaysCircle(this->getMatrix());
964 // and for now, axis-aligned ellipses only with fill or stroke-and-fill
965 SkStrokeRec::Style style = stroke.getStyle();
966 bool isStroke = (style == SkStrokeRec::kStroke_Style || style == SkStrokeRec::kHairline_Style);
967 bool isFilledAxisAlignedEllipse = this->getMatrix().rectStaysRect() && !isStroke;
968
969 return isCircle || isFilledAxisAlignedEllipse;
970 }
971
972 void GrContext::internalDrawOval(const GrPaint& paint,
973 const GrRect& oval,
974 const SkStrokeRec& stroke) {
975
976 SkScalar xRadius = SkScalarHalf(oval.width());
977 SkScalar yRadius = SkScalarHalf(oval.height());
978
979 SkScalar strokeWidth = stroke.getWidth();
980 SkStrokeRec::Style style = stroke.getStyle();
981
982 bool isCircle = SkScalarNearlyEqual(xRadius, yRadius) && circleStaysCircle(this->getMatrix());
983 #ifdef SK_DEBUG
984 {
985 // we should have checked for this previously
986 bool isStroke = (style == SkStrokeRec::kStroke_Style || style == SkStrokeRec::kHairline_Style);
987 bool isFilledAxisAlignedEllipse = this->getMatrix().rectStaysRect() && !isStroke;
988 SkASSERT(paint.isAntiAlias() && (isCircle || isFilledAxisAlignedEllipse));
989 }
990 #endif
991
992 GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
993
994 GrDrawState* drawState = target->drawState();
995 GrDrawState::AutoStageDisable atr(fDrawState);
996 const SkMatrix vm = drawState->getViewMatrix();
997
998 const GrRenderTarget* rt = drawState->getRenderTarget();
999 if (NULL == rt) {
1000 return;
1001 }
1002
1003 GrDrawState::AutoDeviceCoordDraw adcd(drawState);
1004 if (!adcd.succeeded()) {
1005 return;
1006 }
1007
1008 GrVertexLayout layout = GrDrawState::kEdge_VertexLayoutBit;
1009 GrAssert(sizeof(CircleVertex) == GrDrawState::VertexSize(layout));
1010
1011 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
1012 if (!geo.succeeded()) {
1013 GrPrintf("Failed to get space for vertices!\n");
1014 return;
1015 }
1016
1017 CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());
1018
1019 GrPoint center = GrPoint::Make(oval.centerX(), oval.centerY());
1020 vm.mapPoints(&center, 1);
1021
1022 SkScalar L;
1023 SkScalar R;
1024 SkScalar T;
1025 SkScalar B;
1026
1027 if (isCircle) {
1028 drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType);
1029
1030 xRadius = vm.mapRadius(xRadius);
1031
1032 SkScalar outerRadius = xRadius;
1033 SkScalar innerRadius = 0;
1034 SkScalar halfWidth = 0;
1035 if (style != SkStrokeRec::kFill_Style) {
1036 strokeWidth = vm.mapRadius(strokeWidth);
1037 if (SkScalarNearlyZero(strokeWidth)) {
1038 halfWidth = SK_ScalarHalf;
1039 } else {
1040 halfWidth = SkScalarHalf(strokeWidth);
1041 }
1042
1043 outerRadius += halfWidth;
1044 if (style == SkStrokeRec::kStroke_Style || style == SkStrokeRec::kHairline_Style) {
1045 innerRadius = SkMaxScalar(0, xRadius - halfWidth);
1046 }
1047 }
1048
1049 for (int i = 0; i < 4; ++i) {
1050 verts[i].fCenter = center;
1051 verts[i].fOuterRadius = outerRadius;
1052 verts[i].fInnerRadius = innerRadius;
1053 }
1054
1055 L = -outerRadius;
1056 R = +outerRadius;
1057 T = -outerRadius;
1058 B = +outerRadius;
1059 } else { // is axis-aligned ellipse
1060 drawState->setVertexEdgeType(GrDrawState::kEllipse_EdgeType);
1061
1062 SkRect xformedRect;
1063 vm.mapRect(&xformedRect, oval);
1064
1065 xRadius = SkScalarHalf(xformedRect.width());
1066 yRadius = SkScalarHalf(xformedRect.height());
1067
1068 if (style == SkStrokeRec::kStrokeAndFill_Style && strokeWidth > 0.0f) {
1069 SkScalar halfWidth = SkScalarHalf(strokeWidth);
1070 // do (potentially) anisotropic mapping
1071 SkVector scaledStroke;
1072 scaledStroke.set(halfWidth, halfWidth);
1073 vm.mapVectors(&scaledStroke, 1);
1074 // this is legit only if scale & translation (which should be the case at the moment)
1075 xRadius += scaledStroke.fX;
1076 yRadius += scaledStroke.fY;
1077 }
1078
1079 SkScalar ratio = SkScalarDiv(xRadius, yRadius);
1080
1081 for (int i = 0; i < 4; ++i) {
1082 verts[i].fCenter = center;
1083 verts[i].fOuterRadius = xRadius;
1084 verts[i].fInnerRadius = ratio;
1085 }
1086
1087 L = -xRadius;
1088 R = +xRadius;
1089 T = -yRadius;
1090 B = +yRadius;
1091 }
1092
1093 // The fragment shader will extend the radius out half a pixel
1094 // to antialias. Expand the drawn rect here so all the pixels
1095 // will be captured.
1096 L += center.fX - SK_ScalarHalf;
1097 R += center.fX + SK_ScalarHalf;
1098 T += center.fY - SK_ScalarHalf;
1099 B += center.fY + SK_ScalarHalf;
1100
1101 verts[0].fPos = SkPoint::Make(L, T);
1102 verts[1].fPos = SkPoint::Make(R, T);
1103 verts[2].fPos = SkPoint::Make(L, B);
1104 verts[3].fPos = SkPoint::Make(R, B);
1105
1106 target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4);
1107 }
1108
1109 void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {
1110
1111 if (path.isEmpty()) {
1112 if (path.isInverseFillType()) {
1113 this->drawPaint(paint);
1114 }
1115 return;
1116 }
1117
1118 SkRect ovalRect;
1119 bool isOval = path.isOval(&ovalRect);
1120
1121 if (isOval && !path.isInverseFillType() && this->canDrawOval(paint, ovalRect, stroke)) {
1122 this->drawOval(paint, ovalRect, stroke);
1123 return;
1124 }
1125
1126 this->internalDrawPath(paint, path, stroke);
1127 }
1128
1129 void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {
1130
1131 // Note that below we may sw-rasterize the path into a scratch texture.
1132 // Scratch textures can be recycled after they are returned to the texture
1133 // cache. This presents a potential hazard for buffered drawing. However,
1134 // the writePixels that uploads to the scratch will perform a flush so we're
1135 // OK.
1136 GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
1137 GrDrawState::AutoStageDisable atr(fDrawState);
1138
1139 bool prAA = paint.isAntiAlias() && !this->getRenderTarget()->isMultisampled();
1140
1141 // An assumption here is that the path renderer will use some form of tweaking
1142 // the src color (either the input alpha or in the frag shader) to implement
1143 // aa. If we have some future driver-mojo path AA that can do the right
1144 // thing WRT to the blend then we'll need some query on the PR.
1145 if (disable_coverage_aa_for_blend(target)) {
1146 #if GR_DEBUG
1147 //GrPrintf("Turning off AA to correctly apply blend.\n");
1148 #endif
1149 prAA = false;
1150 }
1151
1152 GrPathRendererChain::DrawType type = prAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
1153 GrPathRendererChain::kColor_DrawType;
1154
1155 const SkPath* pathPtr = &path;
1156 SkPath tmpPath;
1157 SkStrokeRec strokeRec(stroke);
1158
1159 // Try a 1st time without stroking the path and without allowing the SW renderer
1160 GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type);
1161
1162 if (NULL == pr) {
1163 if (!strokeRec.isHairlineStyle()) {
1164 // It didn't work the 1st time, so try again with the stroked path
1165 if (strokeRec.applyToPath(&tmpPath, *pathPtr)) {
1166 pathPtr = &tmpPath;
1167 strokeRec.setFillStyle();
1168 }
1169 }
1170 // This time, allow SW renderer
1171 pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type);
1172 }
1173
1174 if (NULL == pr) {
1175 #if GR_DEBUG
1176 GrPrintf("Unable to find path renderer compatible with path.\n");
1177 #endif
1178 return;
1179 }
1180
1181 pr->drawPath(*pathPtr, strokeRec, target, prAA);
1182 }
1183
1184 ////////////////////////////////////////////////////////////////////////////////
1185
1186 void GrContext::flush(int flagsBitfield) {
1187 if (kDiscard_FlushBit & flagsBitfield) {
1188 fDrawBuffer->reset();
1189 } else {
1190 this->flushDrawBuffer();
1191 }
1192 if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
1193 fGpu->forceRenderTargetFlush();
1194 }
1195 }
1196
1197 void GrContext::flushDrawBuffer() {
1198 if (fDrawBuffer) {
1199 // With addition of the AA clip path, flushing the draw buffer can
1200 // result in the generation of an AA clip mask. During this
1201 // process the SW path renderer may be invoked which recursively
1202 // calls this method (via internalWriteTexturePixels) creating
1203 // infinite recursion
1204 GrInOrderDrawBuffer* temp = fDrawBuffer;
1205 fDrawBuffer = NULL;
1206
1207 temp->flushTo(fGpu);
1208
1209 fDrawBuffer = temp;
1210 }
1211 }
1212
1213 void GrContext::writeTexturePixels(GrTexture* texture,
1214 int left, int top, int width, int height,
1215 GrPixelConfig config, const void* buffer, size_t rowBytes,
1216 uint32_t flags) {
1217 SK_TRACE_EVENT0("GrContext::writeTexturePixels");
1218 ASSERT_OWNED_RESOURCE(texture);
1219
1220 // TODO: use scratch texture to perform conversion
1221 if (kUnpremul_PixelOpsFlag & flags) {
1222 return;
1223 }
1224 if (!(kDontFlush_PixelOpsFlag & flags)) {
1225 this->flush();
1226 }
1227
1228 fGpu->writeTexturePixels(texture, left, top, width, height,
1229 config, buffer, rowBytes);
1230 }
1231
1232 bool GrContext::readTexturePixels(GrTexture* texture,
1233 int left, int top, int width, int height,
1234 GrPixelConfig config, void* buffer, size_t rowBytes,
1235 uint32_t flags) {
1236 SK_TRACE_EVENT0("GrContext::readTexturePixels");
1237 ASSERT_OWNED_RESOURCE(texture);
1238
1239 // TODO: code read pixels for textures that aren't also rendertargets
1240 GrRenderTarget* target = texture->asRenderTarget();
1241 if (NULL != target) {
1242 return this->readRenderTargetPixels(target,
1243 left, top, width, height,
1244 config, buffer, rowBytes,
1245 flags);
1246 } else {
1247 return false;
1248 }
1249 }
1250
1251 #include "SkConfig8888.h"
1252
1253 namespace {
1254 /**
1255 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
1256 * formats are representable as Config8888 and so the function returns false
1257 * if the GrPixelConfig has no equivalent Config8888.
1258 */
1259 bool grconfig_to_config8888(GrPixelConfig config,
1260 bool unpremul,
1261 SkCanvas::Config8888* config8888) {
1262 switch (config) {
1263 case kRGBA_8888_GrPixelConfig:
1264 if (unpremul) {
1265 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
1266 } else {
1267 *config8888 = SkCanvas::kRGBA_Premul_Config8888;
1268 }
1269 return true;
1270 case kBGRA_8888_GrPixelConfig:
1271 if (unpremul) {
1272 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
1273 } else {
1274 *config8888 = SkCanvas::kBGRA_Premul_Config8888;
1275 }
1276 return true;
1277 default:
1278 return false;
1279 }
1280 }
1281
1282 // Returns a config where the byte positions of the R & B components are swapped in
1283 // relation to the input config. This should only be called with the result of
1284 // grconfig_to_config8888 as it will fail for other configs.
1285 SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
1286 switch (config8888) {
1287 case SkCanvas::kBGRA_Premul_Config8888:
1288 return SkCanvas::kRGBA_Premul_Config8888;
1289 case SkCanvas::kBGRA_Unpremul_Config8888:
1290 return SkCanvas::kRGBA_Unpremul_Config8888;
1291 case SkCanvas::kRGBA_Premul_Config8888:
1292 return SkCanvas::kBGRA_Premul_Config8888;
1293 case SkCanvas::kRGBA_Unpremul_Config8888:
1294 return SkCanvas::kBGRA_Unpremul_Config8888;
1295 default:
1296 GrCrash("Unexpected input");
1297 return SkCanvas::kBGRA_Unpremul_Config8888;
1298 }
1299 }
1300 }
1301
1302 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1303 int left, int top, int width, int height,
1304 GrPixelConfig config, void* buffer, size_t rowBytes,
1305 uint32_t flags) {
1306 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
1307 ASSERT_OWNED_RESOURCE(target);
1308
1309 if (NULL == target) {
1310 target = fDrawState->getRenderTarget();
1311 if (NULL == target) {
1312 return false;
1313 }
1314 }
1315
1316 if (!(kDontFlush_PixelOpsFlag & flags)) {
1317 this->flush();
1318 }
1319
1320 // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.
1321
1322 // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1323 // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1324 bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1325 width, height, config,
1326 rowBytes);
1327 bool swapRAndB = fGpu->preferredReadPixelsConfig(config) == GrPixelConfigSwapRAndB(config);
1328
1329 bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
1330
1331 // flipY will get set to false when it is handled below using a scratch. However, in that case
1332 // we still want to do the read upside down.
1333 bool readUpsideDown = flipY;
1334
1335 if (unpremul && kRGBA_8888_GrPixelConfig != config && kBGRA_8888_GrPixelConfig != config) {
1336 // The unpremul flag is only allowed for these two configs.
1337 return false;
1338 }
1339
1340 GrPixelConfig readConfig;
1341 if (swapRAndB) {
1342 readConfig = GrPixelConfigSwapRAndB(config);
1343 GrAssert(kUnknown_GrPixelConfig != config);
1344 } else {
1345 readConfig = config;
1346 }
1347
1348 // If the src is a texture and we would have to do conversions after read pixels, we instead
1349 // do the conversions by drawing the src to a scratch texture. If we handle any of the
1350 // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1351 // on the read back pixels.
1352 GrTexture* src = target->asTexture();
1353 GrAutoScratchTexture ast;
1354 if (NULL != src && (swapRAndB || unpremul || flipY)) {
1355 // Make the scratch a render target because we don't have a robust readTexturePixels as of
1356 // yet. It calls this function.
1357 GrTextureDesc desc;
1358 desc.fFlags = kRenderTarget_GrTextureFlagBit;
1359 desc.fWidth = width;
1360 desc.fHeight = height;
1361 desc.fConfig = readConfig;
1362
1363 // When a full readback is faster than a partial we could always make the scratch exactly
1364 // match the passed rect. However, if we see many different size rectangles we will trash
1365 // our texture cache and pay the cost of creating and destroying many textures. So, we only
1366 // request an exact match when the caller is reading an entire RT.
1367 ScratchTexMatch match = kApprox_ScratchTexMatch;
1368 if (0 == left &&
1369 0 == top &&
1370 target->width() == width &&
1371 target->height() == height &&
1372 fGpu->fullReadPixelsIsFasterThanPartial()) {
1373 match = kExact_ScratchTexMatch;
1374 }
1375 ast.set(this, desc, match);
1376 GrTexture* texture = ast.texture();
1377 if (texture) {
1378 // compute a matrix to perform the draw
1379 SkMatrix textureMatrix;
1380 if (flipY) {
1381 textureMatrix.setTranslate(SK_Scalar1 * left,
1382 SK_Scalar1 * (top + height));
1383 textureMatrix.set(SkMatrix::kMScaleY, -SK_Scalar1);
1384 } else {
1385 textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
1386 }
1387 textureMatrix.postIDiv(src->width(), src->height());
1388
1389 SkAutoTUnref<const GrEffectRef> effect;
1390 if (unpremul) {
1391 effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1392 if (NULL != effect) {
1393 unpremul = false; // we no longer need to do this on CPU after the readback.
1394 }
1395 }
1396 // If we failed to create a PM->UPM effect and have no other conversions to perform then
1397 // there is no longer any point to using the scratch.
1398 if (NULL != effect || flipY || swapRAndB) {
1399 if (!effect) {
1400 effect.reset(GrConfigConversionEffect::Create(
1401 src,
1402 swapRAndB,
1403 GrConfigConversionEffect::kNone_PMConversion,
1404 textureMatrix));
1405 }
1406 swapRAndB = false; // we will handle the swap in the draw.
1407 flipY = false; // we already incorporated the y flip in the matrix
1408
1409 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1410 GrDrawState* drawState = fGpu->drawState();
1411 GrAssert(effect);
1412 drawState->setEffect(0, effect);
1413
1414 drawState->setRenderTarget(texture->asRenderTarget());
1415 GrRect rect = GrRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1416 fGpu->drawSimpleRect(rect, NULL);
1417 // we want to read back from the scratch's origin
1418 left = 0;
1419 top = 0;
1420 target = texture->asRenderTarget();
1421 }
1422 }
1423 }
1424 if (!fGpu->readPixels(target,
1425 left, top, width, height,
1426 readConfig, buffer, rowBytes, readUpsideDown)) {
1427 return false;
1428 }
1429 // Perform any conversions we weren't able to perform using a scratch texture.
1430 if (unpremul || swapRAndB || flipY) {
1431 // These are initialized to suppress a warning
1432 SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
1433 SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;
1434
1435 bool c8888IsValid = grconfig_to_config8888(config, false, &srcC8888);
1436 grconfig_to_config8888(config, unpremul, &dstC8888);
1437
1438 if (swapRAndB) {
1439 GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
1440 srcC8888 = swap_config8888_red_and_blue(srcC8888);
1441 }
1442 if (flipY) {
1443 size_t tightRB = width * GrBytesPerPixel(config);
1444 if (0 == rowBytes) {
1445 rowBytes = tightRB;
1446 }
1447 SkAutoSTMalloc<256, uint8_t> tempRow(tightRB);
1448 intptr_t top = reinterpret_cast<intptr_t>(buffer);
1449 intptr_t bot = top + (height - 1) * rowBytes;
1450 while (top < bot) {
1451 uint32_t* t = reinterpret_cast<uint32_t*>(top);
1452 uint32_t* b = reinterpret_cast<uint32_t*>(bot);
1453 uint32_t* temp = reinterpret_cast<uint32_t*>(tempRow.get());
1454 memcpy(temp, t, tightRB);
1455 if (c8888IsValid) {
1456 SkConvertConfig8888Pixels(t, tightRB, dstC8888,
1457 b, tightRB, srcC8888,
1458 width, 1);
1459 SkConvertConfig8888Pixels(b, tightRB, dstC8888,
1460 temp, tightRB, srcC8888,
1461 width, 1);
1462 } else {
1463 memcpy(t, b, tightRB);
1464 memcpy(b, temp, tightRB);
1465 }
1466 top += rowBytes;
1467 bot -= rowBytes;
1468 }
1469 // The above loop does nothing on the middle row when height is odd.
1470 if (top == bot && c8888IsValid && dstC8888 != srcC8888) {
1471 uint32_t* mid = reinterpret_cast<uint32_t*>(top);
1472 SkConvertConfig8888Pixels(mid, tightRB, dstC8888, mid, tightRB, srcC8888, width, 1);
1473 }
1474 } else {
1475 // if we aren't flipping Y then we have no reason to be here other than doing
1476 // conversions for 8888 (r/b swap or upm).
1477 GrAssert(c8888IsValid);
1478 uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
1479 SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
1480 b32, rowBytes, srcC8888,
1481 width, height);
1482 }
1483 }
1484 return true;
1485 }
1486
1487 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1488 GrAssert(target);
1489 ASSERT_OWNED_RESOURCE(target);
1490 // In the future we may track whether there are any pending draws to this
1491 // target. We don't today so we always perform a flush. We don't promise
1492 // this to our clients, though.
1493 this->flush();
1494 fGpu->resolveRenderTarget(target);
1495 }
1496
1497 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1498 if (NULL == src || NULL == dst) {
1499 return;
1500 }
1501 ASSERT_OWNED_RESOURCE(src);
1502
1503 // Writes pending to the source texture are not tracked, so a flush
1504 // is required to ensure that the copy captures the most recent contents
1505 // of the source texture. See similar behavior in
1506 // GrContext::resolveRenderTarget.
1507 this->flush();
1508
1509 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1510 GrDrawState* drawState = fGpu->drawState();
1511 drawState->setRenderTarget(dst);
1512 SkMatrix sampleM;
1513 sampleM.setIDiv(src->width(), src->height());
1514 SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1515 if (NULL != topLeft) {
1516 srcRect.offset(*topLeft);
1517 }
1518 SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
1519 if (!srcRect.intersect(srcBounds)) {
1520 return;
1521 }
1522 sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
1523 drawState->createTextureEffect(0, src, sampleM);
1524 SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
1525 fGpu->drawSimpleRect(dstR, NULL);
1526 }
1527
1528 void GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1529 int left, int top, int width, int height,
1530 GrPixelConfig config,
1531 const void* buffer,
1532 size_t rowBytes,
1533 uint32_t flags) {
1534 SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
1535 ASSERT_OWNED_RESOURCE(target);
1536
1537 if (NULL == target) {
1538 target = fDrawState->getRenderTarget();
1539 if (NULL == target) {
1540 return;
1541 }
1542 }
1543
1544 // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
1545 // desktop GL).
1546
1547 // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1548 // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
1549 // set.)
1550
1551 // If the RT is also a texture and we don't have to premultiply then take the texture path.
1552 // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
1553 // we do below.
1554
1555 #if !GR_MAC_BUILD
1556 // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1557 // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
1558 // HW is affected.
1559 if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags)) {
1560 this->writeTexturePixels(target->asTexture(),
1561 left, top, width, height,
1562 config, buffer, rowBytes, flags);
1563 return;
1564 }
1565 #endif
1566
1567 bool swapRAndB = (fGpu->preferredReadPixelsConfig(config) == GrPixelConfigSwapRAndB(config));
1568
1569 GrPixelConfig textureConfig;
1570 if (swapRAndB) {
1571 textureConfig = GrPixelConfigSwapRAndB(config);
1572 } else {
1573 textureConfig = config;
1574 }
1575
1576 GrTextureDesc desc;
1577 desc.fWidth = width;
1578 desc.fHeight = height;
1579 desc.fConfig = textureConfig;
1580 GrAutoScratchTexture ast(this, desc);
1581 GrTexture* texture = ast.texture();
1582 if (NULL == texture) {
1583 return;
1584 }
1585
1586 SkAutoTUnref<const GrEffectRef> effect;
1587 SkMatrix textureMatrix;
1588 textureMatrix.setIDiv(texture->width(), texture->height());
1589
1590 // allocate a tmp buffer and sw convert the pixels to premul
1591 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1592
1593 if (kUnpremul_PixelOpsFlag & flags) {
1594 if (kRGBA_8888_GrPixelConfig != config && kBGRA_8888_GrPixelConfig != config) {
1595 return;
1596 }
1597 effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1598 if (NULL == effect) {
1599 SkCanvas::Config8888 srcConfig8888, dstConfig8888;
1600 GR_DEBUGCODE(bool success = )
1601 grconfig_to_config8888(config, true, &srcConfig8888);
1602 GrAssert(success);
1603 GR_DEBUGCODE(success = )
1604 grconfig_to_config8888(config, false, &dstConfig8888);
1605 GrAssert(success);
1606 const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
1607 tmpPixels.reset(width * height);
1608 SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
1609 src, rowBytes, srcConfig8888,
1610 width, height);
1611 buffer = tmpPixels.get();
1612 rowBytes = 4 * width;
1613 }
1614 }
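    // No GPU premul conversion is needed (either the data was already premul or it was converted
    // in software above), so only the optional R/B swap remains to be handled when drawing the
    // scratch texture.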
1615 if (NULL == effect) {
1616 effect.reset(GrConfigConversionEffect::Create(texture,
1617 swapRAndB,
1618 GrConfigConversionEffect::kNone_PMConversion,
1619 textureMatrix));
1620 }
1621
1622 this->writeTexturePixels(texture,
1623 0, 0, width, height,
1624 textureConfig, buffer, rowBytes,
1625 flags & ~kUnpremul_PixelOpsFlag);
1626
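    // Draw the scratch texture into the target at (left, top), applying the conversion effect.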
1627 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1628 GrDrawState* drawState = fGpu->drawState();
1629 GrAssert(effect);
1630 drawState->setEffect(0, effect);
1631
1632 SkMatrix matrix;
1633 matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1634 drawState->setViewMatrix(matrix);
1635 drawState->setRenderTarget(target);
1636
1637 fGpu->drawSimpleRect(GrRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
1638 }
1639 ////////////////////////////////////////////////////////////////////////////////
1640
1641 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint, BufferedDraw buffered) {
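    // If the caller wants to draw directly to the GPU but the last draw went through the draw
    // buffer, flush the buffer first so draws are issued in order.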
1642 if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1643 this->flushDrawBuffer();
1644 fLastDrawWasBuffered = kNo_BufferedDraw;
1645 }
1646 if (NULL != paint) {
1647 GrAssert(fDrawState->stagesDisabled());
1648 fDrawState->setFromPaint(*paint);
1649 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1650 if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1651 !fGpu->canApplyCoverage()) {
1652 GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1653 }
1654 #endif
1655 }
1656 if (kYes_BufferedDraw == buffered) {
1657 fDrawBuffer->setClip(fGpu->getClip());
1658 fLastDrawWasBuffered = kYes_BufferedDraw;
1659 return fDrawBuffer;
1660 } else {
1661 GrAssert(kNo_BufferedDraw == buffered);
1662 return fGpu;
1663 }
1664 }
1665
1666 /*
1667 * This method finds a path renderer that can draw the specified path on
1668 * the provided target.
1669  * Due to its expense, the software path renderer has been split out so that
1670  * it can be individually allowed/disallowed via the "allowSW" boolean.
1671 */
1672 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1673 const SkStrokeRec& stroke,
1674 const GrDrawTarget* target,
1675 bool allowSW,
1676 GrPathRendererChain::DrawType drawType,
1677 GrPathRendererChain::StencilSupport* stencilSupport) {
1678
1679 if (NULL == fPathRendererChain) {
1680 fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1681 }
1682
1683 GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1684 stroke,
1685 target,
1686 drawType,
1687 stencilSupport);
1688
1689 if (NULL == pr && allowSW) {
1690 if (NULL == fSoftwarePathRenderer) {
1691 fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1692 }
1693 pr = fSoftwarePathRenderer;
1694 }
1695
1696 return pr;
1697 }
1698
1699 ////////////////////////////////////////////////////////////////////////////////
1700
1701 void GrContext::setRenderTarget(GrRenderTarget* target) {
1702 ASSERT_OWNED_RESOURCE(target);
1703 fDrawState->setRenderTarget(target);
1704 }
1705
1706 GrRenderTarget* GrContext::getRenderTarget() {
1707 return fDrawState->getRenderTarget();
1708 }
1709
1710 const GrRenderTarget* GrContext::getRenderTarget() const {
1711 return fDrawState->getRenderTarget();
1712 }
1713
1714 bool GrContext::isConfigRenderable(GrPixelConfig config) const {
1715 return fGpu->isConfigRenderable(config);
1716 }
1717
1718 const SkMatrix& GrContext::getMatrix() const {
1719 return fDrawState->getViewMatrix();
1720 }
1721
1722 void GrContext::setMatrix(const SkMatrix& m) {
1723 fDrawState->setViewMatrix(m);
1724 }
1725
1726 void GrContext::setIdentityMatrix() {
1727 fDrawState->viewMatrix()->reset();
1728 }
1729
1730 void GrContext::concatMatrix(const SkMatrix& m) const {
1731 fDrawState->preConcatViewMatrix(m);
1732 }
1733
1734 static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
1735 intptr_t mask = 1 << shift;
1736 if (pred) {
1737 bits |= mask;
1738 } else {
1739 bits &= ~mask;
1740 }
1741 return bits;
1742 }
1743
1744 GrContext::GrContext(GrGpu* gpu) {
1745 ++THREAD_INSTANCE_COUNT;
1746
1747 fGpu = gpu;
1748 fGpu->ref();
1749 fGpu->setContext(this);
1750
1751 fDrawState = SkNEW(GrDrawState);
1752 fGpu->setDrawState(fDrawState);
1753
1754 fPathRendererChain = NULL;
1755 fSoftwarePathRenderer = NULL;
1756
1757 fTextureCache = SkNEW_ARGS(GrResourceCache,
1758 (MAX_TEXTURE_CACHE_COUNT,
1759 MAX_TEXTURE_CACHE_BYTES));
1760 fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
1761
1762 fLastDrawWasBuffered = kNo_BufferedDraw;
1763
1764 fDrawBuffer = NULL;
1765 fDrawBufferVBAllocPool = NULL;
1766 fDrawBufferIBAllocPool = NULL;
1767
1768 fAARectRenderer = SkNEW(GrAARectRenderer);
1769
1770 fDidTestPMConversions = false;
1771
1772 this->setupDrawBuffer();
1773 }
1774
1775 void GrContext::setupDrawBuffer() {
1776
1777 GrAssert(NULL == fDrawBuffer);
1778 GrAssert(NULL == fDrawBufferVBAllocPool);
1779 GrAssert(NULL == fDrawBufferIBAllocPool);
1780
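    // The in-order draw buffer records draws into these pooled vertex/index buffers and replays
    // them to fGpu when flushed.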
1781 fDrawBufferVBAllocPool =
1782 SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1783 DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1784 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1785 fDrawBufferIBAllocPool =
1786 SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1787 DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1788 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1789
1790 fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1791 fDrawBufferVBAllocPool,
1792 fDrawBufferIBAllocPool));
1793
1794 if (fDrawBuffer) {
1795 fDrawBuffer->setAutoFlushTarget(fGpu);
1796 fDrawBuffer->setDrawState(fDrawState);
1797 }
1798 }
1799
1800 GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
1801 return prepareToDraw(&paint, DEFAULT_BUFFERING);
1802 }
1803
1804 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1805 return fGpu->getQuadIndexBuffer();
1806 }
1807
1808 namespace {
1809 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1810 GrConfigConversionEffect::PMConversion pmToUPM;
1811 GrConfigConversionEffect::PMConversion upmToPM;
1812 GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1813 *pmToUPMValue = pmToUPM;
1814 *upmToPMValue = upmToPM;
1815 }
1816 }
1817
1818 const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
1819 bool swapRAndB,
1820 const SkMatrix& matrix) {
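    // The supported PM <-> UPM conversions are probed once per context (lazily, on first use)
    // and cached in fPMToUPMConversion / fUPMToPMConversion.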
1821 if (!fDidTestPMConversions) {
1822 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1823 fDidTestPMConversions = true;
1824 }
1825 GrConfigConversionEffect::PMConversion pmToUPM =
1826 static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1827 if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1828 return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1829 } else {
1830 return NULL;
1831 }
1832 }
1833
1834 const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
1835 bool swapRAndB,
1836 const SkMatrix& matrix) {
1837 if (!fDidTestPMConversions) {
1838 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1839 fDidTestPMConversions = true;
1840 }
1841 GrConfigConversionEffect::PMConversion upmToPM =
1842 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1843 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1844 return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1845 } else {
1846 return NULL;
1847 }
1848 }
1849
1850 GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
1851 bool canClobberSrc,
1852 const SkRect& rect,
1853 float sigmaX, float sigmaY) {
1854 ASSERT_OWNED_RESOURCE(srcTexture);
1855
1856 AutoRenderTarget art(this);
1857
1858 AutoMatrix am;
1859 am.setIdentity(this);
1860
1861 SkIRect clearRect;
1862 int scaleFactorX, radiusX;
1863 int scaleFactorY, radiusY;
1864 sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
1865 sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);
1866
1867 SkRect srcRect(rect);
1868 scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
1869 srcRect.roundOut();
1870 scale_rect(&srcRect, static_cast<float>(scaleFactorX),
1871 static_cast<float>(scaleFactorY));
1872
1873 AutoClip acs(this, srcRect);
1874
1875 GrAssert(kBGRA_8888_GrPixelConfig == srcTexture->config() ||
1876 kRGBA_8888_GrPixelConfig == srcTexture->config() ||
1877 kAlpha_8_GrPixelConfig == srcTexture->config());
1878
1879 GrTextureDesc desc;
1880 desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
1881 desc.fWidth = SkScalarFloorToInt(srcRect.width());
1882 desc.fHeight = SkScalarFloorToInt(srcRect.height());
1883 desc.fConfig = srcTexture->config();
1884
1885 GrAutoScratchTexture temp1, temp2;
1886 GrTexture* dstTexture = temp1.set(this, desc);
1887 GrTexture* tempTexture = canClobberSrc ? srcTexture : temp2.set(this, desc);
1888 if (NULL == dstTexture || NULL == tempTexture) {
1889 return NULL;
1890 }
1891
1892 GrPaint paint;
1893 paint.reset();
1894
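    // Step 1: repeatedly downsample by 2x, ping-ponging between the scratch textures, until the
    // scale factors computed by adjust_sigma() are reached.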
1895 for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
1896 SkMatrix matrix;
1897 matrix.setIDiv(srcTexture->width(), srcTexture->height());
1898 this->setRenderTarget(dstTexture->asRenderTarget());
1899 SkRect dstRect(srcRect);
1900 scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
1901 i < scaleFactorY ? 0.5f : 1.0f);
1902
1903 paint.colorStage(0)->setEffect(GrSimpleTextureEffect::Create(srcTexture,
1904 matrix,
1905 true))->unref();
1906 this->drawRectToRect(paint, dstRect, srcRect);
1907 srcRect = dstRect;
1908 srcTexture = dstTexture;
1909 SkTSwap(dstTexture, tempTexture);
1910 }
1911
1912 SkIRect srcIRect;
1913 srcRect.roundOut(&srcIRect);
1914
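    // Step 2: convolve in X and then in Y, each pass reading from the previous result and
    // rendering into the next scratch texture.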
1915 if (sigmaX > 0.0f) {
1916 if (scaleFactorX > 1) {
1917 // Clear out a radius to the right of the srcRect to prevent the
1918 // X convolution from reading garbage.
1919 clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
1920 radiusX, srcIRect.height());
1921 this->clear(&clearRect, 0x0);
1922 }
1923
1924 this->setRenderTarget(dstTexture->asRenderTarget());
1925 GrDrawTarget* target = this->prepareToDraw(NULL, DEFAULT_BUFFERING);
1926 convolve_gaussian(target, srcTexture, srcRect, sigmaX, radiusX,
1927 Gr1DKernelEffect::kX_Direction);
1928 srcTexture = dstTexture;
1929 SkTSwap(dstTexture, tempTexture);
1930 }
1931
1932 if (sigmaY > 0.0f) {
1933 if (scaleFactorY > 1 || sigmaX > 0.0f) {
1934 // Clear out a radius below the srcRect to prevent the Y
1935 // convolution from reading garbage.
1936 clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
1937 srcIRect.width(), radiusY);
1938 this->clear(&clearRect, 0x0);
1939 }
1940
1941 this->setRenderTarget(dstTexture->asRenderTarget());
1942 GrDrawTarget* target = this->prepareToDraw(NULL, DEFAULT_BUFFERING);
1943 convolve_gaussian(target, srcTexture, srcRect, sigmaY, radiusY,
1944 Gr1DKernelEffect::kY_Direction);
1945 srcTexture = dstTexture;
1946 SkTSwap(dstTexture, tempTexture);
1947 }
1948
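    // Step 3: if we downsampled, scale the result back up to the original size with a bilinear
    // draw (see the FIXME below about using a better filter).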
1949 if (scaleFactorX > 1 || scaleFactorY > 1) {
1950 // Clear one pixel to the right and below, to accommodate bilinear
1951 // upsampling.
1952 clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
1953 srcIRect.width() + 1, 1);
1954 this->clear(&clearRect, 0x0);
1955 clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
1956 1, srcIRect.height());
1957 this->clear(&clearRect, 0x0);
1958 SkMatrix matrix;
1959 // FIXME: This should be mitchell, not bilinear.
1960 matrix.setIDiv(srcTexture->width(), srcTexture->height());
1961 this->setRenderTarget(dstTexture->asRenderTarget());
1962 paint.colorStage(0)->setEffect(GrSimpleTextureEffect::Create(srcTexture,
1963 matrix,
1964 true))->unref();
1965 SkRect dstRect(srcRect);
1966 scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
1967 this->drawRectToRect(paint, dstRect, srcRect);
1968 srcRect = dstRect;
1969 srcTexture = dstTexture;
1970 SkTSwap(dstTexture, tempTexture);
1971 }
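    // Return whichever texture holds the final result: detach it from the scratch-texture helper
    // so the caller owns a ref, or ref the caller's texture if the result ended up back in it.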
1972 if (srcTexture == temp1.texture()) {
1973 return temp1.detach();
1974 } else if (srcTexture == temp2.texture()) {
1975 return temp2.detach();
1976 } else {
1977 srcTexture->ref();
1978 return srcTexture;
1979 }
1980 }
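// A minimal usage sketch (hypothetical caller code, not part of this file), assuming "context"
// is a GrContext* and "src" is an RGBA GrTexture*: blur the whole texture by sigma = 2 in both
// directions without letting the source be clobbered. The returned texture, if non-NULL, carries
// a ref that the caller must eventually release (SkAutoTUnref handles that here).
//
//   SkRect rect = SkRect::MakeWH(SkIntToScalar(src->width()), SkIntToScalar(src->height()));
//   SkAutoTUnref<GrTexture> blurred(context->gaussianBlur(src, false, rect, 2.f, 2.f));
//   if (NULL != blurred.get()) {
//       // use blurred ...
//   }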
1981
1982 ///////////////////////////////////////////////////////////////////////////////
1983 #if GR_CACHE_STATS
1984 void GrContext::printCacheStats() const {
1985 fTextureCache->printStats();
1986 }
1987 #endif
1988