1
2 /*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
10 #include "GrContext.h"
11
12 #include "GrBufferAllocPool.h"
13 #include "GrClipIterator.h"
14 #include "GrGpu.h"
15 #include "GrIndexBuffer.h"
16 #include "GrInOrderDrawBuffer.h"
17 #include "GrPathRenderer.h"
18 #include "GrPathUtils.h"
19 #include "GrResourceCache.h"
20 #include "GrStencilBuffer.h"
21 #include "GrTextStrike.h"
22 #include "SkTLazy.h"
23 #include "SkTrace.h"
24
25 #define DEFER_TEXT_RENDERING 1
26
27 #define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB)
28
29 // When we're using coverage AA but the blend is incompatible (given gpu
30 // limitations) should we disable AA or draw wrong?
31 #define DISABLE_COVERAGE_AA_FOR_BLEND 1
32
33 static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
34 static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;
35
36 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 18;
37 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
38
39 // We are currently only batching Text and drawRectToRect, both
40 // of which use the quad index buffer.
41 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 0;
42 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 0;
43
44 #define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
45
46 GrContext* GrContext::Create(GrEngine engine,
47 GrPlatform3DContext context3D) {
48 GrContext* ctx = NULL;
49 GrGpu* fGpu = GrGpu::Create(engine, context3D);
50 if (NULL != fGpu) {
51 ctx = new GrContext(fGpu);
52 fGpu->unref();
53 }
54 return ctx;
55 }
56
57 GrContext::~GrContext() {
58 this->flush();
59 delete fTextureCache;
60 delete fFontCache;
61 delete fDrawBuffer;
62 delete fDrawBufferVBAllocPool;
63 delete fDrawBufferIBAllocPool;
64
65 GrSafeUnref(fAAFillRectIndexBuffer);
66 GrSafeUnref(fAAStrokeRectIndexBuffer);
67 fGpu->unref();
68 GrSafeUnref(fPathRendererChain);
69 }
70
71 void GrContext::contextLost() {
72 contextDestroyed();
73 this->setupDrawBuffer();
74 }
75
76 void GrContext::contextDestroyed() {
77 // abandon first so destructors
78 // don't try to free the resources in the API.
79 fGpu->abandonResources();
80
81 // a path renderer may be holding onto resources that
82 // are now unusable
83 GrSafeSetNull(fPathRendererChain);
84
85 delete fDrawBuffer;
86 fDrawBuffer = NULL;
87
88 delete fDrawBufferVBAllocPool;
89 fDrawBufferVBAllocPool = NULL;
90
91 delete fDrawBufferIBAllocPool;
92 fDrawBufferIBAllocPool = NULL;
93
94 GrSafeSetNull(fAAFillRectIndexBuffer);
95 GrSafeSetNull(fAAStrokeRectIndexBuffer);
96
97 fTextureCache->removeAll();
98 fFontCache->freeAll();
99 fGpu->markContextDirty();
100 }
101
102 void GrContext::resetContext() {
103 fGpu->markContextDirty();
104 }
105
106 void GrContext::freeGpuResources() {
107 this->flush();
108 fTextureCache->removeAll();
109 fFontCache->freeAll();
110 // a path renderer may be holding onto resources
111 GrSafeSetNull(fPathRendererChain);
112 }
113
114 size_t GrContext::getGpuTextureCacheBytes() const {
115 return fTextureCache->getCachedResourceBytes();
116 }
117
118 ////////////////////////////////////////////////////////////////////////////////
119
120 int GrContext::PaintStageVertexLayoutBits(
121 const GrPaint& paint,
122 const bool hasTexCoords[GrPaint::kTotalStages]) {
123 int stageMask = paint.getActiveStageMask();
124 int layout = 0;
125 for (int i = 0; i < GrPaint::kTotalStages; ++i) {
126 if ((1 << i) & stageMask) {
127 if (NULL != hasTexCoords && hasTexCoords[i]) {
128 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i);
129 } else {
130 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i);
131 }
132 }
133 }
134 return layout;
135 }
136
137
138 ////////////////////////////////////////////////////////////////////////////////
139
140 enum {
141 // flags for textures
142 kNPOTBit = 0x1,
143 kFilterBit = 0x2,
144 kScratchBit = 0x4,
145
146 // resource type
147 kTextureBit = 0x8,
148 kStencilBufferBit = 0x10
149 };
150
151 GrTexture* GrContext::TextureCacheEntry::texture() const {
152 if (NULL == fEntry) {
153 return NULL;
154 } else {
155 return (GrTexture*) fEntry->resource();
156 }
157 }
158
159 namespace {
160 // returns true if this is a "special" texture because of gpu NPOT limitations
161 bool gen_texture_key_values(const GrGpu* gpu,
162 const GrSamplerState* sampler,
163 GrContext::TextureKey clientKey,
164 int width,
165 int height,
166 int sampleCnt,
167 bool scratch,
168 uint32_t v[4]) {
169 GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t));
170 // we assume we only need 16 bits of width and height
171 // assert that texture creation will fail anyway if this assumption
172 // would cause key collisions.
173 GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16);
174 v[0] = clientKey & 0xffffffffUL;
175 v[1] = (clientKey >> 32) & 0xffffffffUL;
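// pack width into the low 16 bits of v[2] and height into the high 16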
176 v[2] = width | (height << 16);
177
178 v[3] = (sampleCnt << 24);
179 GrAssert(sampleCnt >= 0 && sampleCnt < 256);
180
181 if (!gpu->getCaps().fNPOTTextureTileSupport) {
182 bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
183
184 bool tiled = NULL != sampler &&
185 ((sampler->getWrapX() != GrSamplerState::kClamp_WrapMode) ||
186 (sampler->getWrapY() != GrSamplerState::kClamp_WrapMode));
187
188 if (tiled && !isPow2) {
189 v[3] |= kNPOTBit;
190 if (GrSamplerState::kNearest_Filter != sampler->getFilter()) {
191 v[3] |= kFilterBit;
192 }
193 }
194 }
195
196 if (scratch) {
197 v[3] |= kScratchBit;
198 }
199
200 v[3] |= kTextureBit;
201
202 return v[3] & kNPOTBit;
203 }
204
205 // we should never have more than one stencil buffer with the same combo of
206 // (width, height, sample count)
207 void gen_stencil_key_values(int width, int height,
208 int sampleCnt, uint32_t v[4]) {
209 v[0] = width;
210 v[1] = height;
211 v[2] = sampleCnt;
212 v[3] = kStencilBufferBit;
213 }
214
215 void gen_stencil_key_values(const GrStencilBuffer* sb,
216 uint32_t v[4]) {
217 gen_stencil_key_values(sb->width(), sb->height(),
218 sb->numSamples(), v);
219 }
220
221 }
222
223 GrContext::TextureCacheEntry GrContext::findAndLockTexture(
224 TextureKey key,
225 int width,
226 int height,
227 const GrSamplerState* sampler) {
228 uint32_t v[4];
229 gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v);
230 GrResourceKey resourceKey(v);
231 return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
232 GrResourceCache::kNested_LockType));
233 }
234
235 bool GrContext::isTextureInCache(TextureKey key,
236 int width,
237 int height,
238 const GrSamplerState* sampler) const {
239 uint32_t v[4];
240 gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v);
241 GrResourceKey resourceKey(v);
242 return fTextureCache->hasKey(resourceKey);
243 }
244
245 GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
246 ASSERT_OWNED_RESOURCE(sb);
247 uint32_t v[4];
248 gen_stencil_key_values(sb, v);
249 GrResourceKey resourceKey(v);
250 return fTextureCache->createAndLock(resourceKey, sb);
251 }
252
253 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
254 int sampleCnt) {
255 uint32_t v[4];
256 gen_stencil_key_values(width, height, sampleCnt, v);
257 GrResourceKey resourceKey(v);
258 GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
259 GrResourceCache::kSingle_LockType);
260 if (NULL != entry) {
261 GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
262 return sb;
263 } else {
264 return NULL;
265 }
266 }
267
268 void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
269 ASSERT_OWNED_RESOURCE(sbEntry->resource());
270 fTextureCache->unlock(sbEntry);
271 }
272
273 static void stretchImage(void* dst,
274 int dstW,
275 int dstH,
276 void* src,
277 int srcW,
278 int srcH,
279 int bpp) {
280 GrFixed dx = (srcW << 16) / dstW;
281 GrFixed dy = (srcH << 16) / dstH;
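// nearest-neighbor stretch in 16.16 fixed point; x and y start at half a
// step so we sample at the centers of the destination pixels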
282
283 GrFixed y = dy >> 1;
284
285 int dstXLimit = dstW*bpp;
286 for (int j = 0; j < dstH; ++j) {
287 GrFixed x = dx >> 1;
288 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
289 void* dstRow = (uint8_t*)dst + j*dstW*bpp;
290 for (int i = 0; i < dstXLimit; i += bpp) {
291 memcpy((uint8_t*) dstRow + i,
292 (uint8_t*) srcRow + (x>>16)*bpp,
293 bpp);
294 x += dx;
295 }
296 y += dy;
297 }
298 }
299
300 GrContext::TextureCacheEntry GrContext::createAndLockTexture(
301 TextureKey key,
302 const GrSamplerState* sampler,
303 const GrTextureDesc& desc,
304 void* srcData,
305 size_t rowBytes) {
306 SK_TRACE_EVENT0("GrContext::createAndLockTexture");
307
308 #if GR_DUMP_TEXTURE_UPLOAD
309 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
310 #endif
311
312 TextureCacheEntry entry;
313 uint32_t v[4];
314 bool special = gen_texture_key_values(fGpu, sampler, key,
315 desc.fWidth, desc.fHeight,
316 desc.fSampleCnt, false, v);
317 GrResourceKey resourceKey(v);
318
319 if (special) {
320 GrAssert(NULL != sampler);
321 TextureCacheEntry clampEntry = this->findAndLockTexture(key,
322 desc.fWidth,
323 desc.fHeight,
324 NULL);
325
326 if (NULL == clampEntry.texture()) {
327 clampEntry = this->createAndLockTexture(key, NULL, desc,
328 srcData, rowBytes);
329 GrAssert(NULL != clampEntry.texture());
330 if (NULL == clampEntry.texture()) {
331 return entry;
332 }
333 }
334 GrTextureDesc rtDesc = desc;
335 rtDesc.fFlags = rtDesc.fFlags |
336 kRenderTarget_GrTextureFlagBit |
337 kNoStencil_GrTextureFlagBit;
338 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64));
339 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
340
341 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
342
343 if (NULL != texture) {
344 GrDrawTarget::AutoStateRestore asr(fGpu);
345 GrDrawState* drawState = fGpu->drawState();
346 drawState->reset();
347 drawState->setRenderTarget(texture->asRenderTarget());
348 drawState->setTexture(0, clampEntry.texture());
349
350 GrSamplerState::Filter filter;
351 // if filtering is not desired then we want to ensure all
352 // texels in the resampled image are copies of texels from
353 // the original.
354 if (GrSamplerState::kNearest_Filter == sampler->getFilter()) {
355 filter = GrSamplerState::kNearest_Filter;
356 } else {
357 filter = GrSamplerState::kBilinear_Filter;
358 }
359 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
360 filter);
361
362 static const GrVertexLayout layout =
363 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
364 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
365
366 if (arg.succeeded()) {
367 GrPoint* verts = (GrPoint*) arg.vertices();
368 verts[0].setIRectFan(0, 0,
369 texture->width(),
370 texture->height(),
371 2*sizeof(GrPoint));
372 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
373 fGpu->drawNonIndexed(kTriangleFan_PrimitiveType,
374 0, 4);
375 entry.set(fTextureCache->createAndLock(resourceKey, texture));
376 }
377 texture->releaseRenderTarget();
378 } else {
379 // TODO: Our CPU stretch doesn't filter. But we create separate
380 // stretched textures when the sampler state is either filtered or
381 // not. Either implement filtered stretch blit on CPU or just create
382 // one when FBO case fails.
383
384 rtDesc.fFlags = kNone_GrTextureFlags;
385 // no longer need to clamp at min RT size.
386 rtDesc.fWidth = GrNextPow2(desc.fWidth);
387 rtDesc.fHeight = GrNextPow2(desc.fHeight);
388 int bpp = GrBytesPerPixel(desc.fConfig);
389 SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
390 rtDesc.fWidth *
391 rtDesc.fHeight);
392 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
393 srcData, desc.fWidth, desc.fHeight, bpp);
394
395 size_t stretchedRowBytes = rtDesc.fWidth * bpp;
396
397 GrTexture* texture = fGpu->createTexture(rtDesc,
398 stretchedPixels.get(),
399 stretchedRowBytes);
400 GrAssert(NULL != texture);
401 entry.set(fTextureCache->createAndLock(resourceKey, texture));
402 }
403 fTextureCache->unlock(clampEntry.cacheEntry());
404
405 } else {
406 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
407 if (NULL != texture) {
408 entry.set(fTextureCache->createAndLock(resourceKey, texture));
409 }
410 }
411 return entry;
412 }
413
414 namespace {
415 inline void gen_scratch_tex_key_values(const GrGpu* gpu,
416 const GrTextureDesc& desc,
417 uint32_t v[4]) {
418 // Instead of a client-provided key of the texture contents
419 // we create a key from the descriptor.
420 GrContext::TextureKey descKey = (desc.fFlags << 8) |
421 ((uint64_t) desc.fConfig << 32);
422 // this code path isn't friendly to tiling with NPOT restrictions.
423 // We just pass ClampNoFilter()
424 gen_texture_key_values(gpu, NULL, descKey, desc.fWidth,
425 desc.fHeight, desc.fSampleCnt, true, v);
426 }
427 }
428
429 GrContext::TextureCacheEntry GrContext::lockScratchTexture(
430 const GrTextureDesc& inDesc,
431 ScratchTexMatch match) {
432
433 GrTextureDesc desc = inDesc;
434 if (kExact_ScratchTexMatch != match) {
435 // bin by pow2 with a reasonable min
436 static const int MIN_SIZE = 256;
437 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
438 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
439 }
440
441 GrResourceEntry* entry;
442 int origWidth = desc.fWidth;
443 int origHeight = desc.fHeight;
444 bool doubledW = false;
445 bool doubledH = false;
446
447 do {
448 uint32_t v[4];
449 gen_scratch_tex_key_values(fGpu, desc, v);
450 GrResourceKey key(v);
451 entry = fTextureCache->findAndLock(key,
452 GrResourceCache::kNested_LockType);
453 // if we miss, relax the fit of the flags...
454 // then try doubling width... then height.
455 if (NULL != entry || kExact_ScratchTexMatch == match) {
456 break;
457 }
458 if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
459 desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit;
460 } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
461 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
462 } else if (!doubledW) {
463 desc.fFlags = inDesc.fFlags;
464 desc.fWidth *= 2;
465 doubledW = true;
466 } else if (!doubledH) {
467 desc.fFlags = inDesc.fFlags;
468 desc.fWidth = origWidth;
469 desc.fHeight *= 2;
470 doubledH = true;
471 } else {
472 break;
473 }
474
475 } while (true);
476
477 if (NULL == entry) {
478 desc.fFlags = inDesc.fFlags;
479 desc.fWidth = origWidth;
480 desc.fHeight = origHeight;
481 GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
482 if (NULL != texture) {
483 uint32_t v[4];
484 gen_scratch_tex_key_values(fGpu, desc, v);
485 GrResourceKey key(v);
486 entry = fTextureCache->createAndLock(key, texture);
487 }
488 }
489
490 // If the caller gives us the same desc/sampler twice we don't want
491 // to return the same texture the second time (unless it was previously
492 // released). So we detach the entry from the cache and reattach at release.
493 if (NULL != entry) {
494 fTextureCache->detach(entry);
495 }
496 return TextureCacheEntry(entry);
497 }
498
499 void GrContext::unlockTexture(TextureCacheEntry entry) {
500 ASSERT_OWNED_RESOURCE(entry.texture());
501 // If this is a scratch texture we detached it from the cache
502 // while it was locked (to avoid two callers simultaneously getting
503 // the same texture).
504 if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) {
505 fTextureCache->reattachAndUnlock(entry.cacheEntry());
506 } else {
507 fTextureCache->unlock(entry.cacheEntry());
508 }
509 }
510
511 GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc,
512 void* srcData,
513 size_t rowBytes) {
514 return fGpu->createTexture(desc, srcData, rowBytes);
515 }
516
517 void GrContext::getTextureCacheLimits(int* maxTextures,
518 size_t* maxTextureBytes) const {
519 fTextureCache->getLimits(maxTextures, maxTextureBytes);
520 }
521
522 void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
523 fTextureCache->setLimits(maxTextures, maxTextureBytes);
524 }
525
526 int GrContext::getMaxTextureSize() const {
527 return fGpu->getCaps().fMaxTextureSize;
528 }
529
530 int GrContext::getMaxRenderTargetSize() const {
531 return fGpu->getCaps().fMaxRenderTargetSize;
532 }
533
534 ///////////////////////////////////////////////////////////////////////////////
535
536 GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
537 return fGpu->createPlatformTexture(desc);
538 }
539
540 GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
541 return fGpu->createPlatformRenderTarget(desc);
542 }
543
544 ///////////////////////////////////////////////////////////////////////////////
545
546 bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler,
547 int width, int height) const {
548 const GrDrawTarget::Caps& caps = fGpu->getCaps();
549 if (!caps.f8BitPaletteSupport) {
550 return false;
551 }
552
553 bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
554
555 if (!isPow2) {
556 bool tiled = NULL != sampler &&
557 (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode ||
558 sampler->getWrapY() != GrSamplerState::kClamp_WrapMode);
559 if (tiled && !caps.fNPOTTextureTileSupport) {
560 return false;
561 }
562 }
563 return true;
564 }
565
566 ////////////////////////////////////////////////////////////////////////////////
567
568 const GrClip& GrContext::getClip() const { return fGpu->getClip(); }
569
570 void GrContext::setClip(const GrClip& clip) {
571 fGpu->setClip(clip);
572 fGpu->drawState()->enableState(GrDrawState::kClip_StateBit);
573 }
574
575 void GrContext::setClip(const GrIRect& rect) {
576 GrClip clip;
577 clip.setFromIRect(rect);
578 fGpu->setClip(clip);
579 }
580
581 ////////////////////////////////////////////////////////////////////////////////
582
583 void GrContext::clear(const GrIRect* rect, const GrColor color) {
584 this->flush();
585 fGpu->clear(rect, color);
586 }
587
588 void GrContext::drawPaint(const GrPaint& paint) {
589 // set rect to be big enough to fill the space, but not super-huge, so we
590 // don't overflow fixed-point implementations
591 GrRect r;
592 r.setLTRB(0, 0,
593 GrIntToScalar(getRenderTarget()->width()),
594 GrIntToScalar(getRenderTarget()->height()));
595 GrMatrix inverse;
596 SkTLazy<GrPaint> tmpPaint;
597 const GrPaint* p = &paint;
598 GrDrawState* drawState = fGpu->drawState();
599 GrAutoMatrix am;
600
601 // We attempt to map r by the inverse matrix and draw that. mapRect will
602 // map the four corners and bound them with a new rect. This will not
603 // produce a correct result for some perspective matrices.
604 if (!this->getMatrix().hasPerspective()) {
605 if (!drawState->getViewInverse(&inverse)) {
606 GrPrintf("Could not invert matrix");
607 return;
608 }
609 inverse.mapRect(&r);
610 } else {
611 if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) {
612 if (!drawState->getViewInverse(&inverse)) {
613 GrPrintf("Could not invert matrix");
614 return;
615 }
616 tmpPaint.set(paint);
617 tmpPaint.get()->preConcatActiveSamplerMatrices(inverse);
618 p = tmpPaint.get();
619 }
620 am.set(this, GrMatrix::I());
621 }
622 // by definition this fills the entire clip, no need for AA
623 if (paint.fAntiAlias) {
624 if (!tmpPaint.isValid()) {
625 tmpPaint.set(paint);
626 p = tmpPaint.get();
627 }
628 GrAssert(p == tmpPaint.get());
629 tmpPaint.get()->fAntiAlias = false;
630 }
631 this->drawRect(*p, r);
632 }
633
634 ////////////////////////////////////////////////////////////////////////////////
635
636 namespace {
637 inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
638 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
639 }
640 }
641
642 ////////////////////////////////////////////////////////////////////////////////
643
644 /* create a triangle strip that strokes the specified rect. There are 8
645 unique vertices, but we repeat the last 2 to close up. Alternatively we
646 could use an index array, and then only send 8 verts, but not sure that
647 would be faster.
648 */
649 static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
650 GrScalar width) {
651 const GrScalar rad = GrScalarHalf(width);
652 rect.sort();
653
654 verts[0].set(rect.fLeft + rad, rect.fTop + rad);
655 verts[1].set(rect.fLeft - rad, rect.fTop - rad);
656 verts[2].set(rect.fRight - rad, rect.fTop + rad);
657 verts[3].set(rect.fRight + rad, rect.fTop - rad);
658 verts[4].set(rect.fRight - rad, rect.fBottom - rad);
659 verts[5].set(rect.fRight + rad, rect.fBottom + rad);
660 verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
661 verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
662 verts[8] = verts[0];
663 verts[9] = verts[1];
664 }
665
666 static void setInsetFan(GrPoint* pts, size_t stride,
667 const GrRect& r, GrScalar dx, GrScalar dy) {
668 pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride);
669 }
670
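// Index list for the AA fill rect: vertices 0-3 are the outer fan and 4-7
// the inner fan; four quads bridge the two rings and a final quad fills
// the interior.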
671 static const uint16_t gFillAARectIdx[] = {
672 0, 1, 5, 5, 4, 0,
673 1, 2, 6, 6, 5, 1,
674 2, 3, 7, 7, 6, 2,
675 3, 0, 4, 4, 7, 3,
676 4, 5, 6, 6, 7, 4,
677 };
678
679 int GrContext::aaFillRectIndexCount() const {
680 return GR_ARRAY_COUNT(gFillAARectIdx);
681 }
682
683 GrIndexBuffer* GrContext::aaFillRectIndexBuffer() {
684 if (NULL == fAAFillRectIndexBuffer) {
685 fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx),
686 false);
687 if (NULL != fAAFillRectIndexBuffer) {
688 #if GR_DEBUG
689 bool updated =
690 #endif
691 fAAFillRectIndexBuffer->updateData(gFillAARectIdx,
692 sizeof(gFillAARectIdx));
693 GR_DEBUGASSERT(updated);
694 }
695 }
696 return fAAFillRectIndexBuffer;
697 }
698
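// Index list for the AA stroke rect: 16 vertices form four nested fans and
// each block of 24 indices below stitches the ring between one adjacent
// pair of fans.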
699 static const uint16_t gStrokeAARectIdx[] = {
700 0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
701 1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
702 2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
703 3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,
704
705 0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
706 1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
707 2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
708 3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,
709
710 0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
711 1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
712 2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
713 3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
714 };
715
716 int GrContext::aaStrokeRectIndexCount() const {
717 return GR_ARRAY_COUNT(gStrokeAARectIdx);
718 }
719
720 GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() {
721 if (NULL == fAAStrokeRectIndexBuffer) {
722 fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx),
723 false);
724 if (NULL != fAAStrokeRectIndexBuffer) {
725 #if GR_DEBUG
726 bool updated =
727 #endif
728 fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx,
729 sizeof(gStrokeAARectIdx));
730 GR_DEBUGASSERT(updated);
731 }
732 }
733 return fAAStrokeRectIndexBuffer;
734 }
735
736 static GrVertexLayout aa_rect_layout(const GrDrawTarget* target,
737 bool useCoverage) {
738 GrVertexLayout layout = 0;
739 for (int s = 0; s < GrDrawState::kNumStages; ++s) {
740 if (NULL != target->getDrawState().getTexture(s)) {
741 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
742 }
743 }
744 if (useCoverage) {
745 layout |= GrDrawTarget::kCoverage_VertexLayoutBit;
746 } else {
747 layout |= GrDrawTarget::kColor_VertexLayoutBit;
748 }
749 return layout;
750 }
751
752 void GrContext::fillAARect(GrDrawTarget* target,
753 const GrRect& devRect,
754 bool useVertexCoverage) {
755 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);
756
757 size_t vsize = GrDrawTarget::VertexSize(layout);
758
759 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0);
760 if (!geo.succeeded()) {
761 GrPrintf("Failed to get space for vertices!\n");
762 return;
763 }
764 GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer();
765 if (NULL == indexBuffer) {
766 GrPrintf("Failed to create index buffer!\n");
767 return;
768 }
769
770 intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
771
772 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
773 GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);
774
775 setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf);
776 setInsetFan(fan1Pos, vsize, devRect, GR_ScalarHalf, GR_ScalarHalf);
777
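// the outer fan gets zero color/coverage so the rect's edge fades out; the
// inner fan (written below) gets the full value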
778 verts += sizeof(GrPoint);
779 for (int i = 0; i < 4; ++i) {
780 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
781 }
782
783 GrColor innerColor;
784 if (useVertexCoverage) {
785 innerColor = 0xffffffff;
786 } else {
787 innerColor = target->getDrawState().getColor();
788 }
789
790 verts += 4 * vsize;
791 for (int i = 0; i < 4; ++i) {
792 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
793 }
794
795 target->setIndexSourceToBuffer(indexBuffer);
796
797 target->drawIndexed(kTriangles_PrimitiveType, 0,
798 0, 8, this->aaFillRectIndexCount());
799 }
800
801 void GrContext::strokeAARect(GrDrawTarget* target,
802 const GrRect& devRect,
803 const GrVec& devStrokeSize,
804 bool useVertexCoverage) {
805 const GrScalar& dx = devStrokeSize.fX;
806 const GrScalar& dy = devStrokeSize.fY;
807 const GrScalar rx = GrMul(dx, GR_ScalarHalf);
808 const GrScalar ry = GrMul(dy, GR_ScalarHalf);
809
810 GrScalar spare;
811 {
812 GrScalar w = devRect.width() - dx;
813 GrScalar h = devRect.height() - dy;
814 spare = GrMin(w, h);
815 }
816
817 if (spare <= 0) {
818 GrRect r(devRect);
819 r.inset(-rx, -ry);
820 fillAARect(target, r, useVertexCoverage);
821 return;
822 }
823 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);
824 size_t vsize = GrDrawTarget::VertexSize(layout);
825
826 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0);
827 if (!geo.succeeded()) {
828 GrPrintf("Failed to get space for vertices!\n");
829 return;
830 }
831 GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer();
832 if (NULL == indexBuffer) {
833 GrPrintf("Failed to create index buffer!\n");
834 return;
835 }
836
837 intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
838
839 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
840 GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);
841 GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize);
842 GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize);
843
844 setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf);
845 setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf);
846 setInsetFan(fan2Pos, vsize, devRect, rx - GR_ScalarHalf, ry - GR_ScalarHalf);
847 setInsetFan(fan3Pos, vsize, devRect, rx + GR_ScalarHalf, ry + GR_ScalarHalf);
848
849 verts += sizeof(GrPoint);
850 for (int i = 0; i < 4; ++i) {
851 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
852 }
853
854 GrColor innerColor;
855 if (useVertexCoverage) {
856 innerColor = 0xffffffff;
857 } else {
858 innerColor = target->getDrawState().getColor();
859 }
860 verts += 4 * vsize;
861 for (int i = 0; i < 8; ++i) {
862 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
863 }
864
865 verts += 8 * vsize;
866 for (int i = 0; i < 8; ++i) {
867 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
868 }
869
870 target->setIndexSourceToBuffer(indexBuffer);
871 target->drawIndexed(kTriangles_PrimitiveType,
872 0, 0, 16, aaStrokeRectIndexCount());
873 }
874
875 /**
876 * Returns true if the rect's edges are integer-aligned.
877 */
878 static bool isIRect(const GrRect& r) {
879 return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
880 GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
881 }
882
883 static bool apply_aa_to_rect(GrDrawTarget* target,
884 const GrRect& rect,
885 GrScalar width,
886 const GrMatrix* matrix,
887 GrMatrix* combinedMatrix,
888 GrRect* devRect,
889 bool* useVertexCoverage) {
890 // we use a simple coverage ramp to do aa on axis-aligned rects
891 // we check that the rect will be axis-aligned and that it won't land on
892 // integer coords.
893
894 // we are keeping around the "tweak the alpha" trick because
895 // it is our only hope for the fixed-pipe implementation.
896 // In a shader implementation we can give a separate coverage input
897 // TODO: remove this ugliness when we drop the fixed-pipe impl
898 *useVertexCoverage = false;
899 if (!target->canTweakAlphaForCoverage()) {
900 if (disable_coverage_aa_for_blend(target)) {
901 #if GR_DEBUG
902 //GrPrintf("Turning off AA to correctly apply blend.\n");
903 #endif
904 return false;
905 } else {
906 *useVertexCoverage = true;
907 }
908 }
909 const GrDrawState& drawState = target->getDrawState();
910 if (drawState.getRenderTarget()->isMultisampled()) {
911 return false;
912 }
913
914 if (0 == width && target->willUseHWAALines()) {
915 return false;
916 }
917
918 if (!drawState.getViewMatrix().preservesAxisAlignment()) {
919 return false;
920 }
921
922 if (NULL != matrix &&
923 !matrix->preservesAxisAlignment()) {
924 return false;
925 }
926
927 *combinedMatrix = drawState.getViewMatrix();
928 if (NULL != matrix) {
929 combinedMatrix->preConcat(*matrix);
930 GrAssert(combinedMatrix->preservesAxisAlignment());
931 }
932
933 combinedMatrix->mapRect(devRect, rect);
934 devRect->sort();
935
936 if (width < 0) {
937 return !isIRect(*devRect);
938 } else {
939 return true;
940 }
941 }
942
943 void GrContext::drawRect(const GrPaint& paint,
944 const GrRect& rect,
945 GrScalar width,
946 const GrMatrix* matrix) {
947 SK_TRACE_EVENT0("GrContext::drawRect");
948
949 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
950 int stageMask = paint.getActiveStageMask();
951
952 GrRect devRect = rect;
953 GrMatrix combinedMatrix;
954 bool useVertexCoverage;
955 bool needAA = paint.fAntiAlias &&
956 !this->getRenderTarget()->isMultisampled();
957 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
958 &combinedMatrix, &devRect,
959 &useVertexCoverage);
960
961 if (doAA) {
962 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
963 if (width >= 0) {
964 GrVec strokeSize;
965 if (width > 0) {
966 strokeSize.set(width, width);
967 combinedMatrix.mapVectors(&strokeSize, 1);
968 strokeSize.setAbs(strokeSize);
969 } else {
970 strokeSize.set(GR_Scalar1, GR_Scalar1);
971 }
972 strokeAARect(target, devRect, strokeSize, useVertexCoverage);
973 } else {
974 fillAARect(target, devRect, useVertexCoverage);
975 }
976 return;
977 }
978
979 if (width >= 0) {
980 // TODO: consider making static vertex buffers for these cases.
981 // Hairline could be done by just adding a closing vertex to
982 // unitSquareVertexBuffer()
983 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
984
985 static const int worstCaseVertCount = 10;
986 GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0);
987
988 if (!geo.succeeded()) {
989 GrPrintf("Failed to get space for vertices!\n");
990 return;
991 }
992
993 GrPrimitiveType primType;
994 int vertCount;
995 GrPoint* vertex = geo.positions();
996
997 if (width > 0) {
998 vertCount = 10;
999 primType = kTriangleStrip_PrimitiveType;
1000 setStrokeRectStrip(vertex, rect, width);
1001 } else {
1002 // hairline
1003 vertCount = 5;
1004 primType = kLineStrip_PrimitiveType;
1005 vertex[0].set(rect.fLeft, rect.fTop);
1006 vertex[1].set(rect.fRight, rect.fTop);
1007 vertex[2].set(rect.fRight, rect.fBottom);
1008 vertex[3].set(rect.fLeft, rect.fBottom);
1009 vertex[4].set(rect.fLeft, rect.fTop);
1010 }
1011
1012 GrDrawState::AutoViewMatrixRestore avmr;
1013 if (NULL != matrix) {
1014 GrDrawState* drawState = target->drawState();
1015 avmr.set(drawState);
1016 drawState->preConcatViewMatrix(*matrix);
1017 drawState->preConcatSamplerMatrices(stageMask, *matrix);
1018 }
1019
1020 target->drawNonIndexed(primType, 0, vertCount);
1021 } else {
1022 #if GR_STATIC_RECT_VB
1023 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
1024 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
1025 if (NULL == sqVB) {
1026 GrPrintf("Failed to create static rect vb.\n");
1027 return;
1028 }
1029 target->setVertexSourceToBuffer(layout, sqVB);
1030 GrDrawState* drawState = target->drawState();
1031 GrDrawState::AutoViewMatrixRestore avmr(drawState);
1032 GrMatrix m;
1033 m.setAll(rect.width(), 0, rect.fLeft,
1034 0, rect.height(), rect.fTop,
1035 0, 0, GrMatrix::I()[8]);
1036
1037 if (NULL != matrix) {
1038 m.postConcat(*matrix);
1039 }
1040 drawState->preConcatViewMatrix(m);
1041 drawState->preConcatSamplerMatrices(stageMask, m);
1042
1043 target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
1044 #else
1045 target->drawSimpleRect(rect, matrix, stageMask);
1046 #endif
1047 }
1048 }
1049
1050 void GrContext::drawRectToRect(const GrPaint& paint,
1051 const GrRect& dstRect,
1052 const GrRect& srcRect,
1053 const GrMatrix* dstMatrix,
1054 const GrMatrix* srcMatrix) {
1055 SK_TRACE_EVENT0("GrContext::drawRectToRect");
1056
1057 // srcRect refers to paint's first texture
1058 if (NULL == paint.getTexture(0)) {
1059 drawRect(paint, dstRect, -1, dstMatrix);
1060 return;
1061 }
1062
1063 GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);
1064
1065 #if GR_STATIC_RECT_VB
1066 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
1067 GrDrawState* drawState = target->drawState();
1068 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
1069 GrDrawState::AutoViewMatrixRestore avmr(drawState);
1070
1071 GrMatrix m;
1072
1073 m.setAll(dstRect.width(), 0, dstRect.fLeft,
1074 0, dstRect.height(), dstRect.fTop,
1075 0, 0, GrMatrix::I()[8]);
1076 if (NULL != dstMatrix) {
1077 m.postConcat(*dstMatrix);
1078 }
1079 drawState->preConcatViewMatrix(m);
1080
1081 // srcRect refers to first stage
1082 int otherStageMask = paint.getActiveStageMask() &
1083 (~(1 << GrPaint::kFirstTextureStage));
1084 if (otherStageMask) {
1085 drawState->preConcatSamplerMatrices(otherStageMask, m);
1086 }
1087
1088 m.setAll(srcRect.width(), 0, srcRect.fLeft,
1089 0, srcRect.height(), srcRect.fTop,
1090 0, 0, GrMatrix::I()[8]);
1091 if (NULL != srcMatrix) {
1092 m.postConcat(*srcMatrix);
1093 }
1094 drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);
1095
1096 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
1097 if (NULL == sqVB) {
1098 GrPrintf("Failed to create static rect vb.\n");
1099 return;
1100 }
1101 target->setVertexSourceToBuffer(layout, sqVB);
1102 target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
1103 #else
1104
1105 GrDrawTarget* target;
1106 #if BATCH_RECT_TO_RECT
1107 target = this->prepareToDraw(paint, kBuffered_DrawCategory);
1108 #else
1109 target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
1110 #endif
1111
1112 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
1113 const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
1114 srcRects[0] = &srcRect;
1115 srcMatrices[0] = srcMatrix;
1116
1117 target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices);
1118 #endif
1119 }
1120
1121 void GrContext::drawVertices(const GrPaint& paint,
1122 GrPrimitiveType primitiveType,
1123 int vertexCount,
1124 const GrPoint positions[],
1125 const GrPoint texCoords[],
1126 const GrColor colors[],
1127 const uint16_t indices[],
1128 int indexCount) {
1129 SK_TRACE_EVENT0("GrContext::drawVertices");
1130
1131 GrDrawTarget::AutoReleaseGeometry geo;
1132
1133 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
1134
1135 bool hasTexCoords[GrPaint::kTotalStages] = {
1136 NULL != texCoords, // texCoords provides explicit stage 0 coords
1137 0 // remaining stages use positions
1138 };
1139
1140 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords);
1141
1142 if (NULL != colors) {
1143 layout |= GrDrawTarget::kColor_VertexLayoutBit;
1144 }
1145 int vertexSize = GrDrawTarget::VertexSize(layout);
1146
1147 if (sizeof(GrPoint) != vertexSize) {
1148 if (!geo.set(target, layout, vertexCount, 0)) {
1149 GrPrintf("Failed to get space for vertices!\n");
1150 return;
1151 }
1152 int texOffsets[GrDrawState::kMaxTexCoords];
1153 int colorOffset;
1154 GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
1155 texOffsets,
1156 &colorOffset,
1157 NULL,
1158 NULL);
1159 void* curVertex = geo.vertices();
1160
1161 for (int i = 0; i < vertexCount; ++i) {
1162 *((GrPoint*)curVertex) = positions[i];
1163
1164 if (texOffsets[0] > 0) {
1165 *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
1166 }
1167 if (colorOffset > 0) {
1168 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
1169 }
1170 curVertex = (void*)((intptr_t)curVertex + vertexSize);
1171 }
1172 } else {
1173 target->setVertexSourceToArray(layout, positions, vertexCount);
1174 }
1175
1176 // we don't currently apply offscreen AA to this path. Need improved
1177 // management of GrDrawTarget's geometry to avoid copying points per-tile.
1178
1179 if (NULL != indices) {
1180 target->setIndexSourceToArray(indices, indexCount);
1181 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
1182 } else {
1183 target->drawNonIndexed(primitiveType, 0, vertexCount);
1184 }
1185 }
1186
1187 ///////////////////////////////////////////////////////////////////////////////
1188 #include "SkDraw.h"
1189 #include "SkRasterClip.h"
1190
1191 namespace {
1192
1193 SkPath::FillType gr_fill_to_sk_fill(GrPathFill fill) {
1194 switch (fill) {
1195 case kWinding_PathFill:
1196 return SkPath::kWinding_FillType;
1197 case kEvenOdd_PathFill:
1198 return SkPath::kEvenOdd_FillType;
1199 case kInverseWinding_PathFill:
1200 return SkPath::kInverseWinding_FillType;
1201 case kInverseEvenOdd_PathFill:
1202 return SkPath::kInverseEvenOdd_FillType;
1203 default:
1204 GrCrash("Unexpected fill.");
1205 return SkPath::kWinding_FillType;
1206 }
1207 }
1208
1209 // gets device coord bounds of path (not considering the fill) and clip. The
1210 // path bounds will be a subset of the clip bounds. returns false if path bounds
1211 // would be empty.
1212 bool get_path_and_clip_bounds(const GrDrawTarget* target,
1213 const GrPath& path,
1214 const GrVec* translate,
1215 GrIRect* pathBounds,
1216 GrIRect* clipBounds) {
1217 // compute bounds as intersection of rt size, clip, and path
1218 const GrRenderTarget* rt = target->getDrawState().getRenderTarget();
1219 if (NULL == rt) {
1220 return false;
1221 }
1222 *pathBounds = GrIRect::MakeWH(rt->width(), rt->height());
1223 const GrClip& clip = target->getClip();
1224 if (clip.hasConservativeBounds()) {
1225 clip.getConservativeBounds().roundOut(clipBounds);
1226 if (!pathBounds->intersect(*clipBounds)) {
1227 return false;
1228 }
1229 } else {
1230 // pathBounds is currently the rt extent, set clip bounds to that rect.
1231 *clipBounds = *pathBounds;
1232 }
1233 GrRect pathSBounds = path.getBounds();
1234 if (!pathSBounds.isEmpty()) {
1235 if (NULL != translate) {
1236 pathSBounds.offset(*translate);
1237 }
1238 target->getDrawState().getViewMatrix().mapRect(&pathSBounds,
1239 pathSBounds);
1240 GrIRect pathIBounds;
1241 pathSBounds.roundOut(&pathIBounds);
1242 if (!pathBounds->intersect(pathIBounds)) {
1243 return false;
1244 }
1245 } else {
1246 return false;
1247 }
1248 return true;
1249 }
1250
1251 /**
1252 * sw rasterizes path to A8 mask using the context's matrix and uploads to a
1253 * scratch texture.
1254 */
1255
1256 bool sw_draw_path_to_mask_texture(const GrPath& clientPath,
1257 const GrIRect& pathDevBounds,
1258 GrPathFill fill,
1259 GrContext* context,
1260 const GrPoint* translate,
1261 GrAutoScratchTexture* tex) {
1262 SkPaint paint;
1263 SkPath tmpPath;
1264 const SkPath* pathToDraw = &clientPath;
1265 if (kHairLine_PathFill == fill) {
1266 paint.setStyle(SkPaint::kStroke_Style);
1267 paint.setStrokeWidth(SK_Scalar1);
1268 } else {
1269 paint.setStyle(SkPaint::kFill_Style);
1270 SkPath::FillType skfill = gr_fill_to_sk_fill(fill);
1271 if (skfill != pathToDraw->getFillType()) {
1272 tmpPath = *pathToDraw;
1273 tmpPath.setFillType(skfill);
1274 pathToDraw = &tmpPath;
1275 }
1276 }
1277 paint.setAntiAlias(true);
1278 paint.setColor(SK_ColorWHITE);
1279
1280 GrMatrix matrix = context->getMatrix();
1281 if (NULL != translate) {
1282 matrix.postTranslate(translate->fX, translate->fY);
1283 }
1284
1285 matrix.postTranslate(-pathDevBounds.fLeft * SK_Scalar1,
1286 -pathDevBounds.fTop * SK_Scalar1);
1287 GrIRect bounds = GrIRect::MakeWH(pathDevBounds.width(),
1288 pathDevBounds.height());
1289
1290 SkBitmap bm;
1291 bm.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom);
1292 if (!bm.allocPixels()) {
1293 return false;
1294 }
1295 sk_bzero(bm.getPixels(), bm.getSafeSize());
1296
1297 SkDraw draw;
1298 sk_bzero(&draw, sizeof(draw));
1299 SkRasterClip rc(bounds);
1300 draw.fRC = &rc;
1301 draw.fClip = &rc.bwRgn();
1302 draw.fMatrix = &matrix;
1303 draw.fBitmap = &bm;
1304 draw.drawPath(*pathToDraw, paint);
1305
1306 const GrTextureDesc desc = {
1307 kNone_GrTextureFlags,
1308 bounds.fRight,
1309 bounds.fBottom,
1310 kAlpha_8_GrPixelConfig,
1311 {0} // samples
1312 };
1313
1314 tex->set(context, desc);
1315 GrTexture* texture = tex->texture();
1316
1317 if (NULL == texture) {
1318 return false;
1319 }
1320 SkAutoLockPixels alp(bm);
1321 texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
1322 bm.getPixels(), bm.rowBytes());
1323 return true;
1324 }
1325
1326 void draw_around_inv_path(GrDrawTarget* target,
1327 GrDrawState::StageMask stageMask,
1328 const GrIRect& clipBounds,
1329 const GrIRect& pathBounds) {
1330 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
1331 GrRect rect;
1332 if (clipBounds.fTop < pathBounds.fTop) {
1333 rect.iset(clipBounds.fLeft, clipBounds.fTop,
1334 clipBounds.fRight, pathBounds.fTop);
1335 target->drawSimpleRect(rect, NULL, stageMask);
1336 }
1337 if (clipBounds.fLeft < pathBounds.fLeft) {
1338 rect.iset(clipBounds.fLeft, pathBounds.fTop,
1339 pathBounds.fLeft, pathBounds.fBottom);
1340 target->drawSimpleRect(rect, NULL, stageMask);
1341 }
1342 if (clipBounds.fRight > pathBounds.fRight) {
1343 rect.iset(pathBounds.fRight, pathBounds.fTop,
1344 clipBounds.fRight, pathBounds.fBottom);
1345 target->drawSimpleRect(rect, NULL, stageMask);
1346 }
1347 if (clipBounds.fBottom > pathBounds.fBottom) {
1348 rect.iset(clipBounds.fLeft, pathBounds.fBottom,
1349 clipBounds.fRight, clipBounds.fBottom);
1350 target->drawSimpleRect(rect, NULL, stageMask);
1351 }
1352 }
1353
1354 }
1355
1356 void GrContext::drawPath(const GrPaint& paint, const GrPath& path,
1357 GrPathFill fill, const GrPoint* translate) {
1358
1359 if (path.isEmpty()) {
1360 if (GrIsFillInverted(fill)) {
1361 this->drawPaint(paint);
1362 }
1363 return;
1364 }
1365
1366 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
1367 GrDrawState::StageMask stageMask = paint.getActiveStageMask();
1368
1369 bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();
1370
1371 // An assumption here is that the path renderer would use some form of tweaking
1372 // the src color (either the input alpha or in the frag shader) to implement
1373 // aa. If we have some future driver-mojo path AA that can do the right
1374 // thing WRT the blend then we'll need some query on the PR.
1375 if (disable_coverage_aa_for_blend(target)) {
1376 #if GR_DEBUG
1377 //GrPrintf("Turning off AA to correctly apply blend.\n");
1378 #endif
1379 prAA = false;
1380 }
1381
1382 GrPathRenderer* pr = NULL;
1383 if (prAA) {
1384 pr = this->getPathRenderer(path, fill, target, true);
1385 if (NULL == pr) {
1386 GrAutoScratchTexture ast;
1387 GrIRect pathBounds, clipBounds;
1388 if (!get_path_and_clip_bounds(target, path, translate,
1389 &pathBounds, &clipBounds)) {
1390 return;
1391 }
1392 if (NULL == pr && sw_draw_path_to_mask_texture(path, pathBounds,
1393 fill, this,
1394 translate, &ast)) {
1395 GrTexture* texture = ast.texture();
1396 GrAssert(NULL != texture);
1397 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
1398 enum {
1399 kPathMaskStage = GrPaint::kTotalStages,
1400 };
1401 target->drawState()->setTexture(kPathMaskStage, texture);
1402 target->drawState()->sampler(kPathMaskStage)->reset();
1403 GrScalar w = GrIntToScalar(pathBounds.width());
1404 GrScalar h = GrIntToScalar(pathBounds.height());
1405 GrRect maskRect = GrRect::MakeWH(w / texture->width(),
1406 h / texture->height());
1407 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
1408 srcRects[kPathMaskStage] = &maskRect;
1409 stageMask |= 1 << kPathMaskStage;
1410 GrRect dstRect = GrRect::MakeLTRB(
1411 SK_Scalar1* pathBounds.fLeft,
1412 SK_Scalar1* pathBounds.fTop,
1413 SK_Scalar1* pathBounds.fRight,
1414 SK_Scalar1* pathBounds.fBottom);
1415 target->drawRect(dstRect, NULL, stageMask, srcRects, NULL);
1416 target->drawState()->setTexture(kPathMaskStage, NULL);
1417 if (GrIsFillInverted(fill)) {
1418 draw_around_inv_path(target, stageMask,
1419 clipBounds, pathBounds);
1420 }
1421 return;
1422 }
1423 }
1424 } else {
1425 pr = this->getPathRenderer(path, fill, target, false);
1426 }
1427
1428 if (NULL == pr) {
1429 #if GR_DEBUG
1430 GrPrintf("Unable to find path renderer compatible with path.\n");
1431 #endif
1432 return;
1433 }
1434
1435 pr->drawPath(path, fill, translate, target, stageMask, prAA);
1436 }
1437
1438 ////////////////////////////////////////////////////////////////////////////////
1439
1440 void GrContext::flush(int flagsBitfield) {
1441 if (kDiscard_FlushBit & flagsBitfield) {
1442 fDrawBuffer->reset();
1443 } else {
1444 this->flushDrawBuffer();
1445 }
1446 if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
1447 fGpu->forceRenderTargetFlush();
1448 }
1449 }
1450
1451 void GrContext::flushText() {
1452 if (kText_DrawCategory == fLastDrawCategory) {
1453 flushDrawBuffer();
1454 }
1455 }
1456
1457 void GrContext::flushDrawBuffer() {
1458 #if BATCH_RECT_TO_RECT || DEFER_TEXT_RENDERING
1459 if (fDrawBuffer) {
1460 fDrawBuffer->playback(fGpu);
1461 fDrawBuffer->reset();
1462 }
1463 #endif
1464 }
1465
1466 void GrContext::internalWriteTexturePixels(GrTexture* texture,
1467 int left, int top,
1468 int width, int height,
1469 GrPixelConfig config,
1470 const void* buffer,
1471 size_t rowBytes,
1472 uint32_t flags) {
1473 SK_TRACE_EVENT0("GrContext::writeTexturePixels");
1474 ASSERT_OWNED_RESOURCE(texture);
1475
1476 if (!(kDontFlush_PixelOpsFlag & flags)) {
1477 this->flush();
1478 }
1479 // TODO: use scratch texture to perform conversion
1480 if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
1481 GrPixelConfigIsUnpremultiplied(config)) {
1482 return;
1483 }
1484
1485 fGpu->writeTexturePixels(texture, left, top, width, height,
1486 config, buffer, rowBytes);
1487 }
1488
1489 bool GrContext::internalReadTexturePixels(GrTexture* texture,
1490 int left, int top,
1491 int width, int height,
1492 GrPixelConfig config,
1493 void* buffer,
1494 size_t rowBytes,
1495 uint32_t flags) {
1496 SK_TRACE_EVENT0("GrContext::readTexturePixels");
1497 ASSERT_OWNED_RESOURCE(texture);
1498
1499 // TODO: code read pixels for textures that aren't also render targets
1500 GrRenderTarget* target = texture->asRenderTarget();
1501 if (NULL != target) {
1502 return this->internalReadRenderTargetPixels(target,
1503 left, top, width, height,
1504 config, buffer, rowBytes,
1505 flags);
1506 } else {
1507 return false;
1508 }
1509 }
1510
1511 #include "SkConfig8888.h"
1512
1513 namespace {
1514 /**
1515 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
1516 * formats are representable as Config8888 and so the function returns false
1517 * if the GrPixelConfig has no equivalent Config8888.
1518 */
1519 bool grconfig_to_config8888(GrPixelConfig config,
1520 SkCanvas::Config8888* config8888) {
1521 switch (config) {
1522 case kRGBA_8888_PM_GrPixelConfig:
1523 *config8888 = SkCanvas::kRGBA_Premul_Config8888;
1524 return true;
1525 case kRGBA_8888_UPM_GrPixelConfig:
1526 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
1527 return true;
1528 case kBGRA_8888_PM_GrPixelConfig:
1529 *config8888 = SkCanvas::kBGRA_Premul_Config8888;
1530 return true;
1531 case kBGRA_8888_UPM_GrPixelConfig:
1532 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
1533 return true;
1534 default:
1535 return false;
1536 }
1537 }
1538 }
1539
1540 bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
1541 int left, int top,
1542 int width, int height,
1543 GrPixelConfig config,
1544 void* buffer,
1545 size_t rowBytes,
1546 uint32_t flags) {
1547 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
1548 ASSERT_OWNED_RESOURCE(target);
1549
1550 if (NULL == target) {
1551 target = fGpu->drawState()->getRenderTarget();
1552 if (NULL == target) {
1553 return false;
1554 }
1555 }
1556
1557 if (!(kDontFlush_PixelOpsFlag & flags)) {
1558 this->flush();
1559 }
1560
1561 if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
1562 GrPixelConfigIsUnpremultiplied(config) &&
1563 !fGpu->canPreserveReadWriteUnpremulPixels()) {
1564 SkCanvas::Config8888 srcConfig8888, dstConfig8888;
1565 if (!grconfig_to_config8888(target->config(), &srcConfig8888) ||
1566 !grconfig_to_config8888(config, &dstConfig8888)) {
1567 return false;
1568 }
1569 // do read back using target's own config
1570 this->internalReadRenderTargetPixels(target,
1571 left, top,
1572 width, height,
1573 target->config(),
1574 buffer, rowBytes,
1575 kDontFlush_PixelOpsFlag);
1576 // sw convert the pixels to unpremul config
1577 uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer);
1578 SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888,
1579 pixels, rowBytes, srcConfig8888,
1580 width, height);
1581 return true;
1582 }
1583
1584 GrTexture* src = target->asTexture();
1585 bool swapRAndB = NULL != src &&
1586 fGpu->preferredReadPixelsConfig(config) ==
1587 GrPixelConfigSwapRAndB(config);
1588
1589 bool flipY = NULL != src &&
1590 fGpu->readPixelsWillPayForYFlip(target, left, top,
1591 width, height, config,
1592 rowBytes);
1593 bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
1594 GrPixelConfigIsUnpremultiplied(config));
1595
1596 if (NULL == src && alphaConversion) {
1597 // we should fall back to CPU conversion here. This could happen when
1598 // we were given an external render target by the client that is not
1599 // also a texture (e.g. FBO 0 in GL)
1600 return false;
1601 }
1602 // we draw to a scratch texture if any of these conversions are applied
1603 GrAutoScratchTexture ast;
1604 if (flipY || swapRAndB || alphaConversion) {
1605 GrAssert(NULL != src);
1606 if (swapRAndB) {
1607 config = GrPixelConfigSwapRAndB(config);
1608 GrAssert(kUnknown_GrPixelConfig != config);
1609 }
1610 // Make the scratch a render target because we don't have a robust
1611 // readTexturePixels as of yet (it calls this function).
1612 const GrTextureDesc desc = {
1613 kRenderTarget_GrTextureFlagBit,
1614 width, height,
1615 config,
1616 {0}, // samples
1617 };
1618
1619 // When a full readback is faster than a partial one we could always make
1620 // the scratch exactly match the passed rect. However, if we see many
1621 // differently sized rectangles we will trash our texture cache and pay the
1622 // cost of creating and destroying many textures. So, we only request
1623 // an exact match when the caller is reading an entire RT.
1624 ScratchTexMatch match = kApprox_ScratchTexMatch;
1625 if (0 == left &&
1626 0 == top &&
1627 target->width() == width &&
1628 target->height() == height &&
1629 fGpu->fullReadPixelsIsFasterThanPartial()) {
1630 match = kExact_ScratchTexMatch;
1631 }
1632 ast.set(this, desc, match);
1633 GrTexture* texture = ast.texture();
1634 if (!texture) {
1635 return false;
1636 }
1637 target = texture->asRenderTarget();
1638 GrAssert(NULL != target);
1639
1640 GrDrawTarget::AutoStateRestore asr(fGpu);
1641 GrDrawState* drawState = fGpu->drawState();
1642 drawState->reset();
1643 drawState->setRenderTarget(target);
1644
1645 GrMatrix matrix;
1646 if (flipY) {
1647 matrix.setTranslate(SK_Scalar1 * left,
1648 SK_Scalar1 * (top + height));
1649 matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
1650 } else {
1651 matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
1652 }
1653 matrix.postIDiv(src->width(), src->height());
1654 drawState->sampler(0)->reset(matrix);
1655 drawState->sampler(0)->setRAndBSwap(swapRAndB);
1656 drawState->setTexture(0, src);
1657 GrRect rect;
1658 rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
1659 fGpu->drawSimpleRect(rect, NULL, 0x1);
1660 left = 0;
1661 top = 0;
1662 }
1663 return fGpu->readPixels(target,
1664 left, top, width, height,
1665 config, buffer, rowBytes, flipY);
1666 }
1667
1668 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1669 GrAssert(target);
1670 ASSERT_OWNED_RESOURCE(target);
1671 // In the future we may track whether there are any pending draws to this
1672 // target. We don't today so we always perform a flush. We don't promise
1673 // this to our clients, though.
1674 this->flush();
1675 fGpu->resolveRenderTarget(target);
1676 }
1677
1678 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
1679 if (NULL == src || NULL == dst) {
1680 return;
1681 }
1682 ASSERT_OWNED_RESOURCE(src);
1683
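    // Copy by drawing: reset the draw state, point it at 'dst', and draw a rect
    // the size of 'src' at the origin, sampling 'src' 1:1.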
1684 GrDrawTarget::AutoStateRestore asr(fGpu);
1685 GrDrawState* drawState = fGpu->drawState();
1686 drawState->reset();
1687 drawState->setRenderTarget(dst);
1688 GrMatrix sampleM;
1689 sampleM.setIDiv(src->width(), src->height());
1690 drawState->setTexture(0, src);
1691 drawState->sampler(0)->reset(sampleM);
1692 SkRect rect = SkRect::MakeXYWH(0, 0,
1693 SK_Scalar1 * src->width(),
1694 SK_Scalar1 * src->height());
1695 fGpu->drawSimpleRect(rect, NULL, 1 << 0);
1696 }
1697
1698 void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
1699 int left, int top,
1700 int width, int height,
1701 GrPixelConfig config,
1702 const void* buffer,
1703 size_t rowBytes,
1704 uint32_t flags) {
1705 SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
1706 ASSERT_OWNED_RESOURCE(target);
1707
1708 if (NULL == target) {
1709 target = fGpu->drawState()->getRenderTarget();
1710 if (NULL == target) {
1711 return;
1712 }
1713 }
1714
1715     // TODO: when the underlying API has a direct way to do this we should use it
1716 // (e.g. glDrawPixels on desktop GL).
1717
1718     // If the RT is also a texture and we don't have to do PM/UPM conversion
1719     // then take the texture path, which we expect to be at least as fast
1720     // since it doesn't use an intermediate texture as the path below does.
1721
1722 #if !GR_MAC_BUILD
1723 // At least some drivers on the Mac get confused when glTexImage2D is called
1724 // on a texture attached to an FBO. The FBO still sees the old image. TODO:
1725     // determine which OS versions and/or hardware are affected.
1726 if (NULL != target->asTexture() &&
1727 GrPixelConfigIsUnpremultiplied(target->config()) ==
1728 GrPixelConfigIsUnpremultiplied(config)) {
1729
1730 this->internalWriteTexturePixels(target->asTexture(),
1731 left, top, width, height,
1732 config, buffer, rowBytes, flags);
1733 return;
1734 }
1735 #endif
1736 if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
1737 GrPixelConfigIsUnpremultiplied(config) &&
1738 !fGpu->canPreserveReadWriteUnpremulPixels()) {
1739 SkCanvas::Config8888 srcConfig8888, dstConfig8888;
1740 if (!grconfig_to_config8888(config, &srcConfig8888) ||
1741 !grconfig_to_config8888(target->config(), &dstConfig8888)) {
1742 return;
1743 }
1744         // allocate a temporary buffer and software-convert the pixels to premul
1745 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height);
1746 const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
1747 SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
1748 src, rowBytes, srcConfig8888,
1749 width, height);
1750 // upload the already premul pixels
1751 this->internalWriteRenderTargetPixels(target,
1752 left, top,
1753 width, height,
1754 target->config(),
1755 tmpPixels, 4 * width, flags);
1756 return;
1757 }
1758
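    // Remaining path: upload the pixels into a scratch texture (swapping R and B
    // first when the GPU's preferred config is the swapped one) and then draw
    // that texture into the target translated to (left, top).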
1759 bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
1760 GrPixelConfigSwapRAndB(config);
1761 if (swapRAndB) {
1762 config = GrPixelConfigSwapRAndB(config);
1763 }
1764
1765 const GrTextureDesc desc = {
1766 kNone_GrTextureFlags, width, height, config, {0}
1767 };
1768 GrAutoScratchTexture ast(this, desc);
1769 GrTexture* texture = ast.texture();
1770 if (NULL == texture) {
1771 return;
1772 }
1773 this->internalWriteTexturePixels(texture, 0, 0, width, height,
1774 config, buffer, rowBytes, flags);
1775
1776 GrDrawTarget::AutoStateRestore asr(fGpu);
1777 GrDrawState* drawState = fGpu->drawState();
1778 drawState->reset();
1779
1780 GrMatrix matrix;
1781 matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
1782 drawState->setViewMatrix(matrix);
1783 drawState->setRenderTarget(target);
1784 drawState->setTexture(0, texture);
1785
1786 matrix.setIDiv(texture->width(), texture->height());
1787 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
1788 GrSamplerState::kNearest_Filter,
1789 matrix);
1790 drawState->sampler(0)->setRAndBSwap(swapRAndB);
1791
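    // Use the vertex positions as stage-0 texture coordinates so the scratch
    // texture maps 1:1 onto the quad drawn below.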
1792 GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
1793 static const int VCOUNT = 4;
1794 // TODO: Use GrGpu::drawRect here
1795 GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
1796 if (!geo.succeeded()) {
1797 GrPrintf("Failed to get space for vertices!\n");
1798 return;
1799 }
1800 ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
1801 fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT);
1802 }
1803 ////////////////////////////////////////////////////////////////////////////////
1804
1805 void GrContext::setPaint(const GrPaint& paint, GrDrawTarget* target) {
1806 GrDrawState* drawState = target->drawState();
1807
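    // Mirror the paint's texture stages into the draw state's color stages and
    // its mask stages into the coverage stages, copying each active sampler.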
1808 for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
1809 int s = i + GrPaint::kFirstTextureStage;
1810 drawState->setTexture(s, paint.getTexture(i));
1811 ASSERT_OWNED_RESOURCE(paint.getTexture(i));
1812 if (paint.getTexture(i)) {
1813 *drawState->sampler(s) = paint.getTextureSampler(i);
1814 }
1815 }
1816
1817 drawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);
1818
1819 for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
1820 int s = i + GrPaint::kFirstMaskStage;
1821 drawState->setTexture(s, paint.getMask(i));
1822 ASSERT_OWNED_RESOURCE(paint.getMask(i));
1823 if (paint.getMask(i)) {
1824 *drawState->sampler(s) = paint.getMaskSampler(i);
1825 }
1826 }
1827
1828 drawState->setColor(paint.fColor);
1829
1830 if (paint.fDither) {
1831 drawState->enableState(GrDrawState::kDither_StateBit);
1832 } else {
1833 drawState->disableState(GrDrawState::kDither_StateBit);
1834 }
1835 if (paint.fAntiAlias) {
1836 drawState->enableState(GrDrawState::kHWAntialias_StateBit);
1837 } else {
1838 drawState->disableState(GrDrawState::kHWAntialias_StateBit);
1839 }
1840 if (paint.fColorMatrixEnabled) {
1841 drawState->enableState(GrDrawState::kColorMatrix_StateBit);
1842 } else {
1843 drawState->disableState(GrDrawState::kColorMatrix_StateBit);
1844 }
1845 drawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
1846 drawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
1847 drawState->setColorMatrix(paint.fColorMatrix);
1848 drawState->setCoverage(paint.fCoverage);
1849
1850 if (paint.getActiveMaskStageMask() && !target->canApplyCoverage()) {
1851 GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1852 }
1853 }
1854
1855 GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
1856 DrawCategory category) {
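    // A category change means buffered and unbuffered draws would interleave,
    // so flush anything queued in the draw buffer to keep draws in order.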
1857 if (category != fLastDrawCategory) {
1858 flushDrawBuffer();
1859 fLastDrawCategory = category;
1860 }
1861 this->setPaint(paint, fGpu);
1862 GrDrawTarget* target = fGpu;
1863 switch (category) {
1864 case kText_DrawCategory:
1865 #if DEFER_TEXT_RENDERING
1866 target = fDrawBuffer;
1867 fDrawBuffer->initializeDrawStateAndClip(*fGpu);
1868 #else
1869 target = fGpu;
1870 #endif
1871 break;
1872 case kUnbuffered_DrawCategory:
1873 target = fGpu;
1874 break;
1875 case kBuffered_DrawCategory:
1876 target = fDrawBuffer;
1877 fDrawBuffer->initializeDrawStateAndClip(*fGpu);
1878 break;
1879 }
1880 return target;
1881 }
1882
1883 GrPathRenderer* GrContext::getPathRenderer(const GrPath& path,
1884 GrPathFill fill,
1885 const GrDrawTarget* target,
1886 bool antiAlias) {
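    // Create the path renderer chain lazily on first use.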
1887 if (NULL == fPathRendererChain) {
1888 fPathRendererChain =
1889 new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag);
1890 }
1891 return fPathRendererChain->getPathRenderer(path, fill, target, antiAlias);
1892 }
1893
1894 ////////////////////////////////////////////////////////////////////////////////
1895
1896 void GrContext::setRenderTarget(GrRenderTarget* target) {
1897 ASSERT_OWNED_RESOURCE(target);
1898 this->flush(false);
1899 fGpu->drawState()->setRenderTarget(target);
1900 }
1901
1902 GrRenderTarget* GrContext::getRenderTarget() {
1903 return fGpu->drawState()->getRenderTarget();
1904 }
1905
1906 const GrRenderTarget* GrContext::getRenderTarget() const {
1907 return fGpu->getDrawState().getRenderTarget();
1908 }
1909
1910 const GrMatrix& GrContext::getMatrix() const {
1911 return fGpu->getDrawState().getViewMatrix();
1912 }
1913
1914 void GrContext::setMatrix(const GrMatrix& m) {
1915 fGpu->drawState()->setViewMatrix(m);
1916 }
1917
1918 void GrContext::concatMatrix(const GrMatrix& m) const {
1919 fGpu->drawState()->preConcatViewMatrix(m);
1920 }
1921
1922 static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
1923 intptr_t mask = 1 << shift;
1924 if (pred) {
1925 bits |= mask;
1926 } else {
1927 bits &= ~mask;
1928 }
1929 return bits;
1930 }
1931
1932 void GrContext::resetStats() {
1933 fGpu->resetStats();
1934 }
1935
1936 const GrGpuStats& GrContext::getStats() const {
1937 return fGpu->getStats();
1938 }
1939
1940 void GrContext::printStats() const {
1941 fGpu->printStats();
1942 }
1943
1944 GrContext::GrContext(GrGpu* gpu) {
1945 fGpu = gpu;
1946 fGpu->ref();
1947 fGpu->setContext(this);
1948
1949 fPathRendererChain = NULL;
1950
1951 fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT,
1952 MAX_TEXTURE_CACHE_BYTES);
1953 fFontCache = new GrFontCache(fGpu);
1954
1955 fLastDrawCategory = kUnbuffered_DrawCategory;
1956
1957 fDrawBuffer = NULL;
1958 fDrawBufferVBAllocPool = NULL;
1959 fDrawBufferIBAllocPool = NULL;
1960
1961 fAAFillRectIndexBuffer = NULL;
1962 fAAStrokeRectIndexBuffer = NULL;
1963
1964 this->setupDrawBuffer();
1965 }
1966
1967 void GrContext::setupDrawBuffer() {
1968
1969 GrAssert(NULL == fDrawBuffer);
1970 GrAssert(NULL == fDrawBufferVBAllocPool);
1971 GrAssert(NULL == fDrawBufferIBAllocPool);
1972
1973 #if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT
1974 fDrawBufferVBAllocPool =
1975 new GrVertexBufferAllocPool(fGpu, false,
1976 DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1977 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS);
1978 fDrawBufferIBAllocPool =
1979 new GrIndexBufferAllocPool(fGpu, false,
1980 DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1981 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS);
1982
1983 fDrawBuffer = new GrInOrderDrawBuffer(fGpu,
1984 fDrawBufferVBAllocPool,
1985 fDrawBufferIBAllocPool);
1986 #endif
1987
1988 #if BATCH_RECT_TO_RECT
1989 fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
1990 #endif
1991 }
1992
1993 GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
1994 GrDrawTarget* target;
1995 #if DEFER_TEXT_RENDERING
1996 target = prepareToDraw(paint, kText_DrawCategory);
1997 #else
1998 target = prepareToDraw(paint, kUnbuffered_DrawCategory);
1999 #endif
2000 this->setPaint(paint, target);
2001 return target;
2002 }
2003
2004 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
2005 return fGpu->getQuadIndexBuffer();
2006 }
2007
2008 void GrContext::convolve(GrTexture* texture,
2009 const SkRect& rect,
2010 const float* kernel,
2011 int kernelWidth,
2012 GrSamplerState::FilterDirection direction) {
2013 ASSERT_OWNED_RESOURCE(texture);
2014
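    // Apply a 1D convolution: sample 'texture' with a convolution filter along
    // 'direction' and draw 'rect' into the currently bound render target.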
2015 GrDrawTarget::AutoStateRestore asr(fGpu);
2016 GrDrawState* drawState = fGpu->drawState();
2017 GrRenderTarget* target = drawState->getRenderTarget();
2018 drawState->reset();
2019 drawState->setRenderTarget(target);
2020 GrMatrix sampleM;
2021 sampleM.setIDiv(texture->width(), texture->height());
2022 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
2023 GrSamplerState::kConvolution_Filter,
2024 sampleM);
2025 drawState->sampler(0)->setConvolutionParams(kernelWidth, kernel);
2026 drawState->sampler(0)->setFilterDirection(direction);
2027 drawState->setTexture(0, texture);
2028 fGpu->drawSimpleRect(rect, NULL, 1 << 0);
2029 }
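// A hedged usage sketch (not part of this file): a separable blur would run
// convolve() once per axis over the same rect, e.g.
//     context->convolve(texture, rect, kernel, kernelWidth,
//                       GrSamplerState::kX_FilterDirection);
//     context->convolve(texture, rect, kernel, kernelWidth,
//                       GrSamplerState::kY_FilterDirection);
// This assumes GrSamplerState exposes kX_FilterDirection / kY_FilterDirection
// values, and that the caller has already bound the destination render target.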
2030
2031 void GrContext::applyMorphology(GrTexture* texture,
2032 const SkRect& rect,
2033 int radius,
2034 GrSamplerState::Filter filter,
2035 GrSamplerState::FilterDirection direction) {
2036 ASSERT_OWNED_RESOURCE(texture);
2037 GrAssert(filter == GrSamplerState::kErode_Filter ||
2038 filter == GrSamplerState::kDilate_Filter);
2039
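    // Same drawing pattern as convolve(): draw 'rect' into the current render
    // target while sampling 'texture' with an erode or dilate filter of the
    // given radius along 'direction'.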
2040 GrDrawTarget::AutoStateRestore asr(fGpu);
2041 GrDrawState* drawState = fGpu->drawState();
2042 GrRenderTarget* target = drawState->getRenderTarget();
2043 drawState->reset();
2044 drawState->setRenderTarget(target);
2045 GrMatrix sampleM;
2046 sampleM.setIDiv(texture->width(), texture->height());
2047 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
2048 filter,
2049 sampleM);
2050 drawState->sampler(0)->setMorphologyRadius(radius);
2051 drawState->sampler(0)->setFilterDirection(direction);
2052 drawState->setTexture(0, texture);
2053 fGpu->drawSimpleRect(rect, NULL, 1 << 0);
2054 }
2055
2056 ///////////////////////////////////////////////////////////////////////////////
2057