1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "GrGpuGL.h"
10 #include "GrGLStencilBuffer.h"
11 #include "GrGLPath.h"
12 #include "GrGLShaderBuilder.h"
13 #include "GrTemplates.h"
14 #include "GrTypes.h"
15 #include "SkStrokeRec.h"
16 #include "SkTemplates.h"
17
18 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
19 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
20
21 #define SKIP_CACHE_CHECK true
22
23 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
24 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
25 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
26 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
27 #else
28 #define CLEAR_ERROR_BEFORE_ALLOC(iface)
29 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
30 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
31 #endif
32
33
34 ///////////////////////////////////////////////////////////////////////////////
35
// Maps GrBlendCoeff enum values to the corresponding GL blend-function enums.
// Indexed directly by GrBlendCoeff; the required ordering is enforced by the
// GR_STATIC_ASSERTs in GrGpuGL::BlendCoeffReferencesConstant() below.
static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,
};
58
BlendCoeffReferencesConstant(GrBlendCoeff coeff)59 bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
60 static const bool gCoeffReferencesBlendConst[] = {
61 false,
62 false,
63 false,
64 false,
65 false,
66 false,
67 false,
68 false,
69 false,
70 false,
71 true,
72 true,
73 true,
74 true,
75
76 // extended blend coeffs
77 false,
78 false,
79 false,
80 false,
81 };
82 return gCoeffReferencesBlendConst[coeff];
83 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
84 GR_ARRAY_COUNT(gCoeffReferencesBlendConst));
85
86 GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
87 GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
88 GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
89 GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
90 GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
91 GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
92 GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
93 GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
94 GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
95 GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
96 GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
97 GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
98 GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
99 GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
100
101 GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
102 GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
103 GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
104 GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
105
106 // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope
107 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
108 GR_ARRAY_COUNT(gXfermodeCoeff2Blend));
109 }
110
111 ///////////////////////////////////////////////////////////////////////////////
112
113 static bool gPrintStartupSpew;
114
// Constructs the GL backend for 'context' using the (already initialized)
// GL context description 'ctx'. Sizes the cached-GL-state arrays from the
// reported hardware limits and creates the shader program cache.
GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
    : GrGpu(context)
    , fGLContext(ctx) {

    SkASSERT(ctx.isInitialized());

    // Share the caps object owned by the context info.
    fCaps.reset(SkRef(ctx.info().caps()));

    // Size the tracked-GL-state arrays to the hardware limits.
    fHWBoundTextures.reset(ctx.info().caps()->maxFragmentTextureUnits());
    fHWTexGenSettings.reset(ctx.info().caps()->maxFixedFunctionTextureCoords());

    // Start from a clean GL error state.
    GrGLClearErr(fGLContext.interface());

    if (gPrintStartupSpew) {
        const GrGLubyte* vendor;
        const GrGLubyte* renderer;
        const GrGLubyte* version;
        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
        GL_CALL_RET(version, GetString(GR_GL_VERSION));
        GrPrintf("------------------------- create GrGpuGL %p --------------\n",
                 this);
        GrPrintf("------ VENDOR %s\n", vendor);
        GrPrintf("------ RENDERER %s\n", renderer);
        GrPrintf("------ VERSION %s\n",  version);
        GrPrintf("------ EXTENSIONS\n");
        ctx.info().extensions().print();
        GrPrintf("\n");
        GrPrintf(ctx.info().caps()->dump().c_str());
    }

    fProgramCache = SkNEW_ARGS(ProgramCache, (this));

    SkASSERT(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt);

    fLastSuccessfulStencilFmtIdx = 0;
    fHWProgramID = 0;  // no GL program bound yet
}
153
// Tears down the GL backend. Ordering matters: unbind the program before the
// cache deletes it, and release geometry/resources before the base-class
// destructor runs.
GrGpuGL::~GrGpuGL() {
    if (0 != fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        SkASSERT(fHWProgramID == fCurrentProgram->programID());
        GL_CALL(UseProgram(0));
    }

    delete fProgramCache;

    // This must be called before the GrDrawTarget destructor
    this->releaseGeometry();
    // This subclass must do this before the base class destructor runs
    // since we will unref the GrGLInterface.
    this->releaseResources();
}
170
171 ///////////////////////////////////////////////////////////////////////////////
172
173
preferredReadPixelsConfig(GrPixelConfig readConfig,GrPixelConfig surfaceConfig) const174 GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig,
175 GrPixelConfig surfaceConfig) const {
176 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
177 return kBGRA_8888_GrPixelConfig;
178 } else if (fGLContext.info().isMesa() &&
179 GrBytesPerPixel(readConfig) == 4 &&
180 GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
181 // Mesa 3D takes a slow path on when reading back BGRA from an RGBA surface and vice-versa.
182 // Perhaps this should be guarded by some compiletime or runtime check.
183 return surfaceConfig;
184 } else if (readConfig == kBGRA_8888_GrPixelConfig &&
185 !this->glCaps().readPixelsSupported(this->glInterface(),
186 GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) {
187 return kRGBA_8888_GrPixelConfig;
188 } else {
189 return readConfig;
190 }
191 }
192
preferredWritePixelsConfig(GrPixelConfig writeConfig,GrPixelConfig surfaceConfig) const193 GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig,
194 GrPixelConfig surfaceConfig) const {
195 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
196 return kBGRA_8888_GrPixelConfig;
197 } else {
198 return writeConfig;
199 }
200 }
201
canWriteTexturePixels(const GrTexture * texture,GrPixelConfig srcConfig) const202 bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
203 if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
204 return false;
205 }
206 if (srcConfig != texture->config() && kES_GrGLBinding == this->glBinding()) {
207 // In general ES2 requires the internal format of the texture and the format of the src
208 // pixels to match. However, It may or may not be possible to upload BGRA data to a RGBA
209 // texture. It depends upon which extension added BGRA. The Apple extension allows it
210 // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
211 // internal format).
212 if (this->glCaps().bgraFormatSupport() &&
213 !this->glCaps().bgraIsInternalFormat() &&
214 kBGRA_8888_GrPixelConfig == srcConfig &&
215 kRGBA_8888_GrPixelConfig == texture->config()) {
216 return true;
217 } else {
218 return false;
219 }
220 } else {
221 return true;
222 }
223 }
224
// Whether reading the whole surface is faster than a sub-rect read on this
// platform; tuned at compile time via GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL.
bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const {
    return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
}
228
// Invalidates cached GL state and restores the GL defaults we rely on, for
// each state category named in resetBits (a mask of GrGLBackendState bits).
// Called when external code may have changed the GL context's state.
void GrGpuGL::onResetContext(uint32_t resetBits) {
    // we don't use the zb at all
    if (resetBits & kMisc_GrGLBackendState) {
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        fHWDrawFace = GrDrawState::kInvalid_DrawFace;
        fHWDitherEnabled = kUnknown_TriState;

        if (kDesktop_GrGLBinding == this->glBinding()) {
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
            // currently part of our gl interface. There are probably others as
            // well.
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
    }

    if (resetBits & kAA_GrGLBackendState) {
        fHWAAState.invalidate();
    }

    // Texture unit tracking is always invalidated, regardless of resetBits.
    fHWActiveTextureUnitIdx = -1; // invalid

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextures.count(); ++s) {
            fHWBoundTextures[s] = NULL;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWGeometryState.invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTarget = NULL;
    }

    if (resetBits & (kFixedFunction_GrGLBackendState | kPathRendering_GrGLBackendState)) {
        if (this->glCaps().fixedFunctionSupport()) {
            fHWProjectionMatrixState.invalidate();
            // we don't use the model view matrix.
            GL_CALL(MatrixMode(GR_GL_MODELVIEW));
            GL_CALL(LoadIdentity());

            // Disable all tex-gen state on every fixed-function texture unit.
            for (int i = 0; i < this->glCaps().maxFixedFunctionTextureCoords(); ++i) {
                GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + i));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_S));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_T));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_Q));
                GL_CALL(Disable(GR_GL_TEXTURE_GEN_R));
                if (this->caps()->pathRenderingSupport()) {
                    GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
                }
                fHWTexGenSettings[i].fMode = GR_GL_NONE;
                fHWTexGenSettings[i].fNumComponents = 0;
            }
            fHWActiveTexGenSets = 0;
        }
        if (this->caps()->pathRenderingSupport()) {
            fHWPathStencilSettings.invalidate();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().unpackFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fSharedGLProgramState.invalidate();
    }
}
350
351 namespace {
352
resolve_origin(GrSurfaceOrigin origin,bool renderTarget)353 GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
354 // By default, GrRenderTargets are GL's normal orientation so that they
355 // can be drawn to by the outside world without the client having
356 // to render upside down.
357 if (kDefault_GrSurfaceOrigin == origin) {
358 return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
359 } else {
360 return origin;
361 }
362 }
363
364 }
365
// Wraps a client-created GL texture (desc.fTextureHandle) in a GrGLTexture,
// optionally building render-target FBOs around it when
// kRenderTarget_GrBackendTextureFlag is set. Returns NULL if the config is
// unsupported, the handle is 0, or the size exceeds the max texture size.
GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
    // Verify we have GL formats for this config before accepting the texture.
    if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
        return NULL;
    }

    if (0 == desc.fTextureHandle) {
        return NULL;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return NULL;
    }

    GrGLTexture::Desc glTexDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    glTexDesc.fFlags = (GrTextureFlags) desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fSampleCnt = desc.fSampleCnt;
    glTexDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
    glTexDesc.fIsWrapped = true;  // we don't own or delete the GL texture
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // FIXME: this should be calling resolve_origin(), but Chrome code is currently
    // assuming the old behaviour, which is that backend textures are always
    // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        glTexDesc.fOrigin = desc.fOrigin;
    }

    GrGLTexture* texture = NULL;
    if (renderTarget) {
        // Build the FBO(s) that let us render to this wrapped texture.
        GrGLRenderTarget::Desc glRTDesc;
        glRTDesc.fRTFBOID = 0;
        glRTDesc.fTexFBOID = 0;
        glRTDesc.fMSColorRenderbufferID = 0;
        glRTDesc.fConfig = desc.fConfig;
        glRTDesc.fSampleCnt = desc.fSampleCnt;
        glRTDesc.fOrigin = glTexDesc.fOrigin;
        glRTDesc.fCheckAllocation = false;
        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            return NULL;
        }
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    if (NULL == texture) {
        return NULL;
    }

    return texture;
}
426
onWrapBackendRenderTarget(const GrBackendRenderTargetDesc & desc)427 GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
428 GrGLRenderTarget::Desc glDesc;
429 glDesc.fConfig = desc.fConfig;
430 glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle);
431 glDesc.fMSColorRenderbufferID = 0;
432 glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
433 glDesc.fSampleCnt = desc.fSampleCnt;
434 glDesc.fIsWrapped = true;
435 glDesc.fCheckAllocation = false;
436
437 glDesc.fOrigin = resolve_origin(desc.fOrigin, true);
438 GrGLIRect viewport;
439 viewport.fLeft = 0;
440 viewport.fBottom = 0;
441 viewport.fWidth = desc.fWidth;
442 viewport.fHeight = desc.fHeight;
443
444 GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget,
445 (this, glDesc, viewport));
446 if (desc.fStencilBits) {
447 GrGLStencilBuffer::Format format;
448 format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
449 format.fPacked = false;
450 format.fStencilBits = desc.fStencilBits;
451 format.fTotalBits = desc.fStencilBits;
452 static const bool kIsSBWrapped = false;
453 GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
454 (this,
455 kIsSBWrapped,
456 0,
457 desc.fWidth,
458 desc.fHeight,
459 desc.fSampleCnt,
460 format));
461 tgt->setStencilBuffer(sb);
462 sb->unref();
463 }
464 return tgt;
465 }
466
467 ////////////////////////////////////////////////////////////////////////////////
468
onWriteTexturePixels(GrTexture * texture,int left,int top,int width,int height,GrPixelConfig config,const void * buffer,size_t rowBytes)469 bool GrGpuGL::onWriteTexturePixels(GrTexture* texture,
470 int left, int top, int width, int height,
471 GrPixelConfig config, const void* buffer,
472 size_t rowBytes) {
473 if (NULL == buffer) {
474 return false;
475 }
476 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
477
478 this->setScratchTextureUnit();
479 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
480 GrGLTexture::Desc desc;
481 desc.fFlags = glTex->desc().fFlags;
482 desc.fWidth = glTex->width();
483 desc.fHeight = glTex->height();
484 desc.fConfig = glTex->config();
485 desc.fSampleCnt = glTex->desc().fSampleCnt;
486 desc.fTextureID = glTex->textureID();
487 desc.fOrigin = glTex->origin();
488
489 if (this->uploadTexData(desc, false,
490 left, top, width, height,
491 config, buffer, rowBytes)) {
492 texture->dirtyMipMaps(true);
493 return true;
494 } else {
495 return false;
496 }
497 }
498
499 namespace {
adjust_pixel_ops_params(int surfaceWidth,int surfaceHeight,size_t bpp,int * left,int * top,int * width,int * height,const void ** data,size_t * rowBytes)500 bool adjust_pixel_ops_params(int surfaceWidth,
501 int surfaceHeight,
502 size_t bpp,
503 int* left, int* top, int* width, int* height,
504 const void** data,
505 size_t* rowBytes) {
506 if (!*rowBytes) {
507 *rowBytes = *width * bpp;
508 }
509
510 SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
511 SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);
512
513 if (!subRect.intersect(bounds)) {
514 return false;
515 }
516 *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
517 (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);
518
519 *left = subRect.fLeft;
520 *top = subRect.fTop;
521 *width = subRect.width();
522 *height = subRect.height();
523 return true;
524 }
525
check_alloc_error(const GrTextureDesc & desc,const GrGLInterface * interface)526 GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
527 if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
528 return GR_GL_GET_ERROR(interface);
529 } else {
530 return CHECK_ALLOC_ERROR(interface);
531 }
532 }
533
534 }
535
// Uploads pixel data into the 2D texture described by desc, which the caller
// has already bound on the scratch texture unit. When isNewTexture is true
// the texture storage is (re)allocated via TexImage2D/TexStorage2D; otherwise
// the sub-rect is updated with TexSubImage2D. Handles row-pitch trimming and
// y-flipping (in software when the driver lacks GL_UNPACK_FLIP_Y). Returns
// false if the rect misses the surface, the config has no GL formats, a
// paletted texture would be partially updated, or allocation fails.
bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
                            bool isNewTexture,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(NULL != data || isNewTexture);

    size_t bpp = GrBytesPerPixel(dataConfig);
    if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                 &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    // paletted textures cannot be partially updated
    // We currently lazily create MIPMAPs when the we see a draw with
    // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
    // MIP levels are all created when the texture is created. So for now we don't use
    // texture storage.
    bool useTexStorage = false &&
                         isNewTexture &&
                         desc.fConfig != kIndex_8_GrPixelConfig &&
                         this->glCaps().texStorageSupport();

    if (useTexStorage && kDesktop_GrGLBinding == this->glBinding()) {
        // 565 is not a sized internal format on desktop GL. So on desktop with
        // 565 we always use an unsized internal format to let the system pick
        // the best sized format to convert the 565 data to. Since TexStorage
        // only allows sized internal formats we will instead use TexImage2D.
        useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
    }

    GrGLenum internalFormat;
    GrGLenum externalFormat;
    GrGLenum externalType;
    // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
    // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
    // size of the internal format whenever possible and so only use a sized internal format when
    // using texture storage.
    if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat,
                                 &externalFormat, &externalType)) {
        return false;
    }

    if (!isNewTexture && GR_GL_PALETTE8_RGBA8 == internalFormat) {
        // paletted textures cannot be updated
        return false;
    }

    /*
     *  check whether to allocate a temporary buffer for flipping y or
     *  because our srcData has extra bytes past each row. If so, we need
     *  to trim those off here, since GL ES may not let us specify
     *  GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;
    bool swFlipY = false;
    bool glFlipY = false;
    if (NULL != data) {
        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            if (this->glCaps().unpackFlipYSupport()) {
                glFlipY = true;
            } else {
                swFlipY = true;
            }
        }
        if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else {
            if (trimRowBytes != rowBytes || swFlipY) {
                // copy data into our new storage, skipping the trailing bytes
                size_t trimSize = height * trimRowBytes;
                const char* src = (const char*)data;
                if (swFlipY) {
                    // start at the last row; we'll walk backwards
                    src += (height - 1) * rowBytes;
                }
                char* dst = (char*)tempStorage.reset(trimSize);
                for (int y = 0; y < height; y++) {
                    memcpy(dst, src, trimRowBytes);
                    if (swFlipY) {
                        src -= rowBytes;
                    } else {
                        src += rowBytes;
                    }
                    dst += trimRowBytes;
                }
                // now point data to our copied version
                data = tempStorage.get();
            }
        }
        if (glFlipY) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
        }
        GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, static_cast<GrGLint>(bpp)));
    }
    bool succeeded = true;
    if (isNewTexture &&
        0 == left && 0 == top &&
        desc.fWidth == width && desc.fHeight == height) {
        // Full-surface upload on a new texture: allocate (and fill) in one call.
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        if (useTexStorage) {
            // We never resize  or change formats of textures.
            GL_ALLOC_CALL(this->glInterface(),
                          TexStorage2D(GR_GL_TEXTURE_2D,
                                       1, // levels
                                       internalFormat,
                                       desc.fWidth, desc.fHeight));
        } else {
            if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
                // data is the palette table followed by the index bytes
                GrGLsizei imageSize = desc.fWidth * desc.fHeight +
                                      kGrColorTableSize;
                GL_ALLOC_CALL(this->glInterface(),
                              CompressedTexImage2D(GR_GL_TEXTURE_2D,
                                                   0, // level
                                                   internalFormat,
                                                   desc.fWidth, desc.fHeight,
                                                   0, // border
                                                   imageSize,
                                                   data));
            } else {
                GL_ALLOC_CALL(this->glInterface(),
                              TexImage2D(GR_GL_TEXTURE_2D,
                                         0, // level
                                         internalFormat,
                                         desc.fWidth, desc.fHeight,
                                         0, // border
                                         externalFormat, externalType,
                                         data));
            }
        }
        GrGLenum error = check_alloc_error(desc, this->glInterface());
        if (error != GR_GL_NO_ERROR) {
            succeeded = false;
        } else {
            // if we have data and we used TexStorage to create the texture, we
            // now upload with TexSubImage.
            if (NULL != data && useTexStorage) {
                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                                      0, // level
                                      left, top,
                                      width, height,
                                      externalFormat, externalType,
                                      data));
            }
        }
    } else {
        // Sub-rect update; convert top to GL's bottom-up coordinates if flipping.
        if (swFlipY || glFlipY) {
            top = desc.fHeight - (top + height);
        }
        GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                              0, // level
                              left, top,
                              width, height,
                              externalFormat, externalType, data));
    }

    // Restore the pixel-store state we changed above.
    if (restoreGLRowLength) {
        SkASSERT(this->glCaps().unpackRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
    if (glFlipY) {
        GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    }
    return succeeded;
}
710
renderbuffer_storage_msaa(GrGLContext & ctx,int sampleCount,GrGLenum format,int width,int height)711 static bool renderbuffer_storage_msaa(GrGLContext& ctx,
712 int sampleCount,
713 GrGLenum format,
714 int width, int height) {
715 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
716 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.info().caps()->msFBOType());
717 #if GR_GL_IGNORE_ES3_MSAA
718 GL_ALLOC_CALL(ctx.interface(),
719 RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
720 sampleCount,
721 format,
722 width, height));
723 #else
724 switch (ctx.info().caps()->msFBOType()) {
725 case GrGLCaps::kDesktop_ARB_MSFBOType:
726 case GrGLCaps::kDesktop_EXT_MSFBOType:
727 case GrGLCaps::kES_3_0_MSFBOType:
728 GL_ALLOC_CALL(ctx.interface(),
729 RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
730 sampleCount,
731 format,
732 width, height));
733 break;
734 case GrGLCaps::kES_Apple_MSFBOType:
735 GL_ALLOC_CALL(ctx.interface(),
736 RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
737 sampleCount,
738 format,
739 width, height));
740 break;
741 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
742 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
743 GL_ALLOC_CALL(ctx.interface(),
744 RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
745 sampleCount,
746 format,
747 width, height));
748 break;
749 case GrGLCaps::kNone_MSFBOType:
750 GrCrash("Shouldn't be here if we don't support multisampled renderbuffers.");
751 break;
752 }
753 #endif
754 return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));;
755 }
756
// Creates the FBO(s), and an MSAA color renderbuffer when needed, for a
// render target backed by texture texID, filling in desc's object IDs. With
// MSAA renderbuffers two FBOs are created: one rendered to (fRTFBOID) and one
// holding the resolve texture (fTexFBOID); otherwise they are the same FBO.
// Binds FBOs (so fHWBoundRenderTarget is invalidated). On failure every GL
// object created here is deleted and false is returned.
bool GrGpuGL::createRenderTargetObjects(int width, int height,
                                        GrGLuint texID,
                                        GrGLRenderTarget::Desc* desc) {
    desc->fMSColorRenderbufferID = 0;
    desc->fRTFBOID = 0;
    desc->fTexFBOID = 0;
    desc->fIsWrapped = false;

    GrGLenum status;

    GrGLenum msColorFormat = 0; // suppress warning

    // MSAA requested but no multisampled-FBO support at all.
    if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
    if (!desc->fTexFBOID) {
        goto FAILED;
    }


    // If we are using multisampling we will create two FBOS. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
        if (!desc->fRTFBOID ||
            !desc->fMSColorRenderbufferID ||
            !this->configToGLFormats(desc->fConfig,
                                     // ES2 and ES3 require sized internal formats for rb storage.
                                     kES_GrGLBinding == this->glBinding(),
                                     &msColorFormat,
                                     NULL,
                                     NULL)) {
            goto FAILED;
        }
    } else {
        desc->fRTFBOID = desc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTarget = NULL;
    if (desc->fRTFBOID != desc->fTexFBOID) {
        SkASSERT(desc->fSampleCnt > 0);
        // Allocate the MSAA color buffer and attach it to the render FBO.
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
                               desc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(fGLContext,
                                       desc->fSampleCnt,
                                       msColorFormat,
                                       width, height)) {
            goto FAILED;
        }
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                      GR_GL_COLOR_ATTACHMENT0,
                                      GR_GL_RENDERBUFFER,
                                      desc->fMSColorRenderbufferID));
        // Only pay for the completeness check if required or this config has
        // never been verified as a color attachment.
        if (desc->fCheckAllocation ||
            !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext.info().caps()->markConfigAsValidColorAttachment(desc->fConfig);
        }
    }
    // Attach the texture to the texture/resolve FBO.
    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));

    if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                GR_GL_TEXTURE_2D,
                                                texID, 0, desc->fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     GR_GL_TEXTURE_2D,
                                     texID, 0));
    }
    if (desc->fCheckAllocation ||
        !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext.info().caps()->markConfigAsValidColorAttachment(desc->fConfig);
    }

    return true;

FAILED:
    // Delete whatever was created before the failure.
    if (desc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
    }
    if (desc->fRTFBOID != desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
    }
    if (desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
    }
    return false;
}
862
// Single funnel point for texture-creation failure; good to set a break-point
// here to know when createTexture fails.
static GrTexture* return_null_texture() {
//    SkDEBUGFAIL("null texture"); // uncomment to assert on failure
    return NULL;
}
868
869 #if 0 && defined(SK_DEBUG)
870 static size_t as_size_t(int x) {
871 return x;
872 }
873 #endif
874
onCreateTexture(const GrTextureDesc & desc,const void * srcData,size_t rowBytes)875 GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
876 const void* srcData,
877 size_t rowBytes) {
878
879 GrGLTexture::Desc glTexDesc;
880 GrGLRenderTarget::Desc glRTDesc;
881
882 // Attempt to catch un- or wrongly initialized sample counts;
883 SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
884 // We fail if the MSAA was requested and is not available.
885 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
886 //GrPrintf("MSAA RT requested but not supported on this platform.");
887 return return_null_texture();
888 }
889 // If the sample count exceeds the max then we clamp it.
890 glTexDesc.fSampleCnt = GrMin(desc.fSampleCnt, this->caps()->maxSampleCount());
891
892 glTexDesc.fFlags = desc.fFlags;
893 glTexDesc.fWidth = desc.fWidth;
894 glTexDesc.fHeight = desc.fHeight;
895 glTexDesc.fConfig = desc.fConfig;
896 glTexDesc.fIsWrapped = false;
897
898 glRTDesc.fMSColorRenderbufferID = 0;
899 glRTDesc.fRTFBOID = 0;
900 glRTDesc.fTexFBOID = 0;
901 glRTDesc.fIsWrapped = false;
902 glRTDesc.fConfig = glTexDesc.fConfig;
903 glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);
904
905 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);
906
907 glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
908 glRTDesc.fOrigin = glTexDesc.fOrigin;
909
910 glRTDesc.fSampleCnt = glTexDesc.fSampleCnt;
911 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() &&
912 desc.fSampleCnt) {
913 //GrPrintf("MSAA RT requested but not supported on this platform.");
914 return return_null_texture();
915 }
916
917 if (renderTarget) {
918 int maxRTSize = this->caps()->maxRenderTargetSize();
919 if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) {
920 return return_null_texture();
921 }
922 } else {
923 int maxSize = this->caps()->maxTextureSize();
924 if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
925 return return_null_texture();
926 }
927 }
928
929 GL_CALL(GenTextures(1, &glTexDesc.fTextureID));
930
931 if (!glTexDesc.fTextureID) {
932 return return_null_texture();
933 }
934
935 this->setScratchTextureUnit();
936 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));
937
938 if (renderTarget && this->glCaps().textureUsageSupport()) {
939 // provides a hint about how this texture will be used
940 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
941 GR_GL_TEXTURE_USAGE,
942 GR_GL_FRAMEBUFFER_ATTACHMENT));
943 }
944
945 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
946 // drivers have a bug where an FBO won't be complete if it includes a
947 // texture that is not mipmap complete (considering the filter in use).
948 GrGLTexture::TexParams initialTexParams;
949 // we only set a subset here so invalidate first
950 initialTexParams.invalidate();
951 initialTexParams.fMinFilter = GR_GL_NEAREST;
952 initialTexParams.fMagFilter = GR_GL_NEAREST;
953 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
954 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
955 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
956 GR_GL_TEXTURE_MAG_FILTER,
957 initialTexParams.fMagFilter));
958 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
959 GR_GL_TEXTURE_MIN_FILTER,
960 initialTexParams.fMinFilter));
961 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
962 GR_GL_TEXTURE_WRAP_S,
963 initialTexParams.fWrapS));
964 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
965 GR_GL_TEXTURE_WRAP_T,
966 initialTexParams.fWrapT));
967 if (!this->uploadTexData(glTexDesc, true, 0, 0,
968 glTexDesc.fWidth, glTexDesc.fHeight,
969 desc.fConfig, srcData, rowBytes)) {
970 GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
971 return return_null_texture();
972 }
973
974 GrGLTexture* tex;
975 if (renderTarget) {
976 // unbind the texture from the texture unit before binding it to the frame buffer
977 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
978
979 if (!this->createRenderTargetObjects(glTexDesc.fWidth,
980 glTexDesc.fHeight,
981 glTexDesc.fTextureID,
982 &glRTDesc)) {
983 GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
984 return return_null_texture();
985 }
986 tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
987 } else {
988 tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
989 }
990 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
991 #ifdef TRACE_TEXTURE_CREATION
992 GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
993 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
994 #endif
995 return tex;
996 }
997
998 namespace {
999
1000 const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;
1001
get_stencil_rb_sizes(const GrGLInterface * gl,GrGLStencilBuffer::Format * format)1002 void inline get_stencil_rb_sizes(const GrGLInterface* gl,
1003 GrGLStencilBuffer::Format* format) {
1004
1005 // we shouldn't ever know one size and not the other
1006 SkASSERT((kUnknownBitCount == format->fStencilBits) ==
1007 (kUnknownBitCount == format->fTotalBits));
1008 if (kUnknownBitCount == format->fStencilBits) {
1009 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1010 GR_GL_RENDERBUFFER_STENCIL_SIZE,
1011 (GrGLint*)&format->fStencilBits);
1012 if (format->fPacked) {
1013 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1014 GR_GL_RENDERBUFFER_DEPTH_SIZE,
1015 (GrGLint*)&format->fTotalBits);
1016 format->fTotalBits += format->fStencilBits;
1017 } else {
1018 format->fTotalBits = format->fStencilBits;
1019 }
1020 }
1021 }
1022 }
1023
// Allocates a stencil renderbuffer for 'rt' and attaches it to rt's FBO,
// trying each stencil format reported by the caps until one both allocates
// and yields a complete framebuffer. On success the buffer is transferred to
// the resource cache and installed on 'rt'. Returns false if every format
// fails (the renderbuffer name is then deleted).
bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
                                                 int width, int height) {

    // All internally created RTs are also textures. We don't create
    // SBs for a client's standalone RT (that is a RT that isn't also a texture).
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numSamples();
    GrGLuint sbID;
    GL_CALL(GenRenderbuffers(1, &sbID));
    if (!sbID) {
        return false;
    }

    int stencilFmtCnt = this->glCaps().stencilFormats().count();
    for (int i = 0; i < stencilFmtCnt; ++i) {
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
        // we start with the last stencil format that succeeded in hopes
        // that we won't go through this loop more than once after the
        // first (painful) stencil creation.
        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
        const GrGLCaps::StencilFormat& sFmt =
                this->glCaps().stencilFormats()[sIdx];
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // we do this "if" so that we don't call the multisample
        // version on a GL that doesn't have an MSAA extension.
        bool created;
        if (samples > 0) {
            created = renderbuffer_storage_msaa(fGLContext,
                                                samples,
                                                sFmt.fInternalFormat,
                                                width, height);
        } else {
            GL_ALLOC_CALL(this->glInterface(),
                          RenderbufferStorage(GR_GL_RENDERBUFFER,
                                              sFmt.fInternalFormat,
                                              width, height));
            created =
                (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
        }
        if (created) {
            // After sized formats we attempt an unsized format and take
            // whatever sizes GL gives us. In that case we query for the size.
            GrGLStencilBuffer::Format format = sFmt;
            get_stencil_rb_sizes(this->glInterface(), &format);
            static const bool kIsWrapped = false;
            SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
                                          (this, kIsWrapped, sbID, width, height,
                                          samples, format)));
            // Allocation succeeded; now make sure the FBO accepts this format.
            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
                fLastSuccessfulStencilFmtIdx = sIdx;
                sb->transferToCache();
                rt->setStencilBuffer(sb);
                return true;
            }
            sb->abandon(); // otherwise we lose sbID
        }
    }
    GL_CALL(DeleteRenderbuffers(1, &sbID));
    return false;
}
1087
attachStencilBufferToRenderTarget(GrStencilBuffer * sb,GrRenderTarget * rt)1088 bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
1089 GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;
1090
1091 GrGLuint fbo = glrt->renderFBOID();
1092
1093 if (NULL == sb) {
1094 if (NULL != rt->getStencilBuffer()) {
1095 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1096 GR_GL_STENCIL_ATTACHMENT,
1097 GR_GL_RENDERBUFFER, 0));
1098 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1099 GR_GL_DEPTH_ATTACHMENT,
1100 GR_GL_RENDERBUFFER, 0));
1101 #ifdef SK_DEBUG
1102 GrGLenum status;
1103 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1104 SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
1105 #endif
1106 }
1107 return true;
1108 } else {
1109 GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
1110 GrGLuint rb = glsb->renderbufferID();
1111
1112 fHWBoundRenderTarget = NULL;
1113 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
1114 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1115 GR_GL_STENCIL_ATTACHMENT,
1116 GR_GL_RENDERBUFFER, rb));
1117 if (glsb->format().fPacked) {
1118 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1119 GR_GL_DEPTH_ATTACHMENT,
1120 GR_GL_RENDERBUFFER, rb));
1121 } else {
1122 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1123 GR_GL_DEPTH_ATTACHMENT,
1124 GR_GL_RENDERBUFFER, 0));
1125 }
1126
1127 GrGLenum status;
1128 if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
1129 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1130 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1131 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1132 GR_GL_STENCIL_ATTACHMENT,
1133 GR_GL_RENDERBUFFER, 0));
1134 if (glsb->format().fPacked) {
1135 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1136 GR_GL_DEPTH_ATTACHMENT,
1137 GR_GL_RENDERBUFFER, 0));
1138 }
1139 return false;
1140 } else {
1141 fGLContext.info().caps()->markColorConfigAndStencilFormatAsVerified(
1142 rt->config(),
1143 glsb->format());
1144 }
1145 }
1146 return true;
1147 }
1148 }
1149
1150 ////////////////////////////////////////////////////////////////////////////////
1151
onCreateVertexBuffer(size_t size,bool dynamic)1152 GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) {
1153 GrGLVertexBuffer::Desc desc;
1154 desc.fDynamic = dynamic;
1155 desc.fSizeInBytes = size;
1156 desc.fIsWrapped = false;
1157
1158 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
1159 desc.fID = 0;
1160 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
1161 return vertexBuffer;
1162 } else {
1163 GL_CALL(GenBuffers(1, &desc.fID));
1164 if (desc.fID) {
1165 fHWGeometryState.setVertexBufferID(this, desc.fID);
1166 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1167 // make sure driver can allocate memory for this buffer
1168 GL_ALLOC_CALL(this->glInterface(),
1169 BufferData(GR_GL_ARRAY_BUFFER,
1170 (GrGLsizeiptr) desc.fSizeInBytes,
1171 NULL, // data ptr
1172 desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
1173 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1174 GL_CALL(DeleteBuffers(1, &desc.fID));
1175 this->notifyVertexBufferDelete(desc.fID);
1176 return NULL;
1177 }
1178 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
1179 return vertexBuffer;
1180 }
1181 return NULL;
1182 }
1183 }
1184
onCreateIndexBuffer(size_t size,bool dynamic)1185 GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) {
1186 GrGLIndexBuffer::Desc desc;
1187 desc.fDynamic = dynamic;
1188 desc.fSizeInBytes = size;
1189 desc.fIsWrapped = false;
1190
1191 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
1192 desc.fID = 0;
1193 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
1194 return indexBuffer;
1195 } else {
1196 GL_CALL(GenBuffers(1, &desc.fID));
1197 if (desc.fID) {
1198 fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
1199 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1200 // make sure driver can allocate memory for this buffer
1201 GL_ALLOC_CALL(this->glInterface(),
1202 BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
1203 (GrGLsizeiptr) desc.fSizeInBytes,
1204 NULL, // data ptr
1205 desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
1206 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1207 GL_CALL(DeleteBuffers(1, &desc.fID));
1208 this->notifyIndexBufferDelete(desc.fID);
1209 return NULL;
1210 }
1211 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
1212 return indexBuffer;
1213 }
1214 return NULL;
1215 }
1216 }
1217
onCreatePath(const SkPath & inPath,const SkStrokeRec & stroke)1218 GrPath* GrGpuGL::onCreatePath(const SkPath& inPath, const SkStrokeRec& stroke) {
1219 SkASSERT(this->caps()->pathRenderingSupport());
1220 return SkNEW_ARGS(GrGLPath, (this, inPath, stroke));
1221 }
1222
flushScissor()1223 void GrGpuGL::flushScissor() {
1224 if (fScissorState.fEnabled) {
1225 // Only access the RT if scissoring is being enabled. We can call this before performing
1226 // a glBitframebuffer for a surface->surface copy, which requires no RT to be bound to the
1227 // GrDrawState.
1228 const GrDrawState& drawState = this->getDrawState();
1229 const GrGLRenderTarget* rt =
1230 static_cast<const GrGLRenderTarget*>(drawState.getRenderTarget());
1231
1232 SkASSERT(NULL != rt);
1233 const GrGLIRect& vp = rt->getViewport();
1234 GrGLIRect scissor;
1235 scissor.setRelativeTo(vp,
1236 fScissorState.fRect.fLeft,
1237 fScissorState.fRect.fTop,
1238 fScissorState.fRect.width(),
1239 fScissorState.fRect.height(),
1240 rt->origin());
1241 // if the scissor fully contains the viewport then we fall through and
1242 // disable the scissor test.
1243 if (!scissor.contains(vp)) {
1244 if (fHWScissorSettings.fRect != scissor) {
1245 scissor.pushToGLScissor(this->glInterface());
1246 fHWScissorSettings.fRect = scissor;
1247 }
1248 if (kYes_TriState != fHWScissorSettings.fEnabled) {
1249 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1250 fHWScissorSettings.fEnabled = kYes_TriState;
1251 }
1252 return;
1253 }
1254 }
1255 if (kNo_TriState != fHWScissorSettings.fEnabled) {
1256 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
1257 fHWScissorSettings.fEnabled = kNo_TriState;
1258 return;
1259 }
1260 }
1261
onClear(const SkIRect * rect,GrColor color,bool canIgnoreRect)1262 void GrGpuGL::onClear(const SkIRect* rect, GrColor color, bool canIgnoreRect) {
1263 const GrDrawState& drawState = this->getDrawState();
1264 const GrRenderTarget* rt = drawState.getRenderTarget();
1265 // parent class should never let us get here with no RT
1266 SkASSERT(NULL != rt);
1267
1268 if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
1269 rect = NULL;
1270 }
1271
1272 SkIRect clippedRect;
1273 if (NULL != rect) {
1274 // flushScissor expects rect to be clipped to the target.
1275 clippedRect = *rect;
1276 SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
1277 if (clippedRect.intersect(rtRect)) {
1278 rect = &clippedRect;
1279 } else {
1280 return;
1281 }
1282 }
1283
1284 this->flushRenderTarget(rect);
1285 GrAutoTRestore<ScissorState> asr(&fScissorState);
1286 fScissorState.fEnabled = (NULL != rect);
1287 if (fScissorState.fEnabled) {
1288 fScissorState.fRect = *rect;
1289 }
1290 this->flushScissor();
1291
1292 GrGLfloat r, g, b, a;
1293 static const GrGLfloat scale255 = 1.f / 255.f;
1294 a = GrColorUnpackA(color) * scale255;
1295 GrGLfloat scaleRGB = scale255;
1296 r = GrColorUnpackR(color) * scaleRGB;
1297 g = GrColorUnpackG(color) * scaleRGB;
1298 b = GrColorUnpackB(color) * scaleRGB;
1299
1300 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
1301 fHWWriteToColor = kYes_TriState;
1302 GL_CALL(ClearColor(r, g, b, a));
1303 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1304 }
1305
// Clears the entire stencil buffer of the current render target to 0.
// No-op when no render target is set.
void GrGpuGL::clearStencil() {
    if (NULL == this->getDrawState().getRenderTarget()) {
        return;
    }

    this->flushRenderTarget(&SkIRect::EmptyIRect());

    // The clear must not be scissored; restore the scissor state on exit.
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = false;
    this->flushScissor();

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(0));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // The GL stencil mask was changed behind the cache's back.
    fHWStencilSettings.invalidate();
}
1322
// Writes the clip bit (the high bit of the stencil buffer) within 'rect':
// set when 'insideClip', cleared otherwise. Requires a stencil buffer.
void GrGpuGL::clearStencilClip(const SkIRect& rect, bool insideClip) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    SkASSERT(NULL != rt);

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(NULL != rt->getStencilBuffer());
    GrGLint stencilBitCount = rt->getStencilBuffer()->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrDrawTarget says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    // The clip bit is the top bit of the stencil buffer.
    GrGLint value;
    if (insideClip) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    this->flushRenderTarget(&SkIRect::EmptyIRect());

    // Scissor the clear to 'rect'; restore the scissor state on exit.
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = true;
    fScissorState.fRect = rect;
    this->flushScissor();

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // The GL stencil mask was changed behind the cache's back.
    fHWStencilSettings.invalidate();
}
1361
// Binds/flushes the current render target. The empty rect means no new area
// is flagged as needing resolve (see flushRenderTarget).
void GrGpuGL::onForceRenderTargetFlush() {
    this->flushRenderTarget(&SkIRect::EmptyIRect());
}
1365
readPixelsWillPayForYFlip(GrRenderTarget * renderTarget,int left,int top,int width,int height,GrPixelConfig config,size_t rowBytes) const1366 bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
1367 int left, int top,
1368 int width, int height,
1369 GrPixelConfig config,
1370 size_t rowBytes) const {
1371 // If this rendertarget is aready TopLeft, we don't need to flip.
1372 if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
1373 return false;
1374 }
1375
1376 // if GL can do the flip then we'll never pay for it.
1377 if (this->glCaps().packFlipYSupport()) {
1378 return false;
1379 }
1380
1381 // If we have to do memcpy to handle non-trim rowBytes then we
1382 // get the flip for free. Otherwise it costs.
1383 if (this->glCaps().packRowLengthSupport()) {
1384 return true;
1385 }
1386 // If we have to do memcpys to handle rowBytes then y-flip is free
1387 // Note the rowBytes might be tight to the passed in data, but if data
1388 // gets clipped in x to the target the rowBytes will no longer be tight.
1389 if (left >= 0 && (left + width) < renderTarget->width()) {
1390 return 0 == rowBytes ||
1391 GrBytesPerPixel(config) * width == rowBytes;
1392 } else {
1393 return false;
1394 }
1395 }
1396
// Reads back a rectangle of pixels from 'target' into 'buffer' with the rows
// in top-to-bottom order. Handles render-target resolve, non-tight rowBytes
// (via GL pack-row-length or a scratch buffer), and y-flip (via GL
// pack-reverse-row-order or a CPU pass). Returns false if the config can't be
// mapped to GL formats, the read params can't be adjusted to the target, or
// the target can't be resolved.
bool GrGpuGL::onReadPixels(GrRenderTarget* target,
                           int left, int top,
                           int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    GrGLenum format;
    GrGLenum type;
    bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
    if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
        return false;
    }
    size_t bpp = GrBytesPerPixel(config);
    // Clips the read rect to the target and adjusts buffer/rowBytes to match.
    if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
                                 &left, &top, &width, &height,
                                 const_cast<const void**>(&buffer),
                                 &rowBytes)) {
        return false;
    }

    // resolve the render target if necessary
    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
    GrDrawState::AutoRenderTargetRestore artr;
    switch (tgt->getResolveType()) {
        case GrGLRenderTarget::kCantResolve_ResolveType:
            return false;
        case GrGLRenderTarget::kAutoResolves_ResolveType:
            artr.set(this->drawState(), target);
            this->flushRenderTarget(&SkIRect::EmptyIRect());
            break;
        case GrGLRenderTarget::kCanResolve_ResolveType:
            this->onResolveRenderTarget(tgt);
            // we don't track the state of the READ FBO ID.
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
                                    tgt->textureFBOID()));
            break;
        default:
            GrCrash("Unknown resolve type");
    }

    const GrGLIRect& glvp = tgt->getViewport();

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(glvp, left, top, width, height, target->origin());

    size_t tightRowBytes = bpp * width;
    if (0 == rowBytes) {
        rowBytes = tightRowBytes;
    }
    size_t readDstRowBytes = tightRowBytes;
    void* readDst = buffer;

    // determine if GL can read using the passed rowBytes or if we need
    // a scratch buffer.
    SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
    if (rowBytes != tightRowBytes) {
        if (this->glCaps().packRowLengthSupport()) {
            SkASSERT(!(rowBytes % sizeof(GrColor)));
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
                                static_cast<GrGLint>(rowBytes / sizeof(GrColor))));
            readDstRowBytes = rowBytes;
        } else {
            // No pack-row-length: read tight into scratch, copy out below.
            scratch.reset(tightRowBytes * height);
            readDst = scratch.get();
        }
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        // GL flips the rows for us during the read.
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
    }
    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
                       readRect.fWidth, readRect.fHeight,
                       format, type, readDst));
    // Restore the pack state we changed above.
    if (readDstRowBytes != tightRowBytes) {
        SkASSERT(this->glCaps().packRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
        // GL already flipped; no CPU flip needed below.
        flipY = false;
    }

    // now reverse the order of the rows, since GL's are bottom-to-top, but our
    // API presents top-to-bottom. We must preserve the padding contents. Note
    // that the above readPixels did not overwrite the padding.
    if (readDst == buffer) {
        SkASSERT(rowBytes == readDstRowBytes);
        if (flipY) {
            scratch.reset(tightRowBytes);
            void* tmpRow = scratch.get();
            // flip y in-place by rows
            const int halfY = height >> 1;
            char* top = reinterpret_cast<char*>(buffer);
            char* bottom = top + (height - 1) * rowBytes;
            for (int y = 0; y < halfY; y++) {
                memcpy(tmpRow, top, tightRowBytes);
                memcpy(top, bottom, tightRowBytes);
                memcpy(bottom, tmpRow, tightRowBytes);
                top += rowBytes;
                bottom -= rowBytes;
            }
        }
    } else {
        SkASSERT(readDst != buffer); SkASSERT(rowBytes != tightRowBytes);
        // copy from readDst to buffer while flipping y
        // const int halfY = height >> 1;
        const char* src = reinterpret_cast<const char*>(readDst);
        char* dst = reinterpret_cast<char*>(buffer);
        if (flipY) {
            dst += (height-1) * rowBytes;
        }
        for (int y = 0; y < height; y++) {
            memcpy(dst, src, tightRowBytes);
            src += readDstRowBytes;
            if (!flipY) {
                dst += rowBytes;
            } else {
                dst -= rowBytes;
            }
        }
    }
    return true;
}
1520
// Binds the draw state's render target FBO (and viewport) if it differs from
// the cached HW binding, and flags the dirty area given by 'bound' as needing
// resolve. 'bound' semantics: NULL = whole target dirty; empty rect = nothing
// new dirty; otherwise the given rect.
void GrGpuGL::flushRenderTarget(const SkIRect* bound) {

    GrGLRenderTarget* rt =
        static_cast<GrGLRenderTarget*>(this->drawState()->getRenderTarget());
    SkASSERT(NULL != rt);

    if (fHWBoundRenderTarget != rt) {
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
#ifdef SK_DEBUG
        // don't do this check in Chromium -- this is causing
        // lots of repeated command buffer flushes when the compositor is
        // rendering with Ganesh, which is really slow; even too slow for
        // Debug mode.
        if (!this->glContext().info().isChromium()) {
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status);
            }
        }
#endif
        fHWBoundRenderTarget = rt;
        // Keep the GL viewport in sync with the newly bound target.
        const GrGLIRect& vp = rt->getViewport();
        if (fHWViewport != vp) {
            vp.pushToGLViewport(this->glInterface());
            fHWViewport = vp;
        }
    }
    if (NULL == bound || !bound->isEmpty()) {
        rt->flagAsNeedingResolve(bound);
    }

    // Rendering into a texture invalidates its mip chain.
    GrTexture *texture = rt->asTexture();
    if (texture) {
        texture->dirtyMipMaps(true);
    }
}
1558
// Maps primitive-type values to GL draw modes; indexed directly by
// info.primitiveType() in onGpuDraw() (bounds-checked there with SkASSERT).
GrGLenum gPrimitiveType2GLMode[] = {
    GR_GL_TRIANGLES,
    GR_GL_TRIANGLE_STRIP,
    GR_GL_TRIANGLE_FAN,
    GR_GL_POINTS,
    GR_GL_LINES,
    GR_GL_LINE_STRIP
};
1567
// Debug aid: when set to 1, onGpuDraw() swaps buffers after every draw so
// each individual draw can be observed (see the #if SWAP_PER_DRAW block
// there).
#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
    #if defined(SK_BUILD_FOR_MAC)
        #include <AGL/agl.h>
    #elif defined(SK_BUILD_FOR_WIN32)
        #include <gl/GL.h>
        // Swaps the buffers of every top-level window belonging to the
        // current process.
        void SwapBuf() {
            DWORD procID = GetCurrentProcessId();
            HWND hwnd = GetTopWindow(GetDesktopWindow());
            while(hwnd) {
                DWORD wndProcID = 0;
                GetWindowThreadProcessId(hwnd, &wndProcID);
                if(wndProcID == procID) {
                    SwapBuffers(GetDC(hwnd));
                }
                hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
            }
        }
    #endif
#endif
1589
// Issues the GL draw call for 'info'. setupGeometry() binds the vertex
// attributes (accounting for startVertex); indexed draws use 16-bit indices
// offset into the bound index buffer.
void GrGpuGL::onGpuDraw(const DrawInfo& info) {
    size_t indexOffsetInBytes;
    this->setupGeometry(info, &indexOffsetInBytes);

    SkASSERT((size_t)info.primitiveType() < GR_ARRAY_COUNT(gPrimitiveType2GLMode));

    if (info.isIndexed()) {
        // Byte offset into the index buffer: base offset plus startIndex
        // scaled by the 16-bit index size.
        GrGLvoid* indices =
            reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex());
        // info.startVertex() was accounted for by setupGeometry.
        GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()],
                             info.indexCount(),
                             GR_GL_UNSIGNED_SHORT,
                             indices));
    } else {
        // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for
        // startVertex in the DrawElements case. So we always rely on setupGeometry to have
        // accounted for startVertex.
        GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount()));
    }
#if SWAP_PER_DRAW
    // Debug aid: make each draw visible by swapping after it (see the
    // SWAP_PER_DRAW definition above).
    glFlush();
    #if defined(SK_BUILD_FOR_MAC)
        aglSwapBuffers(aglGetCurrentContext());
        int set_a_break_pt_here = 9;
        aglSwapBuffers(aglGetCurrentContext());
    #elif defined(SK_BUILD_FOR_WIN32)
        SwapBuf();
        int set_a_break_pt_here = 9;
        SwapBuf();
    #endif
#endif
}
1623
gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op)1624 static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) {
1625 switch (op) {
1626 default:
1627 GrCrash("Unexpected path fill.");
1628 /* fallthrough */;
1629 case kIncClamp_StencilOp:
1630 return GR_GL_COUNT_UP;
1631 case kInvert_StencilOp:
1632 return GR_GL_INVERT;
1633 }
1634 }
1635
onGpuStencilPath(const GrPath * path,SkPath::FillType fill)1636 void GrGpuGL::onGpuStencilPath(const GrPath* path, SkPath::FillType fill) {
1637 SkASSERT(this->caps()->pathRenderingSupport());
1638
1639 GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
1640 SkASSERT(NULL != this->drawState()->getRenderTarget());
1641 SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());
1642
1643 flushPathStencilSettings(fill);
1644
1645 // Decide how to manipulate the stencil buffer based on the fill rule.
1646 SkASSERT(!fHWPathStencilSettings.isTwoSided());
1647
1648 GrGLenum fillMode =
1649 gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
1650 GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);
1651 GL_CALL(StencilFillPath(id, fillMode, writeMask));
1652 }
1653
// Draws 'path' with the GL path rendering extension: stencils the fill
// and/or stroke, then covers. Inverse fills are covered with a (possibly
// view-transformed) target-sized rect instead of the path's bounding box.
void GrGpuGL::onGpuDrawPath(const GrPath* path, SkPath::FillType fill) {
    SkASSERT(this->caps()->pathRenderingSupport());

    GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
    SkASSERT(NULL != this->drawState()->getRenderTarget());
    SkASSERT(NULL != this->drawState()->getRenderTarget()->getStencilBuffer());
    // Path rendering bypasses the vertex shader.
    SkASSERT(!fCurrentProgram->hasVertexShader());

    flushPathStencilSettings(fill);
    const SkStrokeRec& stroke = path->getStroke();

    SkPath::FillType nonInvertedFill = SkPath::ConvertToNonInverseFillType(fill);
    SkASSERT(!fHWPathStencilSettings.isTwoSided());
    GrGLenum fillMode =
        gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.passOp(GrStencilSettings::kFront_Face));
    GrGLint writeMask = fHWPathStencilSettings.writeMask(GrStencilSettings::kFront_Face);

    // Stencil the fill and/or the stroke as the stroke style dictates.
    if (stroke.isFillStyle() || SkStrokeRec::kStrokeAndFill_Style == stroke.getStyle()) {
        GL_CALL(StencilFillPath(id, fillMode, writeMask));
    }
    if (stroke.needToApply()) {
        GL_CALL(StencilStrokePath(id, 0xffff, writeMask));
    }

    if (nonInvertedFill == fill) {
        // Non-inverse fill: cover just the path's bounding box.
        if (stroke.needToApply()) {
            GL_CALL(CoverStrokePath(id, GR_GL_BOUNDING_BOX));
        } else {
            GL_CALL(CoverFillPath(id, GR_GL_BOUNDING_BOX));
        }
    } else {
        // Inverse fill: cover the whole render target with a rect.
        GrDrawState* drawState = this->drawState();
        GrDrawState::AutoViewMatrixRestore avmr;
        SkRect bounds = SkRect::MakeLTRB(0, 0,
                                         SkIntToScalar(drawState->getRenderTarget()->width()),
                                         SkIntToScalar(drawState->getRenderTarget()->height()));
        SkMatrix vmi;
        // mapRect through persp matrix may not be correct
        if (!drawState->getViewMatrix().hasPerspective() && drawState->getViewInverse(&vmi)) {
            // Pre-map the rect through the inverse view so the draw's view
            // matrix lands it back on the full target.
            vmi.mapRect(&bounds);
            // theoretically could set bloat = 0, instead leave it because of matrix inversion
            // precision.
            SkScalar bloat = drawState->getViewMatrix().getMaxStretch() * SK_ScalarHalf;
            bounds.outset(bloat, bloat);
        } else {
            avmr.setIdentity(drawState);
        }

        this->drawSimpleRect(bounds, NULL);
    }
}
1705
// Resolves a multisampled render target's dirty region into its texture FBO.
// The mechanism depends on the MSFBO flavor: Apple's extension resolves via
// scissor + ResolveMultisampleFramebuffer, others via BlitFramebuffer.
void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
    if (rt->needsResolve()) {
        // Some extensions automatically resolves the texture when it is read.
        if (this->glCaps().usesMSAARenderBuffers()) {
            SkASSERT(rt->textureFBOID() != rt->renderFBOID());
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
            GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
            // make sure we go through flushRenderTarget() since we've modified
            // the bound DRAW FBO ID.
            fHWBoundRenderTarget = NULL;
            const GrGLIRect& vp = rt->getViewport();
            const SkIRect dirtyRect = rt->getResolveRect();
            GrGLIRect r;
            // Convert the dirty rect into viewport-relative GL coordinates.
            r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
                            dirtyRect.width(), dirtyRect.height(), target->origin());

            GrAutoTRestore<ScissorState> asr;
            if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
                // Apple's extension uses the scissor as the blit bounds.
                asr.reset(&fScissorState);
                fScissorState.fEnabled = true;
                fScissorState.fRect = dirtyRect;
                this->flushScissor();
                GL_CALL(ResolveMultisampleFramebuffer());
            } else {
                if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
                    // this respects the scissor during the blit, so disable it.
                    asr.reset(&fScissorState);
                    fScissorState.fEnabled = false;
                    this->flushScissor();
                }
                // 1:1 blit of the dirty region; no scaling, so NEAREST.
                int right = r.fLeft + r.fWidth;
                int top = r.fBottom + r.fHeight;
                GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
                                        r.fLeft, r.fBottom, right, top,
                                        GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            }
        }
        rt->flagAsResolved();
    }
}
1748
1749 namespace {
1750
gr_to_gl_stencil_func(GrStencilFunc basicFunc)1751 GrGLenum gr_to_gl_stencil_func(GrStencilFunc basicFunc) {
1752 static const GrGLenum gTable[] = {
1753 GR_GL_ALWAYS, // kAlways_StencilFunc
1754 GR_GL_NEVER, // kNever_StencilFunc
1755 GR_GL_GREATER, // kGreater_StencilFunc
1756 GR_GL_GEQUAL, // kGEqual_StencilFunc
1757 GR_GL_LESS, // kLess_StencilFunc
1758 GR_GL_LEQUAL, // kLEqual_StencilFunc,
1759 GR_GL_EQUAL, // kEqual_StencilFunc,
1760 GR_GL_NOTEQUAL, // kNotEqual_StencilFunc,
1761 };
1762 GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kBasicStencilFuncCount);
1763 GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
1764 GR_STATIC_ASSERT(1 == kNever_StencilFunc);
1765 GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
1766 GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
1767 GR_STATIC_ASSERT(4 == kLess_StencilFunc);
1768 GR_STATIC_ASSERT(5 == kLEqual_StencilFunc);
1769 GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
1770 GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
1771 SkASSERT((unsigned) basicFunc < kBasicStencilFuncCount);
1772
1773 return gTable[basicFunc];
1774 }
1775
gr_to_gl_stencil_op(GrStencilOp op)1776 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
1777 static const GrGLenum gTable[] = {
1778 GR_GL_KEEP, // kKeep_StencilOp
1779 GR_GL_REPLACE, // kReplace_StencilOp
1780 GR_GL_INCR_WRAP, // kIncWrap_StencilOp
1781 GR_GL_INCR, // kIncClamp_StencilOp
1782 GR_GL_DECR_WRAP, // kDecWrap_StencilOp
1783 GR_GL_DECR, // kDecClamp_StencilOp
1784 GR_GL_ZERO, // kZero_StencilOp
1785 GR_GL_INVERT, // kInvert_StencilOp
1786 };
1787 GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kStencilOpCount);
1788 GR_STATIC_ASSERT(0 == kKeep_StencilOp);
1789 GR_STATIC_ASSERT(1 == kReplace_StencilOp);
1790 GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
1791 GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
1792 GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
1793 GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
1794 GR_STATIC_ASSERT(6 == kZero_StencilOp);
1795 GR_STATIC_ASSERT(7 == kInvert_StencilOp);
1796 SkASSERT((unsigned) op < kStencilOpCount);
1797 return gTable[op];
1798 }
1799
// Sends one face's stencil settings to GL.
// glFace selects which GL face state to write (GR_GL_FRONT, GR_GL_BACK, or
// GR_GL_FRONT_AND_BACK) while grFace selects which face's values are read
// from 'settings'. The pass op is supplied for both the depth-fail and
// depth-pass slots of glStencilOp*.
void set_gl_stencil(const GrGLInterface* gl,
                    const GrStencilSettings& settings,
                    GrGLenum glFace,
                    GrStencilSettings::Face grFace) {
    GrGLenum glFunc = gr_to_gl_stencil_func(settings.func(grFace));
    GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
    GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));

    GrGLint ref = settings.funcRef(grFace);
    GrGLint mask = settings.funcMask(grFace);
    GrGLint writeMask = settings.writeMask(grFace);

    if (GR_GL_FRONT_AND_BACK == glFace) {
        // we call the combined func just in case separate stencil is not
        // supported.
        GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMask(writeMask));
        GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
    } else {
        GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
        GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
    }
}
1824 }
1825
flushStencil(DrawType type)1826 void GrGpuGL::flushStencil(DrawType type) {
1827 if (kStencilPath_DrawType != type && fHWStencilSettings != fStencilSettings) {
1828 if (fStencilSettings.isDisabled()) {
1829 if (kNo_TriState != fHWStencilTestEnabled) {
1830 GL_CALL(Disable(GR_GL_STENCIL_TEST));
1831 fHWStencilTestEnabled = kNo_TriState;
1832 }
1833 } else {
1834 if (kYes_TriState != fHWStencilTestEnabled) {
1835 GL_CALL(Enable(GR_GL_STENCIL_TEST));
1836 fHWStencilTestEnabled = kYes_TriState;
1837 }
1838 }
1839 if (!fStencilSettings.isDisabled()) {
1840 if (this->caps()->twoSidedStencilSupport()) {
1841 set_gl_stencil(this->glInterface(),
1842 fStencilSettings,
1843 GR_GL_FRONT,
1844 GrStencilSettings::kFront_Face);
1845 set_gl_stencil(this->glInterface(),
1846 fStencilSettings,
1847 GR_GL_BACK,
1848 GrStencilSettings::kBack_Face);
1849 } else {
1850 set_gl_stencil(this->glInterface(),
1851 fStencilSettings,
1852 GR_GL_FRONT_AND_BACK,
1853 GrStencilSettings::kFront_Face);
1854 }
1855 }
1856 fHWStencilSettings = fStencilSettings;
1857 }
1858 }
1859
// Flushes MSAA / line-smoothing enable state for the coming draw. Desktop GL
// only: ES can't toggle GL_MULTISAMPLE and has no smooth lines. Smooth lines
// are preferred over multisampled lines when drawing lines.
void GrGpuGL::flushAAState(DrawType type) {
// At least some ATI linux drivers will render GL_LINES incorrectly when MSAA state is enabled but
// the target is not multisampled. Single pixel wide lines are rendered thicker than 1 pixel wide.
#if 0
    // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern
    #define RT_HAS_MSAA rt->isMultisampled()
#else
    #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type)
#endif

    const GrRenderTarget* rt = this->getDrawState().getRenderTarget();
    if (kDesktop_GrGLBinding == this->glBinding()) {
        // ES doesn't support toggling GL_MULTISAMPLE and doesn't have
        // smooth lines.
        // we prefer smooth lines over multisampled lines
        bool smoothLines = false;

        if (kDrawLines_DrawType == type) {
            smoothLines = this->willUseHWAALines();
            if (smoothLines) {
                if (kYes_TriState != fHWAAState.fSmoothLineEnabled) {
                    GL_CALL(Enable(GR_GL_LINE_SMOOTH));
                    fHWAAState.fSmoothLineEnabled = kYes_TriState;
                    // must disable msaa to use line smoothing
                    if (RT_HAS_MSAA &&
                        kNo_TriState != fHWAAState.fMSAAEnabled) {
                        GL_CALL(Disable(GR_GL_MULTISAMPLE));
                        fHWAAState.fMSAAEnabled = kNo_TriState;
                    }
                }
            } else {
                if (kNo_TriState != fHWAAState.fSmoothLineEnabled) {
                    GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                    fHWAAState.fSmoothLineEnabled = kNo_TriState;
                }
            }
        }
        // When smoothing isn't in use, MSAA follows the draw state's HW AA
        // flag (always on for stencil-path draws; see FIXME below).
        if (!smoothLines && RT_HAS_MSAA) {
            // FIXME: GL_NV_pr doesn't seem to like MSAA disabled. The paths
            // convex hulls of each segment appear to get filled.
            bool enableMSAA = kStencilPath_DrawType == type ||
                              this->getDrawState().isHWAntialiasState();
            if (enableMSAA) {
                if (kYes_TriState != fHWAAState.fMSAAEnabled) {
                    GL_CALL(Enable(GR_GL_MULTISAMPLE));
                    fHWAAState.fMSAAEnabled = kYes_TriState;
                }
            } else {
                if (kNo_TriState != fHWAAState.fMSAAEnabled) {
                    GL_CALL(Disable(GR_GL_MULTISAMPLE));
                    fHWAAState.fMSAAEnabled = kNo_TriState;
                }
            }
        }
    }
}
1916
flushPathStencilSettings(SkPath::FillType fill)1917 void GrGpuGL::flushPathStencilSettings(SkPath::FillType fill) {
1918 GrStencilSettings pathStencilSettings;
1919 this->getPathStencilSettingsForFillType(fill, &pathStencilSettings);
1920 if (fHWPathStencilSettings != pathStencilSettings) {
1921 // Just the func, ref, and mask is set here. The op and write mask are params to the call
1922 // that draws the path to the SB (glStencilFillPath)
1923 GrGLenum func =
1924 gr_to_gl_stencil_func(pathStencilSettings.func(GrStencilSettings::kFront_Face));
1925 GL_CALL(PathStencilFunc(func,
1926 pathStencilSettings.funcRef(GrStencilSettings::kFront_Face),
1927 pathStencilSettings.funcMask(GrStencilSettings::kFront_Face)));
1928
1929 fHWPathStencilSettings = pathStencilSettings;
1930 }
1931 }
1932
// Flushes blend enable, blend func, and blend-constant state, guarded by the
// fHWBlendState shadow so redundant GL calls are skipped. Hardware-AA lines
// are special-cased to always blend with (SRC_ALPHA, ONE_MINUS_SRC_ALPHA),
// ignoring the requested coefficients.
void GrGpuGL::flushBlend(bool isLines,
                         GrBlendCoeff srcCoeff,
                         GrBlendCoeff dstCoeff) {
    if (isLines && this->willUseHWAALines()) {
        if (kYes_TriState != fHWBlendState.fEnabled) {
            GL_CALL(Enable(GR_GL_BLEND));
            fHWBlendState.fEnabled = kYes_TriState;
        }
        if (kSA_GrBlendCoeff != fHWBlendState.fSrcCoeff ||
            kISA_GrBlendCoeff != fHWBlendState.fDstCoeff) {
            GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_GrBlendCoeff],
                              gXfermodeCoeff2Blend[kISA_GrBlendCoeff]));
            fHWBlendState.fSrcCoeff = kSA_GrBlendCoeff;
            fHWBlendState.fDstCoeff = kISA_GrBlendCoeff;
        }
    } else {
        // any optimization to disable blending should
        // have already been applied and tweaked the coeffs
        // to (1, 0).
        bool blendOff = kOne_GrBlendCoeff == srcCoeff &&
                        kZero_GrBlendCoeff == dstCoeff;
        if (blendOff) {
            if (kNo_TriState != fHWBlendState.fEnabled) {
                GL_CALL(Disable(GR_GL_BLEND));
                fHWBlendState.fEnabled = kNo_TriState;
            }
        } else {
            if (kYes_TriState != fHWBlendState.fEnabled) {
                GL_CALL(Enable(GR_GL_BLEND));
                fHWBlendState.fEnabled = kYes_TriState;
            }
            if (fHWBlendState.fSrcCoeff != srcCoeff ||
                fHWBlendState.fDstCoeff != dstCoeff) {
                GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
                                  gXfermodeCoeff2Blend[dstCoeff]));
                fHWBlendState.fSrcCoeff = srcCoeff;
                fHWBlendState.fDstCoeff = dstCoeff;
            }
            // Only send the blend constant when a coefficient actually
            // references it and the cached value is stale or unknown.
            GrColor blendConst = this->getDrawState().getBlendConstant();
            if ((BlendCoeffReferencesConstant(srcCoeff) ||
                 BlendCoeffReferencesConstant(dstCoeff)) &&
                (!fHWBlendState.fConstColorValid ||
                 fHWBlendState.fConstColor != blendConst)) {
                GrGLfloat c[4];
                GrColorToRGBAFloat(blendConst, c);
                GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
                fHWBlendState.fConstColor = blendConst;
                fHWBlendState.fConstColorValid = true;
            }
        }
    }
}
1985
// Maps an SkShader tile mode to the matching GL texture wrap mode. The table
// is indexed directly by the tile-mode value; the static asserts pin the enum
// ordering it relies on.
static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
    static const GrGLenum gWrapModes[] = {
        GR_GL_CLAMP_TO_EDGE,
        GR_GL_REPEAT,
        GR_GL_MIRRORED_REPEAT
    };
    GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
    GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
    GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
    GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
    return gWrapModes[tm];
}
1998
// Binds 'texture' to texture unit 'unitIdx' and flushes its sampler
// parameters (filters, wrap modes, swizzle), sending to GL only the
// parameters that differ from the values cached on the texture. If the
// cached params predate the last context reset, everything is re-sent.
void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
    SkASSERT(NULL != texture);

    // If we created a rt/tex and rendered to it without using a texture and now we're texturing
    // from the rt it will still be the last bound texture, but it needs resolving. So keep this
    // out of the "last != next" check.
    GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
    if (NULL != texRT) {
        this->onResolveRenderTarget(texRT);
    }

    if (fHWBoundTextures[unitIdx] != texture) {
        this->setTextureUnit(unitIdx);
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID()));
        fHWBoundTextures[unitIdx] = texture;
    }

    // Params cached before the last reset timestamp are considered unknown.
    ResetTimestamp timestamp;
    const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
    bool setAll = timestamp < this->getResetTimestamp();
    GrGLTexture::TexParams newTexParams;

    // Indexed by GrTextureParams::FilterMode (nearest / bilerp / mipmap).
    static GrGLenum glMinFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR_MIPMAP_LINEAR
    };
    static GrGLenum glMagFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR
    };
    newTexParams.fMinFilter = glMinFilterModes[params.filterMode()];
    newTexParams.fMagFilter = glMagFilterModes[params.filterMode()];

    // Regenerate mips if they will be sampled and are out of date.
    if (params.filterMode() == GrTextureParams::kMipMap_FilterMode &&
        texture->mipMapsAreDirty()) {
//        GL_CALL(Hint(GR_GL_GENERATE_MIPMAP_HINT,GR_GL_NICEST));
        GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D));
        texture->dirtyMipMaps(false);
    }

    newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
    newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
    memcpy(newTexParams.fSwizzleRGBA,
           GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()),
           sizeof(newTexParams.fSwizzleRGBA));
    if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_MAG_FILTER,
                              newTexParams.fMagFilter));
    }
    if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_MIN_FILTER,
                              newTexParams.fMinFilter));
    }
    if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_WRAP_S,
                              newTexParams.fWrapS));
    }
    if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_WRAP_T,
                              newTexParams.fWrapT));
    }
    if (this->glCaps().textureSwizzleSupport() &&
        (setAll || memcmp(newTexParams.fSwizzleRGBA,
                          oldTexParams.fSwizzleRGBA,
                          sizeof(newTexParams.fSwizzleRGBA)))) {
        this->setTextureUnit(unitIdx);
        if (this->glBinding() == kES_GrGLBinding) {
            // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
            const GrGLenum* swizzle = newTexParams.fSwizzleRGBA;
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
        } else {
            GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint));
            const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA);
            GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
        }
    }
    texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
}
2090
setProjectionMatrix(const SkMatrix & matrix,const SkISize & renderTargetSize,GrSurfaceOrigin renderTargetOrigin)2091 void GrGpuGL::setProjectionMatrix(const SkMatrix& matrix,
2092 const SkISize& renderTargetSize,
2093 GrSurfaceOrigin renderTargetOrigin) {
2094
2095 SkASSERT(this->glCaps().fixedFunctionSupport());
2096
2097 if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
2098 renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
2099 matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) {
2100 return;
2101 }
2102
2103 fHWProjectionMatrixState.fViewMatrix = matrix;
2104 fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize;
2105 fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin;
2106
2107 GrGLfloat glMatrix[4 * 4];
2108 fHWProjectionMatrixState.getGLMatrix<4>(glMatrix);
2109 GL_CALL(MatrixMode(GR_GL_PROJECTION));
2110 GL_CALL(LoadMatrixf(glMatrix));
2111 }
2112
// Enables GL_OBJECT_LINEAR texture-coordinate generation for 'components'
// coordinates (S..R) on texture unit 'unitIdx'. 'coefficients' supplies three
// floats per component; each component's object plane is set to
// (c[0], c[1], 0, c[2]). No-op when the cached per-unit settings match.
void GrGpuGL::enableTexGen(int unitIdx,
                           TexGenComponents components,
                           const GrGLfloat* coefficients) {
    SkASSERT(this->glCaps().fixedFunctionSupport());
    SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents);
    SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= unitIdx);

    // Early out when mode, component count, and coefficients are unchanged.
    if (GR_GL_OBJECT_LINEAR == fHWTexGenSettings[unitIdx].fMode &&
        components == fHWTexGenSettings[unitIdx].fNumComponents &&
        !memcmp(coefficients, fHWTexGenSettings[unitIdx].fCoefficients,
                3 * components * sizeof(GrGLfloat))) {
        return;
    }

    this->setTextureUnit(unitIdx);

    // Switch all four coords (S,T,R,Q) to object-linear mode the first time.
    if (GR_GL_OBJECT_LINEAR != fHWTexGenSettings[unitIdx].fMode) {
        for (int i = 0; i < 4; i++) {
            GL_CALL(TexGeni(GR_GL_S + i, GR_GL_TEXTURE_GEN_MODE, GR_GL_OBJECT_LINEAR));
        }
        fHWTexGenSettings[unitIdx].fMode = GR_GL_OBJECT_LINEAR;
    }

    // Enable newly needed components and disable no-longer-needed ones.
    for (int i = fHWTexGenSettings[unitIdx].fNumComponents; i < components; i++) {
        GL_CALL(Enable(GR_GL_TEXTURE_GEN_S + i));
    }
    for (int i = components; i < fHWTexGenSettings[unitIdx].fNumComponents; i++) {
        GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + i));
    }
    fHWTexGenSettings[unitIdx].fNumComponents = components;

    for (int i = 0; i < components; i++) {
        // z is forced to 0; the input coefficients are (x, y, w).
        GrGLfloat plane[] = {coefficients[0 + 3 * i],
                             coefficients[1 + 3 * i],
                             0,
                             coefficients[2 + 3 * i]};
        GL_CALL(TexGenfv(GR_GL_S + i, GR_GL_OBJECT_PLANE, plane));
    }

    // Mirror the tex gen state for NV_path_rendering when available.
    if (this->caps()->pathRenderingSupport()) {
        GL_CALL(PathTexGen(GR_GL_TEXTURE0 + unitIdx,
                           GR_GL_OBJECT_LINEAR,
                           components,
                           coefficients));
    }

    memcpy(fHWTexGenSettings[unitIdx].fCoefficients, coefficients,
           3 * components * sizeof(GrGLfloat));
}
2162
enableTexGen(int unitIdx,TexGenComponents components,const SkMatrix & matrix)2163 void GrGpuGL::enableTexGen(int unitIdx, TexGenComponents components, const SkMatrix& matrix) {
2164 GrGLfloat coefficients[3 * 3];
2165 SkASSERT(this->glCaps().fixedFunctionSupport());
2166 SkASSERT(components >= kS_TexGenComponents && components <= kSTR_TexGenComponents);
2167
2168 coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]);
2169 coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]);
2170 coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]);
2171
2172 if (components >= kST_TexGenComponents) {
2173 coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]);
2174 coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]);
2175 coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]);
2176 }
2177
2178 if (components >= kSTR_TexGenComponents) {
2179 coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]);
2180 coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]);
2181 coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]);
2182 }
2183
2184 enableTexGen(unitIdx, components, coefficients);
2185 }
2186
// Disables tex gen on units that were active during the previous flush but
// are unused now. Units in [0, numUsedTexCoordSets) were already configured
// by enableTexGen, so only the trailing, now-inactive range is touched.
void GrGpuGL::flushTexGenSettings(int numUsedTexCoordSets) {
    SkASSERT(this->glCaps().fixedFunctionSupport());
    SkASSERT(this->glCaps().maxFixedFunctionTextureCoords() >= numUsedTexCoordSets);

    // Only write the inactive tex gens, since active tex gens were written
    // when they were enabled.

    SkDEBUGCODE(
        for (int i = 0; i < numUsedTexCoordSets; i++) {
            SkASSERT(0 != fHWTexGenSettings[i].fNumComponents);
        }
    );

    for (int i = numUsedTexCoordSets; i < fHWActiveTexGenSets; i++) {
        SkASSERT(0 != fHWTexGenSettings[i].fNumComponents);

        this->setTextureUnit(i);
        for (int j = 0; j < fHWTexGenSettings[i].fNumComponents; j++) {
            GL_CALL(Disable(GR_GL_TEXTURE_GEN_S + j));
        }

        // Also clear the NV_path_rendering tex gen for this unit.
        if (this->caps()->pathRenderingSupport()) {
            GL_CALL(PathTexGen(GR_GL_TEXTURE0 + i, GR_GL_NONE, 0, NULL));
        }

        fHWTexGenSettings[i].fNumComponents = 0;
    }

    fHWActiveTexGenSets = numUsedTexCoordSets;
}
2217
flushMiscFixedFunctionState()2218 void GrGpuGL::flushMiscFixedFunctionState() {
2219
2220 const GrDrawState& drawState = this->getDrawState();
2221
2222 if (drawState.isDitherState()) {
2223 if (kYes_TriState != fHWDitherEnabled) {
2224 GL_CALL(Enable(GR_GL_DITHER));
2225 fHWDitherEnabled = kYes_TriState;
2226 }
2227 } else {
2228 if (kNo_TriState != fHWDitherEnabled) {
2229 GL_CALL(Disable(GR_GL_DITHER));
2230 fHWDitherEnabled = kNo_TriState;
2231 }
2232 }
2233
2234 if (drawState.isColorWriteDisabled()) {
2235 if (kNo_TriState != fHWWriteToColor) {
2236 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
2237 GR_GL_FALSE, GR_GL_FALSE));
2238 fHWWriteToColor = kNo_TriState;
2239 }
2240 } else {
2241 if (kYes_TriState != fHWWriteToColor) {
2242 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
2243 fHWWriteToColor = kYes_TriState;
2244 }
2245 }
2246
2247 if (fHWDrawFace != drawState.getDrawFace()) {
2248 switch (this->getDrawState().getDrawFace()) {
2249 case GrDrawState::kCCW_DrawFace:
2250 GL_CALL(Enable(GR_GL_CULL_FACE));
2251 GL_CALL(CullFace(GR_GL_BACK));
2252 break;
2253 case GrDrawState::kCW_DrawFace:
2254 GL_CALL(Enable(GR_GL_CULL_FACE));
2255 GL_CALL(CullFace(GR_GL_FRONT));
2256 break;
2257 case GrDrawState::kBoth_DrawFace:
2258 GL_CALL(Disable(GR_GL_CULL_FACE));
2259 break;
2260 default:
2261 GrCrash("Unknown draw face.");
2262 }
2263 fHWDrawFace = drawState.getDrawFace();
2264 }
2265 }
2266
notifyRenderTargetDelete(GrRenderTarget * renderTarget)2267 void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) {
2268 SkASSERT(NULL != renderTarget);
2269 if (fHWBoundRenderTarget == renderTarget) {
2270 fHWBoundRenderTarget = NULL;
2271 }
2272 }
2273
notifyTextureDelete(GrGLTexture * texture)2274 void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) {
2275 for (int s = 0; s < fHWBoundTextures.count(); ++s) {
2276 if (fHWBoundTextures[s] == texture) {
2277 // deleting bound texture does implied bind to 0
2278 fHWBoundTextures[s] = NULL;
2279 }
2280 }
2281 }
2282
configToGLFormats(GrPixelConfig config,bool getSizedInternalFormat,GrGLenum * internalFormat,GrGLenum * externalFormat,GrGLenum * externalType)2283 bool GrGpuGL::configToGLFormats(GrPixelConfig config,
2284 bool getSizedInternalFormat,
2285 GrGLenum* internalFormat,
2286 GrGLenum* externalFormat,
2287 GrGLenum* externalType) {
2288 GrGLenum dontCare;
2289 if (NULL == internalFormat) {
2290 internalFormat = &dontCare;
2291 }
2292 if (NULL == externalFormat) {
2293 externalFormat = &dontCare;
2294 }
2295 if (NULL == externalType) {
2296 externalType = &dontCare;
2297 }
2298
2299 switch (config) {
2300 case kRGBA_8888_GrPixelConfig:
2301 *internalFormat = GR_GL_RGBA;
2302 *externalFormat = GR_GL_RGBA;
2303 if (getSizedInternalFormat) {
2304 *internalFormat = GR_GL_RGBA8;
2305 } else {
2306 *internalFormat = GR_GL_RGBA;
2307 }
2308 *externalType = GR_GL_UNSIGNED_BYTE;
2309 break;
2310 case kBGRA_8888_GrPixelConfig:
2311 if (!this->glCaps().bgraFormatSupport()) {
2312 return false;
2313 }
2314 if (this->glCaps().bgraIsInternalFormat()) {
2315 if (getSizedInternalFormat) {
2316 *internalFormat = GR_GL_BGRA8;
2317 } else {
2318 *internalFormat = GR_GL_BGRA;
2319 }
2320 } else {
2321 if (getSizedInternalFormat) {
2322 *internalFormat = GR_GL_RGBA8;
2323 } else {
2324 *internalFormat = GR_GL_RGBA;
2325 }
2326 }
2327 *externalFormat = GR_GL_BGRA;
2328 *externalType = GR_GL_UNSIGNED_BYTE;
2329 break;
2330 case kRGB_565_GrPixelConfig:
2331 *internalFormat = GR_GL_RGB;
2332 *externalFormat = GR_GL_RGB;
2333 if (getSizedInternalFormat) {
2334 if (this->glBinding() == kDesktop_GrGLBinding) {
2335 return false;
2336 } else {
2337 *internalFormat = GR_GL_RGB565;
2338 }
2339 } else {
2340 *internalFormat = GR_GL_RGB;
2341 }
2342 *externalType = GR_GL_UNSIGNED_SHORT_5_6_5;
2343 break;
2344 case kRGBA_4444_GrPixelConfig:
2345 *internalFormat = GR_GL_RGBA;
2346 *externalFormat = GR_GL_RGBA;
2347 if (getSizedInternalFormat) {
2348 *internalFormat = GR_GL_RGBA4;
2349 } else {
2350 *internalFormat = GR_GL_RGBA;
2351 }
2352 *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
2353 break;
2354 case kIndex_8_GrPixelConfig:
2355 if (this->caps()->eightBitPaletteSupport()) {
2356 *internalFormat = GR_GL_PALETTE8_RGBA8;
2357 // glCompressedTexImage doesn't take external params
2358 *externalFormat = GR_GL_PALETTE8_RGBA8;
2359 // no sized/unsized internal format distinction here
2360 *internalFormat = GR_GL_PALETTE8_RGBA8;
2361 // unused with CompressedTexImage
2362 *externalType = GR_GL_UNSIGNED_BYTE;
2363 } else {
2364 return false;
2365 }
2366 break;
2367 case kAlpha_8_GrPixelConfig:
2368 if (this->glCaps().textureRedSupport()) {
2369 *internalFormat = GR_GL_RED;
2370 *externalFormat = GR_GL_RED;
2371 if (getSizedInternalFormat) {
2372 *internalFormat = GR_GL_R8;
2373 } else {
2374 *internalFormat = GR_GL_RED;
2375 }
2376 *externalType = GR_GL_UNSIGNED_BYTE;
2377 } else {
2378 *internalFormat = GR_GL_ALPHA;
2379 *externalFormat = GR_GL_ALPHA;
2380 if (getSizedInternalFormat) {
2381 *internalFormat = GR_GL_ALPHA8;
2382 } else {
2383 *internalFormat = GR_GL_ALPHA;
2384 }
2385 *externalType = GR_GL_UNSIGNED_BYTE;
2386 }
2387 break;
2388 default:
2389 return false;
2390 }
2391 return true;
2392 }
2393
setTextureUnit(int unit)2394 void GrGpuGL::setTextureUnit(int unit) {
2395 SkASSERT(unit >= 0 && unit < fHWBoundTextures.count());
2396 if (unit != fHWActiveTextureUnitIdx) {
2397 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
2398 fHWActiveTextureUnitIdx = unit;
2399 }
2400 }
2401
setScratchTextureUnit()2402 void GrGpuGL::setScratchTextureUnit() {
2403 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
2404 int lastUnitIdx = fHWBoundTextures.count() - 1;
2405 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
2406 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
2407 fHWActiveTextureUnitIdx = lastUnitIdx;
2408 }
2409 // clear out the this field so that if a program does use this unit it will rebind the correct
2410 // texture.
2411 fHWBoundTextures[lastUnitIdx] = NULL;
2412 }
2413
namespace {
// Determines whether glBlitFramebuffer could be used between src and dst.
// On success, *wouldNeedTempFBO (if non-NULL) reports whether either surface
// lacks a render target and would need to be wrapped in a temporary FBO.
inline bool can_blit_framebuffer(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGpuGL* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
        gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        gpu->glCaps().usesMSAARenderBuffers()) {
        // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
        // or the rects are not the same (not just the same size but have the same edges).
        if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
            (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
            return false;
        }
        if (NULL != wouldNeedTempFBO) {
            *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget();
        }
        return true;
    } else {
        return false;
    }
}

// Determines whether glCopyTexSubImage2D could be used to copy src into dst.
// On success, *wouldNeedTempFBO (if non-NULL) reports whether src would need
// to be wrapped in a temporary FBO to serve as the read framebuffer.
inline bool can_copy_texsubimage(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGpuGL* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
    // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
    // many drivers would allow it to work, but ANGLE does not.
    if (kES_GrGLBinding == gpu->glBinding() && gpu->glCaps().bgraIsInternalFormat() &&
        (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
        return false;
    }
    const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
    // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
    // then we don't want to copy to the texture but to the MSAA buffer.
    if (NULL != dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
        return false;
    }
    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    // If the src is multisampled (and uses an extension where there is a separate MSAA
    // renderbuffer) then it is an invalid operation to call CopyTexSubImage
    if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        return false;
    }
    // Finally: src must be readable, dst must actually be a texture, origins
    // must agree (no flip is possible with CopyTexSubImage), and paletted
    // sources are excluded.
    if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        NULL != dst->asTexture() &&
        dst->origin() == src->origin() &&
        kIndex_8_GrPixelConfig != src->config()) {
        if (NULL != wouldNeedTempFBO) {
            *wouldNeedTempFBO = NULL == src->asRenderTarget();
        }
        return true;
    } else {
        return false;
    }
}

// If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect is
// relative to is output.
inline GrGLuint bind_surface_as_fbo(const GrGLInterface* gl,
                                    GrSurface* surface,
                                    GrGLenum fboTarget,
                                    GrGLIRect* viewport) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    GrGLuint tempFBOID;
    if (NULL == rt) {
        // Not a render target: wrap the surface's texture in a temporary FBO.
        SkASSERT(NULL != surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GR_GL_CALL(gl, GenFramebuffers(1, &tempFBOID));
        GR_GL_CALL(gl, BindFramebuffer(fboTarget, tempFBOID));
        GR_GL_CALL(gl, FramebufferTexture2D(fboTarget,
                                            GR_GL_COLOR_ATTACHMENT0,
                                            GR_GL_TEXTURE_2D,
                                            texID,
                                            0));
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
    } else {
        // Already has an FBO; just bind it.
        tempFBOID = 0;
        GR_GL_CALL(gl, BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
    }
    return tempFBOID;
}

}
2505
// Fills in 'desc' for a surface that will be the destination of a copy from
// 'src'. Prefers a plain texture desc that permits the glCopyTexSubImage2D
// fast path; defers to the base class (which sets up a render-based copy)
// when that path can't be used.
void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    // Check for format issues with glCopyTexSubImage2D
    if (kES_GrGLBinding == this->glBinding() && this->glCaps().bgraIsInternalFormat() &&
        kBGRA_8888_GrPixelConfig == src->config()) {
        // glCopyTexSubImage2D doesn't work with this config. We'll want to make it a render target
        // in order to call glBlitFramebuffer or to copy to it by rendering.
        INHERITED::initCopySurfaceDstDesc(src, desc);
        return;
    } else if (NULL == src->asRenderTarget()) {
        // We don't want to have to create an FBO just to use glCopyTexSubImage2D. Let the base
        // class handle it by rendering.
        INHERITED::initCopySurfaceDstDesc(src, desc);
        return;
    }

    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    // NOTE(review): srcRT can't be NULL here (the NULL case returned above),
    // so the NULL check below is redundant but harmless.
    if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer.
        INHERITED::initCopySurfaceDstDesc(src, desc);
    } else {
        // CopyTexSubImage2D is viable: a plain texture matching src's config
        // and origin suffices.
        desc->fConfig = src->config();
        desc->fOrigin = src->origin();
        desc->fFlags = kNone_GrTextureFlags;
    }
}
2531
// Copies srcRect from 'src' to 'dst' at dstPoint. Tries, in order:
// 1) glCopyTexSubImage2D, 2) glBlitFramebuffer, 3) the base class (render)
// path. A GL path that would require a temporary FBO is only taken when the
// base class cannot do the copy at all. Returns true if any path succeeded.
bool GrGpuGL::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
    bool copied = false;
    bool wouldNeedTempFBO = false;
    if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) &&
        (!wouldNeedTempFBO || !inheritedCouldCopy)) {
        // Path 1: read from src's FBO (possibly a temp one) into dst's
        // texture via glCopyTexSubImage2D.
        GrGLuint srcFBO;
        GrGLIRect srcVP;
        srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_FRAMEBUFFER, &srcVP);
        GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
        SkASSERT(NULL != dstTex);
        // We modified the bound FBO
        fHWBoundRenderTarget = NULL;
        GrGLIRect srcGLRect;
        srcGLRect.setRelativeTo(srcVP,
                                srcRect.fLeft,
                                srcRect.fTop,
                                srcRect.width(),
                                srcRect.height(),
                                src->origin());

        this->setScratchTextureUnit();
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID()));
        // Flip the destination y for bottom-up render targets.
        GrGLint dstY;
        if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
            dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
        } else {
            dstY = dstPoint.fY;
        }
        GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0,
                                  dstPoint.fX, dstY,
                                  srcGLRect.fLeft, srcGLRect.fBottom,
                                  srcGLRect.fWidth, srcGLRect.fHeight));
        copied = true;
        if (srcFBO) {
            GL_CALL(DeleteFramebuffers(1, &srcFBO));
        }
    } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) &&
               (!wouldNeedTempFBO || !inheritedCouldCopy)) {
        // Path 2: glBlitFramebuffer. Not usable when src and dst are the same
        // surface with overlapping rects.
        SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                            srcRect.width(), srcRect.height());
        bool selfOverlap = false;
        if (dst->isSameAs(src)) {
            selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect);
        }

        if (!selfOverlap) {
            GrGLuint dstFBO;
            GrGLuint srcFBO;
            GrGLIRect dstVP;
            GrGLIRect srcVP;
            dstFBO = bind_surface_as_fbo(this->glInterface(), dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP);
            srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_READ_FRAMEBUFFER, &srcVP);
            // We modified the bound FBO
            fHWBoundRenderTarget = NULL;
            GrGLIRect srcGLRect;
            GrGLIRect dstGLRect;
            srcGLRect.setRelativeTo(srcVP,
                                    srcRect.fLeft,
                                    srcRect.fTop,
                                    srcRect.width(),
                                    srcRect.height(),
                                    src->origin());
            dstGLRect.setRelativeTo(dstVP,
                                    dstRect.fLeft,
                                    dstRect.fTop,
                                    dstRect.width(),
                                    dstRect.height(),
                                    dst->origin());

            GrAutoTRestore<ScissorState> asr;
            if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
                // The EXT version applies the scissor during the blit, so disable it.
                asr.reset(&fScissorState);
                fScissorState.fEnabled = false;
                this->flushScissor();
            }
            GrGLint srcY0;
            GrGLint srcY1;
            // Does the blit need to y-mirror or not?
            if (src->origin() == dst->origin()) {
                srcY0 = srcGLRect.fBottom;
                srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
            } else {
                srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
                srcY1 = srcGLRect.fBottom;
            }
            GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
                                    srcY0,
                                    srcGLRect.fLeft + srcGLRect.fWidth,
                                    srcY1,
                                    dstGLRect.fLeft,
                                    dstGLRect.fBottom,
                                    dstGLRect.fLeft + dstGLRect.fWidth,
                                    dstGLRect.fBottom + dstGLRect.fHeight,
                                    GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            if (dstFBO) {
                GL_CALL(DeleteFramebuffers(1, &dstFBO));
            }
            if (srcFBO) {
                GL_CALL(DeleteFramebuffers(1, &srcFBO));
            }
            copied = true;
        }
    }
    // Path 3: let the base class copy by rendering.
    if (!copied && inheritedCouldCopy) {
        copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint);
        SkASSERT(copied);
    }
    return copied;
}
2646
onCanCopySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint)2647 bool GrGpuGL::onCanCopySurface(GrSurface* dst,
2648 GrSurface* src,
2649 const SkIRect& srcRect,
2650 const SkIPoint& dstPoint) {
2651 // This mirrors the logic in onCopySurface.
2652 if (can_copy_texsubimage(dst, src, this)) {
2653 return true;
2654 }
2655 if (can_blit_framebuffer(dst, src, this)) {
2656 if (dst->isSameAs(src)) {
2657 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2658 srcRect.width(), srcRect.height());
2659 if(!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
2660 return true;
2661 }
2662 } else {
2663 return true;
2664 }
2665 }
2666 return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
2667 }
2668
2669
2670 ///////////////////////////////////////////////////////////////////////////////
2671
bindArrayAndBuffersToDraw(GrGpuGL * gpu,const GrGLVertexBuffer * vbuffer,const GrGLIndexBuffer * ibuffer)2672 GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw(
2673 GrGpuGL* gpu,
2674 const GrGLVertexBuffer* vbuffer,
2675 const GrGLIndexBuffer* ibuffer) {
2676 SkASSERT(NULL != vbuffer);
2677 GrGLAttribArrayState* attribState;
2678
2679 // We use a vertex array if we're on a core profile and the verts are in a VBO.
2680 if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
2681 if (NULL == fVBOVertexArray || !fVBOVertexArray->isValid()) {
2682 SkSafeUnref(fVBOVertexArray);
2683 GrGLuint arrayID;
2684 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
2685 int attrCount = gpu->glCaps().maxVertexAttributes();
2686 fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount));
2687 }
2688 attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer);
2689 } else {
2690 if (NULL != ibuffer) {
2691 this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
2692 } else {
2693 this->setVertexArrayID(gpu, 0);
2694 }
2695 int attrCount = gpu->glCaps().maxVertexAttributes();
2696 if (fDefaultVertexArrayAttribState.count() != attrCount) {
2697 fDefaultVertexArrayAttribState.resize(attrCount);
2698 }
2699 attribState = &fDefaultVertexArrayAttribState;
2700 }
2701 return attribState;
2702 }
2703