1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrGLGpu.h"
9 #include "GrBackendSemaphore.h"
10 #include "GrBackendSurface.h"
11 #include "GrFixedClip.h"
12 #include "GrGLBuffer.h"
13 #include "GrGLGpuCommandBuffer.h"
14 #include "GrGLSemaphore.h"
15 #include "GrGLStencilAttachment.h"
16 #include "GrGLTextureRenderTarget.h"
17 #include "GrGpuResourcePriv.h"
18 #include "GrMesh.h"
19 #include "GrPipeline.h"
20 #include "GrRenderTargetPriv.h"
21 #include "GrShaderCaps.h"
22 #include "GrSurfaceProxyPriv.h"
23 #include "GrTexturePriv.h"
24 #include "GrTypes.h"
25 #include "SkAutoMalloc.h"
26 #include "SkConvertPixels.h"
27 #include "SkHalf.h"
28 #include "SkMakeUnique.h"
29 #include "SkMipMap.h"
30 #include "SkPixmap.h"
31 #include "SkSLCompiler.h"
32 #include "SkStrokeRec.h"
33 #include "SkTemplates.h"
34 #include "SkTo.h"
35 #include "SkTraceEvent.h"
36 #include "SkTypes.h"
37 #include "builders/GrGLShaderStringBuilder.h"
38
39 #include <cmath>
40
41 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
42 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
43
44 #define SKIP_CACHE_CHECK true
45
46 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
47 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
48 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
49 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
50 #else
51 #define CLEAR_ERROR_BEFORE_ALLOC(iface)
52 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
53 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
54 #endif
55
56 //#define USE_NSIGHT
57
58 ///////////////////////////////////////////////////////////////////////////////
59
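// Table mapping GrBlendEquation values to GL blend equations. It is indexed directly by the
// enum, so the GR_STATIC_ASSERTs below pin the expected ordering of GrBlendEquation.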
60 static const GrGLenum gXfermodeEquation2Blend[] = {
61 // Basic OpenGL blend equations.
62 GR_GL_FUNC_ADD,
63 GR_GL_FUNC_SUBTRACT,
64 GR_GL_FUNC_REVERSE_SUBTRACT,
65
66 // GL_KHR_blend_equation_advanced.
67 GR_GL_SCREEN,
68 GR_GL_OVERLAY,
69 GR_GL_DARKEN,
70 GR_GL_LIGHTEN,
71 GR_GL_COLORDODGE,
72 GR_GL_COLORBURN,
73 GR_GL_HARDLIGHT,
74 GR_GL_SOFTLIGHT,
75 GR_GL_DIFFERENCE,
76 GR_GL_EXCLUSION,
77 GR_GL_MULTIPLY,
78 GR_GL_HSL_HUE,
79 GR_GL_HSL_SATURATION,
80 GR_GL_HSL_COLOR,
81 GR_GL_HSL_LUMINOSITY,
82
83 // Illegal... needs to map to something.
84 GR_GL_FUNC_ADD,
85 };
86 GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
87 GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
88 GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
89 GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
90 GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
91 GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
92 GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
93 GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
94 GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
95 GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
96 GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
97 GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
98 GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
99 GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
100 GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
101 GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
102 GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
103 GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
104 GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
105
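// Table mapping GrBlendCoeff values to GL blend factors, again indexed directly by the enum.
// The ordering is checked by the asserts inside BlendCoeffReferencesConstant below.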
106 static const GrGLenum gXfermodeCoeff2Blend[] = {
107 GR_GL_ZERO,
108 GR_GL_ONE,
109 GR_GL_SRC_COLOR,
110 GR_GL_ONE_MINUS_SRC_COLOR,
111 GR_GL_DST_COLOR,
112 GR_GL_ONE_MINUS_DST_COLOR,
113 GR_GL_SRC_ALPHA,
114 GR_GL_ONE_MINUS_SRC_ALPHA,
115 GR_GL_DST_ALPHA,
116 GR_GL_ONE_MINUS_DST_ALPHA,
117 GR_GL_CONSTANT_COLOR,
118 GR_GL_ONE_MINUS_CONSTANT_COLOR,
119 GR_GL_CONSTANT_ALPHA,
120 GR_GL_ONE_MINUS_CONSTANT_ALPHA,
121
122 // extended blend coeffs
123 GR_GL_SRC1_COLOR,
124 GR_GL_ONE_MINUS_SRC1_COLOR,
125 GR_GL_SRC1_ALPHA,
126 GR_GL_ONE_MINUS_SRC1_ALPHA,
127
128 // Illegal... needs to map to something.
129 GR_GL_ZERO,
130 };
131
132 bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
133 static const bool gCoeffReferencesBlendConst[] = {
134 false,
135 false,
136 false,
137 false,
138 false,
139 false,
140 false,
141 false,
142 false,
143 false,
144 true,
145 true,
146 true,
147 true,
148
149 // extended blend coeffs
150 false,
151 false,
152 false,
153 false,
154
155 // Illegal.
156 false,
157 };
158 return gCoeffReferencesBlendConst[coeff];
159 GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
160
161 GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
162 GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
163 GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
164 GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
165 GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
166 GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
167 GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
168 GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
169 GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
170 GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
171 GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
172 GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
173 GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
174 GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
175
176 GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
177 GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
178 GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
179 GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
180
181 // The assertion for gXfermodeCoeff2Blend has to be in GrGpu scope.
182 GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
183 }
184
185 static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
186 switch (filter) {
187 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
188 case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR;
189 case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR;
190 }
191 SK_ABORT("Unknown filter");
192 return 0;
193 }
194
195 static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter) {
196 switch (filter) {
197 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
198 case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR;
199 case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR_MIPMAP_LINEAR;
200 }
201 SK_ABORT("Unknown filter");
202 return 0;
203 }
204
205 static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
206 const GrCaps& caps) {
207 switch (wrapMode) {
208 case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE;
209 case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT;
210 case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
211 case GrSamplerState::WrapMode::kClampToBorder:
212 // May not be supported but should have been caught earlier
213 SkASSERT(caps.clampToBorderSupport());
214 return GR_GL_CLAMP_TO_BORDER;
215 }
216 SK_ABORT("Unknown wrap mode");
217 return 0;
218 }
219
220 ///////////////////////////////////////////////////////////////////////////////
221
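// Lazily creates and caches GL sampler objects, one per unique GrSamplerState, and tracks the
// sampler bound to each texture unit so redundant BindSampler calls can be skipped.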
222 class GrGLGpu::SamplerObjectCache {
223 public:
224 SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
225 fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers();
226 fHWBoundSamplers.reset(new GrGLuint[fNumTextureUnits]);
227 std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
228 std::fill_n(fSamplers, kNumSamplers, 0);
229 }
230
231 ~SamplerObjectCache() {
232 if (!fNumTextureUnits) {
233 // We've already been abandoned.
234 return;
235 }
236 GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers));
237 }
238
239 void bindSampler(int unitIdx, const GrSamplerState& state) {
240 int index = StateToIndex(state);
241 if (!fSamplers[index]) {
242 GrGLuint s;
243 GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
244 if (!s) {
245 return;
246 }
247 fSamplers[index] = s;
248 auto minFilter = filter_to_gl_min_filter(state.filter());
249 auto magFilter = filter_to_gl_mag_filter(state.filter());
250 auto wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
251 auto wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
252 GR_GL_CALL(fGpu->glInterface(),
253 SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
254 GR_GL_CALL(fGpu->glInterface(),
255 SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
256 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
257 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
258 }
259 if (fHWBoundSamplers[unitIdx] != fSamplers[index]) {
260 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index]));
261 fHWBoundSamplers[unitIdx] = fSamplers[index];
262 }
263 }
264
265 void invalidateBindings() {
266 // When we have sampler support we always use samplers. So setting these to zero will cause
267 // a rebind on next usage.
268 std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
269 }
270
271 void abandon() {
272 fHWBoundSamplers.reset();
273 fNumTextureUnits = 0;
274 }
275
276 void release() {
277 if (!fNumTextureUnits) {
278 // We've already been abandoned.
279 return;
280 }
281 GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers));
282 std::fill_n(fSamplers, kNumSamplers, 0);
283 // Deleting a bound sampler implicitly binds sampler 0.
284 std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
285 }
286
287 private:
288 static int StateToIndex(const GrSamplerState& state) {
289 int filter = static_cast<int>(state.filter());
290 SkASSERT(filter >= 0 && filter < 3);
291 int wrapX = static_cast<int>(state.wrapModeX());
292 SkASSERT(wrapX >= 0 && wrapX < 4);
293 int wrapY = static_cast<int>(state.wrapModeY());
294 SkASSERT(wrapY >= 0 && wrapY < 4);
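// Pack (filter, wrapX, wrapY) into a unique table index: 3 filters * 4 * 4 wrap modes = 48
// combinations, which matches kNumSamplers.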
295 int idx = 16 * filter + 4 * wrapX + wrapY;
296 SkASSERT(idx < kNumSamplers);
297 return idx;
298 }
299
300 GrGLGpu* fGpu;
301 static constexpr int kNumSamplers = 48;
302 std::unique_ptr<GrGLuint[]> fHWBoundSamplers;
303 GrGLuint fSamplers[kNumSamplers];
304 int fNumTextureUnits;
305 };
306
307 ///////////////////////////////////////////////////////////////////////////////
308
309 sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options,
310 GrContext* context) {
311 if (!interface) {
312 interface = GrGLMakeNativeInterface();
313 // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated
314 // to GrGLMakeNativeInterface.
315 if (!interface) {
316 interface = sk_ref_sp(GrGLCreateNativeInterface());
317 }
318 if (!interface) {
319 return nullptr;
320 }
321 }
322 #ifdef USE_NSIGHT
323 const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
324 #endif
325 auto glContext = GrGLContext::Make(std::move(interface), options);
326 if (!glContext) {
327 return nullptr;
328 }
329 return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), context));
330 }
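
// A minimal usage sketch (clients normally reach this indirectly through the GrContext
// factories rather than calling GrGLGpu::Make themselves):
//   sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
//   sk_sp<GrContext> context = GrContext::MakeGL(iface);  // ends up calling GrGLGpu::Make()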
331
332 GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrContext* context)
333 : GrGpu(context)
334 , fGLContext(std::move(ctx))
335 , fProgramCache(new ProgramCache(this))
336 , fHWProgramID(0)
337 , fTempSrcFBOID(0)
338 , fTempDstFBOID(0)
339 , fStencilClearFBOID(0) {
340 SkASSERT(fGLContext);
341 GrGLClearErr(this->glInterface());
342 fCaps = sk_ref_sp(fGLContext->caps());
343
344 fHWBoundTextureUniqueIDs.reset(this->caps()->shaderCaps()->maxFragmentSamplers());
345
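// Record the GL binding target used for each GrBufferType. Chromium's transfer-buffer
// extension exposes its own targets for the two transfer buffer types.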
346 fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
347 fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
348 fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
349 fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
350 if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
351 fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
352 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
353 fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
354 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
355 } else {
356 fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
357 fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
358 }
359 for (int i = 0; i < kGrBufferTypeCount; ++i) {
360 fHWBufferState[i].invalidate();
361 }
362 GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));
363
364 if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
365 fPathRendering.reset(new GrGLPathRendering(this));
366 }
367
368 if (this->glCaps().samplerObjectSupport()) {
369 fSamplerObjectCache.reset(new SamplerObjectCache(this));
370 }
371 }
372
373 GrGLGpu::~GrGLGpu() {
374 // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
375 // to release the resources held by the objects themselves.
376 fPathRendering.reset();
377 fCopyProgramArrayBuffer.reset();
378 fMipmapProgramArrayBuffer.reset();
379
380 fHWProgram.reset();
381 if (fHWProgramID) {
382 // detach the current program so there is no confusion on OpenGL's part
383 // that we want it to be deleted
384 GL_CALL(UseProgram(0));
385 }
386
387 if (fTempSrcFBOID) {
388 this->deleteFramebuffer(fTempSrcFBOID);
389 }
390 if (fTempDstFBOID) {
391 this->deleteFramebuffer(fTempDstFBOID);
392 }
393 if (fStencilClearFBOID) {
394 this->deleteFramebuffer(fStencilClearFBOID);
395 }
396
397 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
398 if (0 != fCopyPrograms[i].fProgram) {
399 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
400 }
401 }
402
403 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
404 if (0 != fMipmapPrograms[i].fProgram) {
405 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
406 }
407 }
408
409 delete fProgramCache;
410 fSamplerObjectCache.reset();
411 }
412
413 void GrGLGpu::disconnect(DisconnectType type) {
414 INHERITED::disconnect(type);
415 if (DisconnectType::kCleanup == type) {
416 if (fHWProgramID) {
417 GL_CALL(UseProgram(0));
418 }
419 if (fTempSrcFBOID) {
420 this->deleteFramebuffer(fTempSrcFBOID);
421 }
422 if (fTempDstFBOID) {
423 this->deleteFramebuffer(fTempDstFBOID);
424 }
425 if (fStencilClearFBOID) {
426 this->deleteFramebuffer(fStencilClearFBOID);
427 }
428 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
429 if (fCopyPrograms[i].fProgram) {
430 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
431 }
432 }
433 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
434 if (fMipmapPrograms[i].fProgram) {
435 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
436 }
437 }
438
439 if (fSamplerObjectCache) {
440 fSamplerObjectCache->release();
441 }
442 } else {
443 if (fProgramCache) {
444 fProgramCache->abandon();
445 }
446 if (fSamplerObjectCache) {
447 fSamplerObjectCache->abandon();
448 }
449 }
450
451 fHWProgram.reset();
452 delete fProgramCache;
453 fProgramCache = nullptr;
454
455 fHWProgramID = 0;
456 fTempSrcFBOID = 0;
457 fTempDstFBOID = 0;
458 fStencilClearFBOID = 0;
459 fCopyProgramArrayBuffer.reset();
460 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
461 fCopyPrograms[i].fProgram = 0;
462 }
463 fMipmapProgramArrayBuffer.reset();
464 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
465 fMipmapPrograms[i].fProgram = 0;
466 }
467
468 if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
469 this->glPathRendering()->disconnect(type);
470 }
471 }
472
473 ///////////////////////////////////////////////////////////////////////////////
474
475 void GrGLGpu::onResetContext(uint32_t resetBits) {
476 if (resetBits & kMisc_GrGLBackendState) {
477 // We don't use the depth buffer at all.
478 GL_CALL(Disable(GR_GL_DEPTH_TEST));
479 GL_CALL(DepthMask(GR_GL_FALSE));
480
481 // We don't use face culling.
482 GL_CALL(Disable(GR_GL_CULL_FACE));
483 // We do use separate stencil. Our algorithms don't care which face is front vs. back so
484 // just set this to the default for self-consistency.
485 GL_CALL(FrontFace(GR_GL_CCW));
486
487 fHWBufferState[kTexel_GrBufferType].invalidate();
488 fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
489 fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
490 fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();
491
492 if (kGL_GrGLStandard == this->glStandard()) {
493 #ifndef USE_NSIGHT
494 // Desktop-only state that we never change
495 if (!this->glCaps().isCoreProfile()) {
496 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
497 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
498 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
499 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
500 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
501 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
502 }
503 // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
504 // core profile. This seems like a bug since the core spec removes any mention of
505 // GL_ARB_imaging.
506 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
507 GL_CALL(Disable(GR_GL_COLOR_TABLE));
508 }
509 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
510
511 if (this->caps()->wireframeMode()) {
512 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
513 } else {
514 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
515 }
516 #endif
517 // Since ES doesn't support glPointSize at all we always use the VS to
518 // set the point size
519 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
520
521 }
522
523 if (kGLES_GrGLStandard == this->glStandard() &&
524 this->glCaps().fbFetchRequiresEnablePerSample()) {
525 // The ARM extension requires specifically enabling MSAA fetching per sample.
526 // On some devices this may have a perf hit. Also, multiple render targets are disabled.
527 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
528 }
529 fHWWriteToColor = kUnknown_TriState;
530 // we only ever use lines in hairline mode
531 GL_CALL(LineWidth(1));
532 GL_CALL(Disable(GR_GL_DITHER));
533
534 fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
535 }
536
537 if (resetBits & kMSAAEnable_GrGLBackendState) {
538 fMSAAEnabled = kUnknown_TriState;
539
540 if (this->caps()->usesMixedSamples()) {
541 if (0 != this->caps()->maxRasterSamples()) {
542 fHWRasterMultisampleEnabled = kUnknown_TriState;
543 fHWNumRasterSamples = 0;
544 }
545
546 // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
547 // modulation. This state has no effect when not rendering to a mixed sampled target.
548 GL_CALL(CoverageModulation(GR_GL_RGBA));
549 }
550 }
551
552 fHWActiveTextureUnitIdx = -1; // invalid
553 fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);
554
555 if (resetBits & kTextureBinding_GrGLBackendState) {
556 for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
557 fHWBoundTextureUniqueIDs[s].makeInvalid();
558 }
559 if (fSamplerObjectCache) {
560 fSamplerObjectCache->invalidateBindings();
561 }
562 }
563
564 if (resetBits & kBlend_GrGLBackendState) {
565 fHWBlendState.invalidate();
566 }
567
568 if (resetBits & kView_GrGLBackendState) {
569 fHWScissorSettings.invalidate();
570 fHWWindowRectsState.invalidate();
571 fHWViewport.invalidate();
572 }
573
574 if (resetBits & kStencil_GrGLBackendState) {
575 fHWStencilSettings.invalidate();
576 fHWStencilTestEnabled = kUnknown_TriState;
577 }
578
579 // Vertex
580 if (resetBits & kVertex_GrGLBackendState) {
581 fHWVertexArrayState.invalidate();
582 fHWBufferState[kVertex_GrBufferType].invalidate();
583 fHWBufferState[kIndex_GrBufferType].invalidate();
584 }
585
586 if (resetBits & kRenderTarget_GrGLBackendState) {
587 fHWBoundRenderTargetUniqueID.makeInvalid();
588 fHWSRGBFramebuffer = kUnknown_TriState;
589 }
590
591 if (resetBits & kPathRendering_GrGLBackendState) {
592 if (this->caps()->shaderCaps()->pathRenderingSupport()) {
593 this->glPathRendering()->resetContext();
594 }
595 }
596
597 // we assume these values
598 if (resetBits & kPixelStore_GrGLBackendState) {
599 if (this->glCaps().unpackRowLengthSupport()) {
600 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
601 }
602 if (this->glCaps().packRowLengthSupport()) {
603 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
604 }
605 if (this->glCaps().packFlipYSupport()) {
606 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
607 }
608 }
609
610 if (resetBits & kProgram_GrGLBackendState) {
611 fHWProgramID = 0;
612 fHWProgram.reset();
613 }
614 }
615
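// Validates a client-supplied GrBackendTexture and pulls out its GL info. Only the 2D,
// RECTANGLE, and EXTERNAL targets are accepted, and the latter two only when the caps say
// they are supported.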
616 static bool check_backend_texture(const GrBackendTexture& backendTex, const GrGLCaps& caps,
617 GrGLTexture::IDDesc* idDesc) {
618 GrGLTextureInfo info;
619 if (!backendTex.getGLTextureInfo(&info) || !info.fID) {
620 return false;
621 }
622
623 idDesc->fInfo = info;
624
625 if (GR_GL_TEXTURE_EXTERNAL == idDesc->fInfo.fTarget) {
626 if (!caps.shaderCaps()->externalTextureSupport()) {
627 return false;
628 }
629 } else if (GR_GL_TEXTURE_RECTANGLE == idDesc->fInfo.fTarget) {
630 if (!caps.rectangleTextureSupport()) {
631 return false;
632 }
633 } else if (GR_GL_TEXTURE_2D != idDesc->fInfo.fTarget) {
634 return false;
635 }
636 return true;
637 }
638
639 sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
640 GrWrapOwnership ownership, GrWrapCacheable cacheable,
641 GrIOType ioType) {
642 GrGLTexture::IDDesc idDesc;
643 if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) {
644 return nullptr;
645 }
646 if (!idDesc.fInfo.fFormat) {
647 idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config());
648 }
649 if (kBorrow_GrWrapOwnership == ownership) {
650 idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
651 } else {
652 idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
653 }
654
655 GrSurfaceDesc surfDesc;
656 surfDesc.fFlags = kNone_GrSurfaceFlags;
657 surfDesc.fWidth = backendTex.width();
658 surfDesc.fHeight = backendTex.height();
659 surfDesc.fConfig = backendTex.config();
660 surfDesc.fSampleCnt = 1;
661
662 GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid
663 : GrMipMapsStatus::kNotAllocated;
664
665 auto texture =
666 GrGLTexture::MakeWrapped(this, surfDesc, mipMapsStatus, idDesc, cacheable, ioType);
667 // We don't know what parameters are already set on wrapped textures.
668 texture->textureParamsModified();
669 return std::move(texture);
670 }
671
672 sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
673 int sampleCnt,
674 GrWrapOwnership ownership,
675 GrWrapCacheable cacheable) {
676 GrGLTexture::IDDesc idDesc;
677 if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) {
678 return nullptr;
679 }
680 if (!idDesc.fInfo.fFormat) {
681 idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config());
682 }
683
684 // We don't support rendering to an EXTERNAL texture.
685 if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
686 return nullptr;
687 }
688
689 if (kBorrow_GrWrapOwnership == ownership) {
690 idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
691 } else {
692 idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
693 }
694
695 GrSurfaceDesc surfDesc;
696 surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
697 surfDesc.fWidth = backendTex.width();
698 surfDesc.fHeight = backendTex.height();
699 surfDesc.fConfig = backendTex.config();
700 surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config());
701 if (surfDesc.fSampleCnt < 1) {
702 return nullptr;
703 }
704
705 GrGLRenderTarget::IDDesc rtIDDesc;
706 if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) {
707 return nullptr;
708 }
709
710 GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kDirty
711 : GrMipMapsStatus::kNotAllocated;
712
713 sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped(
714 this, surfDesc, idDesc, rtIDDesc, cacheable, mipMapsStatus));
715 texRT->baseLevelWasBoundToFBO();
716 // We don't know what parameters are already set on wrapped textures.
717 texRT->textureParamsModified();
718 return std::move(texRT);
719 }
720
721 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
722 GrGLFramebufferInfo info;
723 if (!backendRT.getGLFramebufferInfo(&info)) {
724 return nullptr;
725 }
726
727 GrGLRenderTarget::IDDesc idDesc;
728 idDesc.fRTFBOID = info.fFBOID;
729 idDesc.fMSColorRenderbufferID = 0;
730 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
731 idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
732 idDesc.fIsMixedSampled = false;
733
734 GrSurfaceDesc desc;
735 desc.fFlags = kRenderTarget_GrSurfaceFlag;
736 desc.fWidth = backendRT.width();
737 desc.fHeight = backendRT.height();
738 desc.fConfig = backendRT.config();
739 desc.fSampleCnt =
740 this->caps()->getRenderTargetSampleCount(backendRT.sampleCnt(), backendRT.config());
741
742 return GrGLRenderTarget::MakeWrapped(this, desc, info.fFormat, idDesc, backendRT.stencilBits());
743 }
744
745 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
746 int sampleCnt) {
747 GrGLTextureInfo info;
748 if (!tex.getGLTextureInfo(&info) || !info.fID) {
749 return nullptr;
750 }
751
752 if (GR_GL_TEXTURE_RECTANGLE != info.fTarget &&
753 GR_GL_TEXTURE_2D != info.fTarget) {
754 // Only texture rectangle and texture 2d are supported. We do not check whether texture
755 // rectangle is supported by Skia - if the caller provided us with a texture rectangle,
756 // we assume the necessary support exists.
757 return nullptr;
758 }
759
760 GrSurfaceDesc surfDesc;
761 surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
762 surfDesc.fWidth = tex.width();
763 surfDesc.fHeight = tex.height();
764 surfDesc.fConfig = tex.config();
765 surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config());
766
767 GrGLRenderTarget::IDDesc rtIDDesc;
768 if (!this->createRenderTargetObjects(surfDesc, info, &rtIDDesc)) {
769 return nullptr;
770 }
771 return GrGLRenderTarget::MakeWrapped(this, surfDesc, info.fFormat, rtIDDesc, 0);
772 }
773
774 static bool check_write_and_transfer_input(GrGLTexture* glTex) {
775 if (!glTex) {
776 return false;
777 }
778
779 // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
780 if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
781 return false;
782 }
783
784 return true;
785 }
786
787 bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
788 GrColorType srcColorType, const GrMipLevel texels[],
789 int mipLevelCount) {
790 auto glTex = static_cast<GrGLTexture*>(surface->asTexture());
791
792 if (!check_write_and_transfer_input(glTex)) {
793 return false;
794 }
795
796 this->setScratchTextureUnit();
797 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
798
799 // No sRGB transformation occurs in uploadTexData. We choose to make the src config match the
800 // srgb-ness of the surface to avoid issues in ES2 where internal/external formats must match.
801 // When we're on ES2 and the dst is GL_SRGB_ALPHA by making the config be kSRGB_8888 we know
802 // that our caps will choose GL_SRGB_ALPHA as the external format, too. On ES3 or regular GL our
803 // caps knows to make the external format be GL_RGBA.
804 auto srgbEncoded = GrPixelConfigIsSRGBEncoded(surface->config());
805 auto srcAsConfig = GrColorTypeToPixelConfig(srcColorType, srgbEncoded);
806
807 SkASSERT(!GrPixelConfigIsCompressed(glTex->config()));
808 return this->uploadTexData(glTex->config(), glTex->width(), glTex->height(), glTex->target(),
809 kWrite_UploadType, left, top, width, height, srcAsConfig, texels,
810 mipLevelCount);
811 }
812
813 // For GL_[UN]PACK_ALIGNMENT. TODO: This really wants to be GrColorType.
814 static inline GrGLint config_alignment(GrPixelConfig config) {
815 SkASSERT(!GrPixelConfigIsCompressed(config));
816 switch (config) {
817 case kAlpha_8_GrPixelConfig:
818 case kAlpha_8_as_Alpha_GrPixelConfig:
819 case kAlpha_8_as_Red_GrPixelConfig:
820 case kGray_8_GrPixelConfig:
821 case kGray_8_as_Lum_GrPixelConfig:
822 case kGray_8_as_Red_GrPixelConfig:
823 return 1;
824 case kRGB_565_GrPixelConfig:
825 case kRGBA_4444_GrPixelConfig:
826 case kRG_88_GrPixelConfig:
827 case kAlpha_half_GrPixelConfig:
828 case kAlpha_half_as_Red_GrPixelConfig:
829 case kRGBA_half_GrPixelConfig:
830 return 2;
831 case kRGBA_8888_GrPixelConfig:
832 case kRGB_888_GrPixelConfig: // We're really talking about GrColorType::kRGB_888x here.
833 case kBGRA_8888_GrPixelConfig:
834 case kSRGBA_8888_GrPixelConfig:
835 case kSBGRA_8888_GrPixelConfig:
836 case kRGBA_1010102_GrPixelConfig:
837 case kRGBA_float_GrPixelConfig:
838 case kRG_float_GrPixelConfig:
839 return 4;
840 case kRGB_ETC1_GrPixelConfig:
841 case kUnknown_GrPixelConfig:
842 return 0;
843 }
844 SK_ABORT("Invalid pixel config");
845 return 0;
846 }
847
848 bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
849 GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
850 size_t rowBytes) {
851 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
852 GrPixelConfig texConfig = glTex->config();
853 SkASSERT(this->caps()->isConfigTexturable(texConfig));
854
855 // Can't transfer compressed data
856 SkASSERT(!GrPixelConfigIsCompressed(glTex->config()));
857
858 if (!check_write_and_transfer_input(glTex)) {
859 return false;
860 }
861
862 static_assert(sizeof(int) == sizeof(int32_t), "");
863 if (width <= 0 || height <= 0) {
864 return false;
865 }
866
867 this->setScratchTextureUnit();
868 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
869
870 SkASSERT(!transferBuffer->isMapped());
871 SkASSERT(!transferBuffer->isCPUBacked());
872 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
873 this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);
874
875 SkDEBUGCODE(
876 SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
877 SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
878 SkASSERT(bounds.contains(subRect));
879 )
880
881 int bpp = GrColorTypeBytesPerPixel(bufferColorType);
882 const size_t trimRowBytes = width * bpp;
883 if (!rowBytes) {
884 rowBytes = trimRowBytes;
885 }
886 const void* pixels = (void*)offset;
887 if (width < 0 || height < 0) {
888 return false;
889 }
890
891 bool restoreGLRowLength = false;
892 if (trimRowBytes != rowBytes) {
893 // we should have checked for this support already
894 SkASSERT(this->glCaps().unpackRowLengthSupport());
895 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
896 restoreGLRowLength = true;
897 }
898
899 // Internal format comes from the texture desc.
900 GrGLenum internalFormat;
901 // External format and type come from the upload data.
902 GrGLenum externalFormat;
903 GrGLenum externalType;
904 auto bufferAsConfig = GrColorTypeToPixelConfig(bufferColorType, GrSRGBEncoded::kNo);
905 if (!this->glCaps().getTexImageFormats(texConfig, bufferAsConfig, &internalFormat,
906 &externalFormat, &externalType)) {
907 return false;
908 }
909
910 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig)));
911 GL_CALL(TexSubImage2D(glTex->target(),
912 0,
913 left, top,
914 width,
915 height,
916 externalFormat, externalType,
917 pixels));
918
919 if (restoreGLRowLength) {
920 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
921 }
922
923 return true;
924 }
925
926 /**
927 * Creates storage space for the texture and fills it with texels.
928 *
929 * @param config Pixel config of the texture.
930 * @param interface The GL interface in use.
931 * @param caps The capabilities of the GL device.
932 * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.)
933 * @param internalFormat The data format used for the internal storage of the texture. May be sized.
934 * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized.
935 * @param externalFormat The data format used for the external storage of the texture.
936 * @param externalType The type of the data used for the external storage of the texture.
937 * @param texels The texel data of the texture being created.
938 * @param mipLevelCount Number of mipmap levels
939 * @param baseWidth The width of the texture's base mipmap level
940 * @param baseHeight The height of the texture's base mipmap level
941 */
942 static bool allocate_and_populate_texture(GrPixelConfig config,
943 const GrGLInterface& interface,
944 const GrGLCaps& caps,
945 GrGLenum target,
946 GrGLenum internalFormat,
947 GrGLenum internalFormatForTexStorage,
948 GrGLenum externalFormat,
949 GrGLenum externalType,
950 const GrMipLevel texels[], int mipLevelCount,
951 int baseWidth, int baseHeight) {
952 CLEAR_ERROR_BEFORE_ALLOC(&interface);
953
954 bool useTexStorage = caps.isConfigTexSupportEnabled(config);
955 // We can only use TexStorage if we know we will not later change the storage requirements.
956 // This means if we may later want to add mipmaps, we cannot use TexStorage.
957 // Right now, we cannot know if we will later add mipmaps or not.
958 // The only time we can use TexStorage is when we already have the
959 // mipmaps.
960 useTexStorage &= mipLevelCount > 1;
961
962 if (useTexStorage) {
963 // We never resize or change formats of textures.
964 GL_ALLOC_CALL(&interface,
965 TexStorage2D(target, SkTMax(mipLevelCount, 1), internalFormatForTexStorage,
966 baseWidth, baseHeight));
967 GrGLenum error = CHECK_ALLOC_ERROR(&interface);
968 if (error != GR_GL_NO_ERROR) {
969 return false;
970 } else {
971 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
972 const void* currentMipData = texels[currentMipLevel].fPixels;
973 if (currentMipData == nullptr) {
974 continue;
975 }
976 int twoToTheMipLevel = 1 << currentMipLevel;
977 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
978 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
979
980 GR_GL_CALL(&interface,
981 TexSubImage2D(target,
982 currentMipLevel,
983 0, // left
984 0, // top
985 currentWidth,
986 currentHeight,
987 externalFormat, externalType,
988 currentMipData));
989 }
990 return true;
991 }
992 } else {
993 if (!mipLevelCount) {
994 GL_ALLOC_CALL(&interface,
995 TexImage2D(target,
996 0,
997 internalFormat,
998 baseWidth,
999 baseHeight,
1000 0, // border
1001 externalFormat, externalType,
1002 nullptr));
1003 GrGLenum error = CHECK_ALLOC_ERROR(&interface);
1004 if (error != GR_GL_NO_ERROR) {
1005 return false;
1006 }
1007 } else {
1008 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1009 int twoToTheMipLevel = 1 << currentMipLevel;
1010 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
1011 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
1012 const void* currentMipData = texels[currentMipLevel].fPixels;
1013 // Even if currentMipData is nullptr, continue to call TexImage2D.
1014 // This will allocate texture memory which we can later populate.
1015 GL_ALLOC_CALL(&interface,
1016 TexImage2D(target,
1017 currentMipLevel,
1018 internalFormat,
1019 currentWidth,
1020 currentHeight,
1021 0, // border
1022 externalFormat, externalType,
1023 currentMipData));
1024 GrGLenum error = CHECK_ALLOC_ERROR(&interface);
1025 if (error != GR_GL_NO_ERROR) {
1026 return false;
1027 }
1028 }
1029 }
1030 }
1031 return true;
1032 }
1033
1034 /**
1035 * Creates storage space for the texture and fills it with texels.
1036 *
1037 * @param config Compressed pixel config of the texture.
1038 * @param interface The GL interface in use.
1039 * @param caps The capabilities of the GL device.
1040 * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.)
1041 * @param internalFormat The data format used for the internal storage of the texture.
1042 * @param texels The texel data of the texture being created.
1043 * @param mipLevelCount Number of mipmap levels
1044 * @param baseWidth The width of the texture's base mipmap level
1045 * @param baseHeight The height of the texture's base mipmap level
1046 */
1047 static bool allocate_and_populate_compressed_texture(GrPixelConfig config,
1048 const GrGLInterface& interface,
1049 const GrGLCaps& caps,
1050 GrGLenum target, GrGLenum internalFormat,
1051 const GrMipLevel texels[], int mipLevelCount,
1052 int baseWidth, int baseHeight) {
1053 CLEAR_ERROR_BEFORE_ALLOC(&interface);
1054 SkASSERT(GrPixelConfigIsCompressed(config));
1055
1056 bool useTexStorage = caps.isConfigTexSupportEnabled(config);
1057 // We can only use TexStorage if we know we will not later change the storage requirements.
1058 // This means if we may later want to add mipmaps, we cannot use TexStorage.
1059 // Right now, we cannot know if we will later add mipmaps or not.
1060 // The only time we can use TexStorage is when we already have the
1061 // mipmaps.
1062 useTexStorage &= mipLevelCount > 1;
1063
1064 if (useTexStorage) {
1065 // We never resize or change formats of textures.
1066 GL_ALLOC_CALL(&interface,
1067 TexStorage2D(target,
1068 mipLevelCount,
1069 internalFormat,
1070 baseWidth, baseHeight));
1071 GrGLenum error = CHECK_ALLOC_ERROR(&interface);
1072 if (error != GR_GL_NO_ERROR) {
1073 return false;
1074 } else {
1075 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1076 const void* currentMipData = texels[currentMipLevel].fPixels;
1077 if (currentMipData == nullptr) {
1078 // Compressed textures require data for every level
1079 return false;
1080 }
1081
1082 int twoToTheMipLevel = 1 << currentMipLevel;
1083 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
1084 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
1085
1086 // Make sure that the width and height that we pass to OpenGL
1087 // are multiples of the block size.
1088 size_t dataSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight);
1089 GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
1090 currentMipLevel,
1091 0, // left
1092 0, // top
1093 currentWidth,
1094 currentHeight,
1095 internalFormat,
1096 SkToInt(dataSize),
1097 currentMipData));
1098 }
1099 }
1100 } else {
1101 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1102 const void* currentMipData = texels[currentMipLevel].fPixels;
1103 if (currentMipData == nullptr) {
1104 // Compressed textures require data for every level
1105 return false;
1106 }
1107
1108 int twoToTheMipLevel = 1 << currentMipLevel;
1109 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
1110 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
1111
1112 // Make sure that the width and height that we pass to OpenGL
1113 // are multiples of the block size.
1114 size_t dataSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight);
1115
1116 GL_ALLOC_CALL(&interface,
1117 CompressedTexImage2D(target,
1118 currentMipLevel,
1119 internalFormat,
1120 currentWidth,
1121 currentHeight,
1122 0, // border
1123 SkToInt(dataSize),
1124 currentMipData));
1125
1126 GrGLenum error = CHECK_ALLOC_ERROR(&interface);
1127 if (error != GR_GL_NO_ERROR) {
1128 return false;
1129 }
1130 }
1131 }
1132
1133 return true;
1134 }
1135 /**
1136 * After a texture is created, any state which was altered during its creation
1137 * needs to be restored.
1138 *
1139 * @param interface The GL interface to use.
1140 * @param caps The capabilities of the GL device.
1141 * @param restoreGLRowLength Should the row length unpacking be restored?
1143 */
1144 static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
1145 bool restoreGLRowLength) {
1146 if (restoreGLRowLength) {
1147 SkASSERT(caps.unpackRowLengthSupport());
1148 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1149 }
1150 }
1151
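// Unbinds any CPU-to-GPU transfer (pixel unpack) buffer so that subsequent texture uploads
// read from client memory rather than from a previously bound buffer.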
1152 void GrGLGpu::unbindCpuToGpuXferBuffer() {
1153 auto& xferBufferState = fHWBufferState[kXferCpuToGpu_GrBufferType];
1154 if (!xferBufferState.fBoundBufferUniqueID.isInvalid()) {
1155 GL_CALL(BindBuffer(xferBufferState.fGLTarget, 0));
1156 xferBufferState.invalidate();
1157 }
1158
1159 }
1160
1161 // TODO: Make this take a GrColorType instead of dataConfig. This requires updating GrGLCaps to
1162 // convert from GrColorType to externalFormat/externalType GLenum values.
1163 bool GrGLGpu::uploadTexData(GrPixelConfig texConfig, int texWidth, int texHeight, GrGLenum target,
1164 UploadType uploadType, int left, int top, int width, int height,
1165 GrPixelConfig dataConfig, const GrMipLevel texels[], int mipLevelCount,
1166 GrMipMapsStatus* mipMapsStatus) {
1167 // If we're uploading compressed data then we should be using uploadCompressedTexData
1168 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
1169
1170 SkASSERT(this->caps()->isConfigTexturable(texConfig));
1171 SkDEBUGCODE(
1172 SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
1173 SkIRect bounds = SkIRect::MakeWH(texWidth, texHeight);
1174 SkASSERT(bounds.contains(subRect));
1175 )
1176 SkASSERT(1 == mipLevelCount ||
1177 (0 == left && 0 == top && width == texWidth && height == texHeight));
1178
1179 this->unbindCpuToGpuXferBuffer();
1180
1181 // texels is const.
1182 // But we may need to flip the texture vertically to prepare it.
1183 // Rather than flip in place and alter the incoming data,
1184 // we allocate a new buffer to flip into.
1185 // This means we need to make a non-const shallow copy of texels.
1186 SkAutoTMalloc<GrMipLevel> texelsShallowCopy;
1187
1188 if (mipLevelCount) {
1189 texelsShallowCopy.reset(mipLevelCount);
1190 memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
1191 }
1192
1193 const GrGLInterface* interface = this->glInterface();
1194 const GrGLCaps& caps = this->glCaps();
1195
1196 size_t bpp = GrBytesPerPixel(dataConfig);
1197
1198 if (width == 0 || height == 0) {
1199 return false;
1200 }
1201
1202 // Internal format comes from the texture desc.
1203 GrGLenum internalFormat;
1204 // External format and type come from the upload data.
1205 GrGLenum externalFormat;
1206 GrGLenum externalType;
1207 if (!this->glCaps().getTexImageFormats(texConfig, dataConfig, &internalFormat, &externalFormat,
1208 &externalType)) {
1209 return false;
1210 }
1211 // TexStorage requires a sized format, and internalFormat may or may not be one.
1212 GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(texConfig);
1213
1214 /*
1215 * Check whether to allocate a temporary buffer for flipping y or
1216 * because our srcData has extra bytes past each row. If so, we need
1217 * to trim those off here, since GL ES may not let us specify
1218 * GL_UNPACK_ROW_LENGTH.
1219 */
1220 bool restoreGLRowLength = false;
1221
1222 // in case we need a temporary, trimmed copy of the src pixels
1223 SkAutoSMalloc<128 * 128> tempStorage;
1224
1225 if (mipMapsStatus) {
1226 *mipMapsStatus = GrMipMapsStatus::kValid;
1227 }
1228
1229 const bool usesMips = mipLevelCount > 1;
1230
1231 // find the combined size of all the mip levels and the relative offset of
1232 // each into the collective buffer
1233 bool willNeedData = false;
1234 size_t combinedBufferSize = 0;
1235 SkTArray<size_t> individualMipOffsets(mipLevelCount);
1236 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1237 if (texelsShallowCopy[currentMipLevel].fPixels) {
1238 int twoToTheMipLevel = 1 << currentMipLevel;
1239 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1240 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1241 const size_t trimRowBytes = currentWidth * bpp;
1242 const size_t trimmedSize = trimRowBytes * currentHeight;
1243
1244 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
1245 ? texelsShallowCopy[currentMipLevel].fRowBytes
1246 : trimRowBytes;
1247
1248 if (((!caps.unpackRowLengthSupport() || usesMips) && trimRowBytes != rowBytes)) {
1249 willNeedData = true;
1250 }
1251
1252 individualMipOffsets.push_back(combinedBufferSize);
1253 combinedBufferSize += trimmedSize;
1254 } else {
1255 if (mipMapsStatus) {
1256 *mipMapsStatus = GrMipMapsStatus::kDirty;
1257 }
1258 individualMipOffsets.push_back(0);
1259 }
1260 }
1261 if (mipMapsStatus && mipLevelCount <= 1) {
1262 *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
1263 }
1264 char* buffer = nullptr;
1265 if (willNeedData) {
1266 buffer = (char*)tempStorage.reset(combinedBufferSize);
1267 }
1268
1269 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1270 if (!texelsShallowCopy[currentMipLevel].fPixels) {
1271 continue;
1272 }
1273 int twoToTheMipLevel = 1 << currentMipLevel;
1274 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1275 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1276 const size_t trimRowBytes = currentWidth * bpp;
1277
1278 /*
1279 * check whether to allocate a temporary buffer for flipping y or
1280 * because our srcData has extra bytes past each row. If so, we need
1281 * to trim those off here, since GL ES may not let us specify
1282 * GL_UNPACK_ROW_LENGTH.
1283 */
1284 restoreGLRowLength = false;
1285
1286 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
1287 ? texelsShallowCopy[currentMipLevel].fRowBytes
1288 : trimRowBytes;
1289
1290 // TODO: This optimization should be enabled with or without mips.
1291 // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
1292 // mip level, before calling glTexImage2D.
1293 if (caps.unpackRowLengthSupport() && !usesMips) {
1294 // can't use this for flipping, only non-neg values allowed. :(
1295 if (rowBytes != trimRowBytes) {
1296 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
1297 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
1298 restoreGLRowLength = true;
1299 }
1300 } else if (trimRowBytes != rowBytes) {
1301 // copy data into our new storage, skipping the trailing bytes
1302 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
1303 char* dst = buffer + individualMipOffsets[currentMipLevel];
1304 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
1305 // now point data to our copied version
1306 texelsShallowCopy[currentMipLevel].fPixels = buffer +
1307 individualMipOffsets[currentMipLevel];
1308 texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
1309 }
1310 }
1311
1312 if (mipLevelCount) {
1313 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig)));
1314 }
1315
1316 bool succeeded = true;
1317 if (kNewTexture_UploadType == uploadType) {
1318 if (0 == left && 0 == top && texWidth == width && texHeight == height) {
1319 succeeded = allocate_and_populate_texture(
1320 texConfig, *interface, caps, target, internalFormat,
1321 internalFormatForTexStorage, externalFormat, externalType,
1322 texelsShallowCopy, mipLevelCount, width, height);
1323 } else {
1324 succeeded = false;
1325 }
1326 } else {
1327 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1328 if (!texelsShallowCopy[currentMipLevel].fPixels) {
1329 continue;
1330 }
1331 int twoToTheMipLevel = 1 << currentMipLevel;
1332 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1333 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1334
1335 GL_CALL(TexSubImage2D(target,
1336 currentMipLevel,
1337 left, top,
1338 currentWidth,
1339 currentHeight,
1340 externalFormat, externalType,
1341 texelsShallowCopy[currentMipLevel].fPixels));
1342 }
1343 }
1344
1345 restore_pixelstore_state(*interface, caps, restoreGLRowLength);
1346
1347 return succeeded;
1348 }
1349
1350 bool GrGLGpu::uploadCompressedTexData(GrPixelConfig texConfig, int texWidth, int texHeight,
1351 GrGLenum target, GrPixelConfig dataConfig,
1352 const GrMipLevel texels[], int mipLevelCount,
1353 GrMipMapsStatus* mipMapsStatus) {
1354 SkASSERT(this->caps()->isConfigTexturable(texConfig));
1355
1356 const GrGLInterface* interface = this->glInterface();
1357 const GrGLCaps& caps = this->glCaps();
1358
1359 // We only need the internal format for compressed 2D textures.
1360 GrGLenum internalFormat;
1361 if (!caps.getCompressedTexImageFormats(texConfig, &internalFormat)) {
1362 return false;
1363 }
1364
1365 if (mipMapsStatus && mipLevelCount <= 1) {
1366 *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
1367 } else if (mipMapsStatus) {
1368 *mipMapsStatus = GrMipMapsStatus::kValid;
1369 }
1370
1371 return allocate_and_populate_compressed_texture(texConfig, *interface, caps, target,
1372 internalFormat, texels, mipLevelCount,
1373 texWidth, texHeight);
1374
1376 }
1377
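// Allocates multisampled storage for the currently bound renderbuffer, using whichever
// multisample-FBO extension flavor the context supports. Returns false if GL reports an
// allocation error.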
1378 static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
1379 int sampleCount,
1380 GrGLenum format,
1381 int width, int height) {
1382 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
1383 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
1384 switch (ctx.caps()->msFBOType()) {
1385 case GrGLCaps::kStandard_MSFBOType:
1386 case GrGLCaps::kMixedSamples_MSFBOType:
1387 GL_ALLOC_CALL(ctx.interface(),
1388 RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
1389 sampleCount,
1390 format,
1391 width, height));
1392 break;
1393 case GrGLCaps::kES_Apple_MSFBOType:
1394 GL_ALLOC_CALL(ctx.interface(),
1395 RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
1396 sampleCount,
1397 format,
1398 width, height));
1399 break;
1400 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
1401 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
1402 GL_ALLOC_CALL(ctx.interface(),
1403 RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
1404 sampleCount,
1405 format,
1406 width, height));
1407 break;
1408 case GrGLCaps::kNone_MSFBOType:
1409 SK_ABORT("Shouldn't be here if we don't support multisampled renderbuffers.");
1410 break;
1411 }
1412 return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
1413 }
1414
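// Creates the FBO(s) needed to render to the given texture (plus a color renderbuffer when
// MSAA renderbuffers are used), filling in the IDDesc on success.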
1415 bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
1416 const GrGLTextureInfo& texInfo,
1417 GrGLRenderTarget::IDDesc* idDesc) {
1418 idDesc->fMSColorRenderbufferID = 0;
1419 idDesc->fRTFBOID = 0;
1420 idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
1421 idDesc->fTexFBOID = 0;
1422 SkASSERT((GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType()) ==
1423 this->caps()->usesMixedSamples());
1424 idDesc->fIsMixedSampled = desc.fSampleCnt > 1 && this->caps()->usesMixedSamples();
1425
1426 GrGLenum status;
1427
1428 GrGLenum colorRenderbufferFormat = 0; // suppress warning
1429
1430 if (desc.fSampleCnt > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
1431 goto FAILED;
1432 }
1433
1434 GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
1435 if (!idDesc->fTexFBOID) {
1436 goto FAILED;
1437 }
1438
1439 // If we are using multisampling we will create two FBOS. We render to one and then resolve to
1440 // the texture bound to the other. The exception is the IMG multisample extension. With this
1441 // extension the texture is multisampled when rendered to and then auto-resolves it when it is
1442 // rendered from.
1443 if (desc.fSampleCnt > 1 && this->glCaps().usesMSAARenderBuffers()) {
1444 GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
1445 GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
1446 if (!idDesc->fRTFBOID ||
1447 !idDesc->fMSColorRenderbufferID) {
1448 goto FAILED;
1449 }
1450 this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat);
1451 } else {
1452 idDesc->fRTFBOID = idDesc->fTexFBOID;
1453 }
1454
1455 // below here we may bind the FBO
1456 fHWBoundRenderTargetUniqueID.makeInvalid();
1457 if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
1458 SkASSERT(desc.fSampleCnt > 1);
1459 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
1460 if (!renderbuffer_storage_msaa(*fGLContext,
1461 desc.fSampleCnt,
1462 colorRenderbufferFormat,
1463 desc.fWidth, desc.fHeight)) {
1464 goto FAILED;
1465 }
1466 this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID);
1467 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1468 GR_GL_COLOR_ATTACHMENT0,
1469 GR_GL_RENDERBUFFER,
1470 idDesc->fMSColorRenderbufferID));
1471 if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
1472 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1473 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1474 goto FAILED;
1475 }
1476 fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
1477 }
1478 }
1479 this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID);
1480
1481 if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 1) {
1482 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
1483 GR_GL_COLOR_ATTACHMENT0,
1484 texInfo.fTarget,
1485 texInfo.fID, 0, desc.fSampleCnt));
1486 } else {
1487 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1488 GR_GL_COLOR_ATTACHMENT0,
1489 texInfo.fTarget,
1490 texInfo.fID, 0));
1491 }
1492 if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
1493 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1494 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1495 goto FAILED;
1496 }
1497 fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
1498 }
1499
1500 return true;
1501
1502 FAILED:
1503 if (idDesc->fMSColorRenderbufferID) {
1504 GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
1505 }
1506 if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
1507 this->deleteFramebuffer(idDesc->fRTFBOID);
1508 }
1509 if (idDesc->fTexFBOID) {
1510 this->deleteFramebuffer(idDesc->fTexFBOID);
1511 }
1512 return false;
1513 }
1514
1515 // good to set a break-point here to know when createTexture fails
1516 static sk_sp<GrTexture> return_null_texture() {
1517 // SkDEBUGFAIL("null texture");
1518 return nullptr;
1519 }
1520
1521 static GrGLTexture::SamplerParams set_initial_texture_params(const GrGLInterface* interface,
1522 const GrGLTextureInfo& info) {
1523 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1524 // drivers have a bug where an FBO won't be complete if it includes a
1525 // texture that is not mipmap complete (considering the filter in use).
1526 GrGLTexture::SamplerParams params;
1527 params.fMinFilter = GR_GL_NEAREST;
1528 params.fMagFilter = GR_GL_NEAREST;
1529 params.fWrapS = GR_GL_CLAMP_TO_EDGE;
1530 params.fWrapT = GR_GL_CLAMP_TO_EDGE;
1531 GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_MAG_FILTER, params.fMagFilter));
1532 GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_MIN_FILTER, params.fMinFilter));
1533 GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, params.fWrapS));
1534 GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, params.fWrapT));
1535 return params;
1536 }
1537
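// Creates a GrGLTexture (and, for render targets, the backing FBO objects). If an initial clear
// was requested but neither glClearTexImage nor an FBO-based clear is usable for this config, a
// zero-filled level-0 upload is substituted for the clear.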
1538 sk_sp<GrTexture> GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
1539 SkBudgeted budgeted,
1540 const GrMipLevel texels[],
1541 int mipLevelCount) {
1542 // We fail if the MSAA was requested and is not available.
1543 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt > 1) {
1544 //SkDebugf("MSAA RT requested but not supported on this platform.");
1545 return return_null_texture();
1546 }
1547
1548 bool performClear = (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
1549 !GrPixelConfigIsCompressed(desc.fConfig);
1550
1551 GrMipLevel zeroLevel;
1552 std::unique_ptr<uint8_t[]> zeros;
1553 if (performClear && !this->glCaps().clearTextureSupport() &&
1554 !this->glCaps().canConfigBeFBOColorAttachment(desc.fConfig)) {
1555 size_t rowSize = GrBytesPerPixel(desc.fConfig) * desc.fWidth;
1556 size_t size = rowSize * desc.fHeight;
1557 zeros.reset(new uint8_t[size]);
1558 memset(zeros.get(), 0, size);
1559 zeroLevel.fPixels = zeros.get();
1560 zeroLevel.fRowBytes = 0;
1561 texels = &zeroLevel;
1562 mipLevelCount = 1;
1563 performClear = false;
1564 }
1565
1566 bool isRenderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
1567
1568 GrGLTexture::IDDesc idDesc;
1569 idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1570 GrMipMapsStatus mipMapsStatus;
1571 GrGLTexture::SamplerParams initialTexParams;
1572 if (!this->createTextureImpl(desc, &idDesc.fInfo, isRenderTarget, &initialTexParams, texels,
1573 mipLevelCount, &mipMapsStatus)) {
1574 return return_null_texture();
1575 }
1576
1577 sk_sp<GrGLTexture> tex;
1578 if (isRenderTarget) {
1579 // unbind the texture from the texture unit before binding it to the frame buffer
1580 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
1581 GrGLRenderTarget::IDDesc rtIDDesc;
1582
1583 if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) {
1584 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1585 return return_null_texture();
1586 }
1587 tex = sk_make_sp<GrGLTextureRenderTarget>(this, budgeted, desc, idDesc, rtIDDesc,
1588 mipMapsStatus);
1589 tex->baseLevelWasBoundToFBO();
1590 } else {
1591 tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, idDesc, mipMapsStatus);
1592 }
1593
1594 tex->setCachedParams(&initialTexParams, tex->getCachedNonSamplerParams(),
1595 this->getResetTimestamp());
1596 #ifdef TRACE_TEXTURE_CREATION
1597 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
1598 idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
1599 #endif
1600 if (tex && performClear) {
1601 if (this->glCaps().clearTextureSupport()) {
1602 static constexpr uint32_t kZero = 0;
1603 GL_CALL(ClearTexImage(tex->textureID(), 0, GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, &kZero));
1604 } else {
1605 GrGLIRect viewport;
1606 this->bindSurfaceFBOForPixelOps(tex.get(), GR_GL_FRAMEBUFFER, &viewport,
1607 kDst_TempFBOTarget);
1608 this->disableScissor();
1609 this->disableWindowRectangles();
1610 this->flushColorWrite(true);
1611 this->flushClearColor(0, 0, 0, 0);
1612 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1613 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, tex.get());
1614 fHWBoundRenderTargetUniqueID.makeInvalid();
1615 }
1616 }
1617 return std::move(tex);
1618 }
1619
1620 namespace {
1621
1622 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
1623
1624 void inline get_stencil_rb_sizes(const GrGLInterface* gl,
1625 GrGLStencilAttachment::Format* format) {
1626
1627 // we shouldn't ever know one size and not the other
1628 SkASSERT((kUnknownBitCount == format->fStencilBits) ==
1629 (kUnknownBitCount == format->fTotalBits));
1630 if (kUnknownBitCount == format->fStencilBits) {
1631 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1632 GR_GL_RENDERBUFFER_STENCIL_SIZE,
1633 (GrGLint*)&format->fStencilBits);
1634 if (format->fPacked) {
1635 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1636 GR_GL_RENDERBUFFER_DEPTH_SIZE,
1637 (GrGLint*)&format->fTotalBits);
1638 format->fTotalBits += format->fStencilBits;
1639 } else {
1640 format->fTotalBits = format->fStencilBits;
1641 }
1642 }
1643 }
1644 }
1645
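// Determines which entry of GrGLCaps::stencilFormats() is compatible with the given pixel config
// by attaching a small scratch color texture and each candidate stencil renderbuffer to a
// temporary FBO and testing for GR_GL_FRAMEBUFFER_COMPLETE. The result is cached in the caps;
// -1 means no compatible stencil format was found.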
1646 int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) {
1647 static const int kSize = 16;
1648 SkASSERT(this->caps()->isConfigRenderable(config));
1649 if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) {
1650 // Default to unsupported, set this if we find a stencil format that works.
1651 int firstWorkingStencilFormatIndex = -1;
1652
1653 // Create color texture
1654 GrGLuint colorID = 0;
1655 GL_CALL(GenTextures(1, &colorID));
1656 this->setScratchTextureUnit();
1657 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID));
1658 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1659 GR_GL_TEXTURE_MAG_FILTER,
1660 GR_GL_NEAREST));
1661 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1662 GR_GL_TEXTURE_MIN_FILTER,
1663 GR_GL_NEAREST));
1664 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1665 GR_GL_TEXTURE_WRAP_S,
1666 GR_GL_CLAMP_TO_EDGE));
1667 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1668 GR_GL_TEXTURE_WRAP_T,
1669 GR_GL_CLAMP_TO_EDGE));
1670
1671 GrGLenum internalFormat;
1672 GrGLenum externalFormat;
1673 GrGLenum externalType;
1674 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
1675 &externalType)) {
1676             return -1;  // this function returns a stencil format index; -1 means "none found"
1677 }
1678 this->unbindCpuToGpuXferBuffer();
1679 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1680 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D,
1681 0,
1682 internalFormat,
1683 kSize,
1684 kSize,
1685 0,
1686 externalFormat,
1687 externalType,
1688 nullptr));
1689 if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) {
1690 GL_CALL(DeleteTextures(1, &colorID));
1691 return -1;
1692 }
1693
1694 // unbind the texture from the texture unit before binding it to the frame buffer
1695 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1696
1697 // Create Framebuffer
1698 GrGLuint fb = 0;
1699 GL_CALL(GenFramebuffers(1, &fb));
1700 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb);
1701 fHWBoundRenderTargetUniqueID.makeInvalid();
1702 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1703 GR_GL_COLOR_ATTACHMENT0,
1704 GR_GL_TEXTURE_2D,
1705 colorID,
1706 0));
1707 GrGLuint sbRBID = 0;
1708 GL_CALL(GenRenderbuffers(1, &sbRBID));
1709
1710         // Iterate over the stencil formats until we find a compatible one.
1711 int stencilFmtCnt = this->glCaps().stencilFormats().count();
1712 if (sbRBID) {
1713 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1714 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1715 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i];
1716 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1717 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
1718 sFmt.fInternalFormat,
1719 kSize, kSize));
1720 if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) {
1721 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1722 GR_GL_STENCIL_ATTACHMENT,
1723 GR_GL_RENDERBUFFER, sbRBID));
1724 if (sFmt.fPacked) {
1725 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1726 GR_GL_DEPTH_ATTACHMENT,
1727 GR_GL_RENDERBUFFER, sbRBID));
1728 } else {
1729 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1730 GR_GL_DEPTH_ATTACHMENT,
1731 GR_GL_RENDERBUFFER, 0));
1732 }
1733 GrGLenum status;
1734 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1735 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1736 firstWorkingStencilFormatIndex = i;
1737 break;
1738 }
1739 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1740 GR_GL_STENCIL_ATTACHMENT,
1741 GR_GL_RENDERBUFFER, 0));
1742 if (sFmt.fPacked) {
1743 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1744 GR_GL_DEPTH_ATTACHMENT,
1745 GR_GL_RENDERBUFFER, 0));
1746 }
1747 }
1748 }
1749 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1750 }
1751 GL_CALL(DeleteTextures(1, &colorID));
1752 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
1753 this->deleteFramebuffer(fb);
1754 fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex);
1755 }
1756 return this->glCaps().getStencilFormatIndexForConfig(config);
1757 }
1758
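// Generates the GL texture object, applies conservative initial sampler params (nearest
// filtering, clamp-to-edge wrapping), and uploads the provided mip levels. On upload failure the
// texture object is deleted and false is returned.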
1759 bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info, bool renderTarget,
1760 GrGLTexture::SamplerParams* initialTexParams,
1761 const GrMipLevel texels[], int mipLevelCount,
1762 GrMipMapsStatus* mipMapsStatus) {
1763 info->fID = 0;
1764 info->fTarget = GR_GL_TEXTURE_2D;
1765 GL_CALL(GenTextures(1, &(info->fID)));
1766
1767 if (!info->fID) {
1768 return false;
1769 }
1770
1771 this->setScratchTextureUnit();
1772 GL_CALL(BindTexture(info->fTarget, info->fID));
1773
1774 if (renderTarget && this->glCaps().textureUsageSupport()) {
1775 // provides a hint about how this texture will be used
1776 GL_CALL(TexParameteri(info->fTarget,
1777 GR_GL_TEXTURE_USAGE,
1778 GR_GL_FRAMEBUFFER_ATTACHMENT));
1779 }
1780
1781 if (info) {
1782 *initialTexParams = set_initial_texture_params(this->glInterface(), *info);
1783 }
1784
1785 bool success = false;
1786 if (GrPixelConfigIsCompressed(desc.fConfig)) {
1787 SkASSERT(!renderTarget);
1788 success = this->uploadCompressedTexData(desc.fConfig, desc.fWidth, desc.fHeight,
1789 info->fTarget, desc.fConfig,
1790 texels, mipLevelCount, mipMapsStatus);
1791 } else {
1792 success = this->uploadTexData(desc.fConfig, desc.fWidth, desc.fHeight, info->fTarget,
1793 kNewTexture_UploadType, 0, 0, desc.fWidth, desc.fHeight,
1794 desc.fConfig, texels, mipLevelCount, mipMapsStatus);
1795 }
1796 if (!success) {
1797 GL_CALL(DeleteTextures(1, &(info->fID)));
1798 return false;
1799 }
1800 info->fFormat = this->glCaps().configSizedInternalFormat(desc.fConfig);
1801 return true;
1802 }
1803
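// Allocates a stencil renderbuffer for the render target using the first stencil format known to
// be compatible with the RT's config (see getCompatibleStencilIndex above).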
1804 GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
1805 int width, int height) {
1806 SkASSERT(width >= rt->width());
1807 SkASSERT(height >= rt->height());
1808
1809 int samples = rt->numStencilSamples();
1810 GrGLStencilAttachment::IDDesc sbDesc;
1811
1812 int sIdx = this->getCompatibleStencilIndex(rt->config());
1813 if (sIdx < 0) {
1814 return nullptr;
1815 }
1816
1817 if (!sbDesc.fRenderbufferID) {
1818 GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
1819 }
1820 if (!sbDesc.fRenderbufferID) {
1821 return nullptr;
1822 }
1823 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
1824 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
1825 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1826 // we do this "if" so that we don't call the multisample
1827 // version on a GL that doesn't have an MSAA extension.
1828 if (samples > 1) {
1829 SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
1830 samples,
1831 sFmt.fInternalFormat,
1832 width, height));
1833 } else {
1834 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
1835 sFmt.fInternalFormat,
1836 width, height));
1837 SkASSERT(GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface()));
1838 }
1839 fStats.incStencilAttachmentCreates();
1840 // After sized formats we attempt an unsized format and take
1841 // whatever sizes GL gives us. In that case we query for the size.
1842 GrGLStencilAttachment::Format format = sFmt;
1843 get_stencil_rb_sizes(this->glInterface(), &format);
1844 GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
1845 sbDesc,
1846 width,
1847 height,
1848 samples,
1849 format);
1850 return stencil;
1851 }
1852
1853 ////////////////////////////////////////////////////////////////////////////////
1854
1855 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
1856 // objects are implemented as client-side-arrays on tile-deferred architectures.
1857 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
1858
1859 sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
1860 GrAccessPattern accessPattern, const void* data) {
1861 return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
1862 }
1863
1864 void GrGLGpu::flushScissor(const GrScissorState& scissorState,
1865 const GrGLIRect& rtViewport,
1866 GrSurfaceOrigin rtOrigin) {
1867 if (scissorState.enabled()) {
1868 GrGLIRect scissor;
1869 scissor.setRelativeTo(rtViewport, scissorState.rect(), rtOrigin);
1870 // if the scissor fully contains the viewport then we fall through and
1871 // disable the scissor test.
1872 if (!scissor.contains(rtViewport)) {
1873 if (fHWScissorSettings.fRect != scissor) {
1874 scissor.pushToGLScissor(this->glInterface());
1875 fHWScissorSettings.fRect = scissor;
1876 }
1877 if (kYes_TriState != fHWScissorSettings.fEnabled) {
1878 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1879 fHWScissorSettings.fEnabled = kYes_TriState;
1880 }
1881 return;
1882 }
1883 }
1884
1885 // See fall through note above
1886 this->disableScissor();
1887 }
1888
1889 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
1890 const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
1891 #ifndef USE_NSIGHT
1892 typedef GrWindowRectsState::Mode Mode;
1893 SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
1894 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
1895
1896 if (!this->caps()->maxWindowRectangles() ||
1897 fHWWindowRectsState.knownEqualTo(origin, rt->getViewport(), windowState)) {
1898 return;
1899 }
1900
1901 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
1902 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
1903 int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
1904 SkASSERT(windowState.numWindows() == numWindows);
1905
1906 GrGLIRect glwindows[GrWindowRectangles::kMaxWindows];
1907 const SkIRect* skwindows = windowState.windows().data();
1908 for (int i = 0; i < numWindows; ++i) {
1909 glwindows[i].setRelativeTo(rt->getViewport(), skwindows[i], origin);
1910 }
1911
1912 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
1913 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
1914
1915 fHWWindowRectsState.set(origin, rt->getViewport(), windowState);
1916 #endif
1917 }
1918
1919 void GrGLGpu::disableWindowRectangles() {
1920 #ifndef USE_NSIGHT
1921 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
1922 return;
1923 }
1924 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
1925 fHWWindowRectsState.setDisabled();
1926 #endif
1927 }
1928
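// Before a draw, visit every texture the primitive processor and fragment processors will sample
// and either regenerate dirty mip levels (when mipmap filtering is requested) or perform any
// pending MSAA resolve, so that sampling sees up-to-date contents.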
1929 void GrGLGpu::resolveAndGenerateMipMapsForProcessorTextures(
1930 const GrPrimitiveProcessor& primProc,
1931 const GrPipeline& pipeline,
1932 const GrTextureProxy* const primProcTextures[],
1933 int numPrimitiveProcessorTextureSets) {
1934 auto genLevelsIfNeeded = [this](GrTexture* tex, const GrSamplerState& sampler) {
1935 SkASSERT(tex);
1936 if (sampler.filter() == GrSamplerState::Filter::kMipMap &&
1937 tex->texturePriv().mipMapped() == GrMipMapped::kYes &&
1938 tex->texturePriv().mipMapsAreDirty()) {
1939 SkASSERT(this->caps()->mipMapSupport());
1940 this->regenerateMipMapLevels(static_cast<GrGLTexture*>(tex));
1941 SkASSERT(!tex->asRenderTarget() || !tex->asRenderTarget()->needsResolve());
1942 } else if (auto* rt = tex->asRenderTarget()) {
1943 if (rt->needsResolve()) {
1944 this->resolveRenderTarget(rt);
1945 }
1946 }
1947 };
1948
1949 for (int set = 0, tex = 0; set < numPrimitiveProcessorTextureSets; ++set) {
1950 for (int sampler = 0; sampler < primProc.numTextureSamplers(); ++sampler, ++tex) {
1951 GrTexture* texture = primProcTextures[tex]->peekTexture();
1952 genLevelsIfNeeded(texture, primProc.textureSampler(sampler).samplerState());
1953 }
1954 }
1955
1956 GrFragmentProcessor::Iter iter(pipeline);
1957 while (const GrFragmentProcessor* fp = iter.next()) {
1958 for (int i = 0; i < fp->numTextureSamplers(); ++i) {
1959 const auto& textureSampler = fp->textureSampler(i);
1960 genLevelsIfNeeded(textureSampler.peekTexture(), textureSampler.samplerState());
1961 }
1962 }
1963 }
1964
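// Binds the program for this draw and flushes all per-draw GL state: color writes, blend,
// uniforms and texture bindings, stencil, scissor, window rectangles, HWAA, and finally the
// render target FBO itself.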
1965 bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget,
1966 GrSurfaceOrigin origin,
1967 const GrPrimitiveProcessor& primProc,
1968 const GrPipeline& pipeline,
1969 const GrPipeline::FixedDynamicState* fixedDynamicState,
1970 const GrPipeline::DynamicStateArrays* dynamicStateArrays,
1971 int dynamicStateArraysLength,
1972 bool willDrawPoints) {
1973 const GrTextureProxy* const* primProcProxiesForMipRegen = nullptr;
1974 const GrTextureProxy* const* primProcProxiesToBind = nullptr;
1975     int numPrimProcTextureSets = 1;  // number of texture sets, one texture per prim proc sampler in each set.
1976 if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
1977 primProcProxiesForMipRegen = dynamicStateArrays->fPrimitiveProcessorTextures;
1978 numPrimProcTextureSets = dynamicStateArraysLength;
1979 } else if (fixedDynamicState && fixedDynamicState->fPrimitiveProcessorTextures) {
1980 primProcProxiesForMipRegen = fixedDynamicState->fPrimitiveProcessorTextures;
1981 primProcProxiesToBind = fixedDynamicState->fPrimitiveProcessorTextures;
1982 }
1983
1984 SkASSERT(SkToBool(primProcProxiesForMipRegen) == SkToBool(primProc.numTextureSamplers()));
1985
1986 sk_sp<GrGLProgram> program(fProgramCache->refProgram(this, renderTarget, origin, primProc,
1987 primProcProxiesForMipRegen,
1988 pipeline, willDrawPoints));
1989 if (!program) {
1990 GrCapsDebugf(this->caps(), "Failed to create program!\n");
1991 return false;
1992 }
1993 this->resolveAndGenerateMipMapsForProcessorTextures(
1994 primProc, pipeline, primProcProxiesForMipRegen, numPrimProcTextureSets);
1995
1996 GrXferProcessor::BlendInfo blendInfo;
1997 pipeline.getXferProcessor().getBlendInfo(&blendInfo);
1998
1999 this->flushColorWrite(blendInfo.fWriteColor);
2000
2001 this->flushProgram(std::move(program));
2002
2003 // Swizzle the blend to match what the shader will output.
2004 const GrSwizzle& swizzle = this->caps()->shaderCaps()->configOutputSwizzle(
2005 renderTarget->config());
2006 this->flushBlend(blendInfo, swizzle);
2007
2008 fHWProgram->updateUniformsAndTextureBindings(renderTarget, origin,
2009 primProc, pipeline, primProcProxiesToBind);
2010
2011 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
2012 GrStencilSettings stencil;
2013 if (pipeline.isStencilEnabled()) {
2014 // TODO: attach stencil and create settings during render target flush.
2015 SkASSERT(glRT->renderTargetPriv().getStencilAttachment());
2016 stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(),
2017 glRT->renderTargetPriv().numStencilBits());
2018 }
2019 this->flushStencil(stencil);
2020 if (pipeline.isScissorEnabled()) {
2021 static constexpr SkIRect kBogusScissor{0, 0, 1, 1};
2022 GrScissorState state(fixedDynamicState ? fixedDynamicState->fScissorRect : kBogusScissor);
2023 this->flushScissor(state, glRT->getViewport(), origin);
2024 } else {
2025 this->disableScissor();
2026 }
2027 this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT, origin);
2028 this->flushHWAAState(glRT, pipeline.isHWAntialiasState(), !stencil.isDisabled());
2029
2030 // This must come after textures are flushed because a texture may need
2031 // to be msaa-resolved (which will modify bound FBO state).
2032 this->flushRenderTarget(glRT);
2033
2034 return true;
2035 }
2036
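// The two flushProgram overloads track the currently bound program (fHWProgram / fHWProgramID)
// so redundant glUseProgram calls are skipped. The GrGLuint overload binds a raw program id and
// drops the cached GrGLProgram.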
2037 void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
2038 if (!program) {
2039 fHWProgram.reset();
2040 fHWProgramID = 0;
2041 return;
2042 }
2043 SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
2044 if (program == fHWProgram) {
2045 return;
2046 }
2047 auto id = program->programID();
2048 SkASSERT(id);
2049 GL_CALL(UseProgram(id));
2050 fHWProgram = std::move(program);
2051 fHWProgramID = id;
2052 }
2053
2054 void GrGLGpu::flushProgram(GrGLuint id) {
2055 SkASSERT(id);
2056 if (fHWProgramID == id) {
2057 SkASSERT(!fHWProgram);
2058 return;
2059 }
2060 fHWProgram.reset();
2061 GL_CALL(UseProgram(id));
2062 fHWProgramID = id;
2063 }
2064
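// Configures vertex array state for a draw: binds the index buffer through an internal VAO when
// one is provided, enables the required attribute arrays, and sets vertex attributes with
// divisor 0 and instance attributes with divisor 1, offset by baseVertex/baseInstance in bytes.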
2065 void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
2066 const GrBuffer* vertexBuffer,
2067 int baseVertex,
2068 const GrBuffer* instanceBuffer,
2069 int baseInstance,
2070 GrPrimitiveRestart enablePrimitiveRestart) {
2071 SkASSERT((enablePrimitiveRestart == GrPrimitiveRestart::kNo) || indexBuffer);
2072
2073 GrGLAttribArrayState* attribState;
2074 if (indexBuffer) {
2075 SkASSERT(indexBuffer && !indexBuffer->isMapped());
2076 attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
2077 } else {
2078 attribState = fHWVertexArrayState.bindInternalVertexArray(this);
2079 }
2080
2081 int numAttribs = fHWProgram->numVertexAttributes() + fHWProgram->numInstanceAttributes();
2082 attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);
2083
2084 if (int vertexStride = fHWProgram->vertexStride()) {
2085 SkASSERT(vertexBuffer && !vertexBuffer->isMapped());
2086 size_t bufferOffset = vertexBuffer->baseOffset();
2087 bufferOffset += baseVertex * static_cast<size_t>(vertexStride);
2088 for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
2089 const auto& attrib = fHWProgram->vertexAttribute(i);
2090 static constexpr int kDivisor = 0;
2091 attribState->set(this, attrib.fLocation, vertexBuffer, attrib.fCPUType, attrib.fGPUType,
2092 vertexStride, bufferOffset + attrib.fOffset, kDivisor);
2093 }
2094 }
2095 if (int instanceStride = fHWProgram->instanceStride()) {
2096 SkASSERT(instanceBuffer && !instanceBuffer->isMapped());
2097 size_t bufferOffset = instanceBuffer->baseOffset();
2098 bufferOffset += baseInstance * static_cast<size_t>(instanceStride);
2099 int attribIdx = fHWProgram->numVertexAttributes();
2100 for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
2101 const auto& attrib = fHWProgram->instanceAttribute(i);
2102 static constexpr int kDivisor = 1;
2103 attribState->set(this, attrib.fLocation, instanceBuffer, attrib.fCPUType,
2104 attrib.fGPUType, instanceStride, bufferOffset + attrib.fOffset,
2105 kDivisor);
2106 }
2107 }
2108 }
2109
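// Binds a buffer to the GL target associated with its GrBufferType, skipping the call when that
// buffer is already bound. CPU-backed buffers resolve to binding zero.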
2110 GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) {
2111 this->handleDirtyContext();
2112
2113 // Index buffer state is tied to the vertex array.
2114 if (kIndex_GrBufferType == type) {
2115 this->bindVertexArray(0);
2116 }
2117
2118 SkASSERT(type >= 0 && type <= kLast_GrBufferType);
2119 auto& bufferState = fHWBufferState[type];
2120
2121 if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) {
2122 if (buffer->isCPUBacked()) {
2123 if (!bufferState.fBufferZeroKnownBound) {
2124 GL_CALL(BindBuffer(bufferState.fGLTarget, 0));
2125 }
2126 } else {
2127 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
2128 GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID()));
2129 }
2130 bufferState.fBufferZeroKnownBound = buffer->isCPUBacked();
2131 bufferState.fBoundBufferUniqueID = buffer->uniqueID();
2132 }
2133
2134 return bufferState.fGLTarget;
2135 }
2136 void GrGLGpu::disableScissor() {
2137 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2138 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2139 fHWScissorSettings.fEnabled = kNo_TriState;
2140 return;
2141 }
2142 }
2143
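// Performs a color clear. On drivers where clearing to exact boundary values is broken, a 0/1
// alpha value is nudged just outside [0, 1] before being passed to the clear color.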
2144 void GrGLGpu::clear(const GrFixedClip& clip, const SkPMColor4f& color,
2145 GrRenderTarget* target, GrSurfaceOrigin origin) {
2146 // parent class should never let us get here with no RT
2147 SkASSERT(target);
2148 SkASSERT(!this->caps()->performColorClearsAsDraws());
2149 SkASSERT(!clip.scissorEnabled() || !this->caps()->performPartialClearsAsDraws());
2150
2151 this->handleDirtyContext();
2152
2153 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2154
2155 if (clip.scissorEnabled()) {
2156 this->flushRenderTarget(glRT, origin, clip.scissorRect());
2157 } else {
2158 this->flushRenderTarget(glRT);
2159 }
2160 this->flushScissor(clip.scissorState(), glRT->getViewport(), origin);
2161 this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);
2162 this->flushColorWrite(true);
2163
2164 GrGLfloat r = color.fR, g = color.fG, b = color.fB, a = color.fA;
2165 if (this->glCaps().clearToBoundaryValuesIsBroken() &&
2166 (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
2167 static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
2168 static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
2169 a = (1 == a) ? safeAlpha1 : safeAlpha0;
2170 }
2171 this->flushClearColor(r, g, b, a);
2172
2173 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
2174 }
2175
2176 void GrGLGpu::clearStencil(GrRenderTarget* target, int clearValue) {
2177 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2178
2179 if (!target) {
2180 return;
2181 }
2182
2183 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
2184 // this should only be called internally when we know we have a
2185 // stencil buffer.
2186 SkASSERT(sb);
2187
2188 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2189 this->flushRenderTargetNoColorWrites(glRT);
2190
2191 this->disableScissor();
2192 this->disableWindowRectangles();
2193
2194 GL_CALL(StencilMask(0xffffffff));
2195 GL_CALL(ClearStencil(clearValue));
2196 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2197 fHWStencilSettings.invalidate();
2198 if (!clearValue) {
2199 sb->cleared();
2200 }
2201 }
2202
2203 void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
2204 bool insideStencilMask,
2205 GrRenderTarget* target, GrSurfaceOrigin origin) {
2206 SkASSERT(target);
2207 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2208 this->handleDirtyContext();
2209
2210 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
2211 // this should only be called internally when we know we have a
2212 // stencil buffer.
2213 SkASSERT(sb);
2214 GrGLint stencilBitCount = sb->bits();
2215 #if 0
2216 SkASSERT(stencilBitCount > 0);
2217 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
2218 #else
2219 // we could just clear the clip bit but when we go through
2220 // ANGLE a partial stencil mask will cause clears to be
2221 // turned into draws. Our contract on GrOpList says that
2222 // changing the clip between stencil passes may or may not
2223 // zero the client's clip bits. So we just clear the whole thing.
2224 static const GrGLint clipStencilMask = ~0;
2225 #endif
2226 GrGLint value;
2227 if (insideStencilMask) {
2228 value = (1 << (stencilBitCount - 1));
2229 } else {
2230 value = 0;
2231 }
2232 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2233 this->flushRenderTargetNoColorWrites(glRT);
2234
2235 this->flushScissor(clip.scissorState(), glRT->getViewport(), origin);
2236 this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);
2237
2238 GL_CALL(StencilMask((uint32_t) clipStencilMask));
2239 GL_CALL(ClearStencil(value));
2240 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2241 fHWStencilSettings.invalidate();
2242 }
2243
2244 bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
2245 #ifdef SK_BUILD_FOR_MAC
2246 // Chromium may ask us to read back from locked IOSurfaces. Calling the command buffer's
2247 // glGetIntegerv() with GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE causes the command buffer
2248 // to make a call to check the framebuffer status which can hang the driver. So in Mac Chromium
2249 // we always use a temporary surface to test for read pixels support.
2250 // https://www.crbug.com/662802
2251 if (this->glContext().driver() == kChromium_GrGLDriver) {
2252 return this->readPixelsSupported(target->config(), readConfig);
2253 }
2254 #endif
2255 auto bindRenderTarget = [this, target]() -> bool {
2256 this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(target));
2257 return true;
2258 };
2259 auto unbindRenderTarget = []{};
2260 auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
2261 GR_GL_GetIntegerv(this->glInterface(), query, value);
2262 };
2263 GrPixelConfig rtConfig = target->config();
2264 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
2265 unbindRenderTarget);
2266 }
2267
2268 bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
2269 sk_sp<GrTexture> temp;
2270 auto bindRenderTarget = [this, rtConfig, &temp]() -> bool {
2271 GrSurfaceDesc desc;
2272 desc.fConfig = rtConfig;
2273 desc.fWidth = desc.fHeight = 16;
2274 if (this->glCaps().isConfigRenderable(rtConfig)) {
2275 desc.fFlags = kRenderTarget_GrSurfaceFlag;
2276 temp = this->createTexture(desc, SkBudgeted::kNo);
2277 if (!temp) {
2278 return false;
2279 }
2280 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
2281 this->flushRenderTargetNoColorWrites(glrt);
2282 return true;
2283 } else if (this->glCaps().canConfigBeFBOColorAttachment(rtConfig)) {
2284 temp = this->createTexture(desc, SkBudgeted::kNo);
2285 if (!temp) {
2286 return false;
2287 }
2288 GrGLIRect vp;
2289 this->bindSurfaceFBOForPixelOps(temp.get(), GR_GL_FRAMEBUFFER, &vp, kDst_TempFBOTarget);
2290 fHWBoundRenderTargetUniqueID.makeInvalid();
2291 return true;
2292 }
2293 return false;
2294 };
2295 auto unbindRenderTarget = [this, &temp]() {
2296 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, temp.get());
2297 };
2298 auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
2299 GR_GL_GetIntegerv(this->glInterface(), query, value);
2300 };
2301 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
2302 unbindRenderTarget);
2303 }
2304
2305 bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) {
2306 if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) {
2307 return this->readPixelsSupported(rt, readConfig);
2308 } else {
2309 GrPixelConfig config = surfaceForConfig->config();
2310 return this->readPixelsSupported(config, readConfig);
2311 }
2312 }
2313
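// Reads back pixels from a surface, resolving MSAA or binding a temporary FBO as needed. When the
// caller's rowBytes are not tight, GL_PACK_ROW_LENGTH is used if supported; otherwise the read
// goes through a tight scratch buffer that is then copied out at the requested rowBytes.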
2314 bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
2315 GrColorType dstColorType, void* buffer, size_t rowBytes) {
2316 SkASSERT(surface);
2317
2318 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2319 if (!renderTarget && !this->glCaps().canConfigBeFBOColorAttachment(surface->config())) {
2320 return false;
2321 }
2322
2323 // TODO: Avoid this conversion by making GrGLCaps work with color types.
2324 auto dstAsConfig = GrColorTypeToPixelConfig(dstColorType, GrSRGBEncoded::kNo);
2325
2326 if (!this->readPixelsSupported(surface, dstAsConfig)) {
2327 return false;
2328 }
2329
2330 GrGLenum externalFormat;
2331 GrGLenum externalType;
2332 if (!this->glCaps().getReadPixelsFormat(surface->config(), dstAsConfig, &externalFormat,
2333 &externalType)) {
2334 return false;
2335 }
2336
2337 GrGLIRect glvp;
2338 if (renderTarget) {
2339 // resolve the render target if necessary
2340 switch (renderTarget->getResolveType()) {
2341 case GrGLRenderTarget::kCantResolve_ResolveType:
2342 return false;
2343 case GrGLRenderTarget::kAutoResolves_ResolveType:
2344 this->flushRenderTargetNoColorWrites(renderTarget);
2345 break;
2346 case GrGLRenderTarget::kCanResolve_ResolveType:
2347 this->onResolveRenderTarget(renderTarget);
2348 // we don't track the state of the READ FBO ID.
2349 this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID());
2350 break;
2351 default:
2352 SK_ABORT("Unknown resolve type");
2353 }
2354 glvp = renderTarget->getViewport();
2355 } else {
2356 // Use a temporary FBO.
2357 this->bindSurfaceFBOForPixelOps(surface, GR_GL_FRAMEBUFFER, &glvp, kSrc_TempFBOTarget);
2358 fHWBoundRenderTargetUniqueID.makeInvalid();
2359 }
2360
2361 // the read rect is viewport-relative
2362 GrGLIRect readRect;
2363 readRect.setRelativeTo(glvp, left, top, width, height, kTopLeft_GrSurfaceOrigin);
2364
2365 int bytesPerPixel = GrBytesPerPixel(dstAsConfig);
2366 size_t tightRowBytes = bytesPerPixel * width;
2367
2368 size_t readDstRowBytes = tightRowBytes;
2369 void* readDst = buffer;
2370
2371 // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
2372 SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
2373 if (rowBytes != tightRowBytes) {
2374 if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) {
2375 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
2376 static_cast<GrGLint>(rowBytes / bytesPerPixel)));
2377 readDstRowBytes = rowBytes;
2378 } else {
2379 scratch.reset(tightRowBytes * height);
2380 readDst = scratch.get();
2381 }
2382 }
2383 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(dstAsConfig)));
2384
2385 bool reattachStencil = false;
2386 if (this->glCaps().detachStencilFromMSAABuffersBeforeReadPixels() &&
2387 renderTarget &&
2388 renderTarget->renderTargetPriv().getStencilAttachment() &&
2389 renderTarget->numColorSamples() > 1) {
2390 // Fix Adreno devices that won't read from MSAA framebuffers with stencil attached
2391 reattachStencil = true;
2392 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
2393 GR_GL_RENDERBUFFER, 0));
2394 }
2395
2396 GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
2397 readRect.fWidth, readRect.fHeight,
2398 externalFormat, externalType, readDst));
2399
2400 if (reattachStencil) {
2401 GrGLStencilAttachment* stencilAttachment = static_cast<GrGLStencilAttachment*>(
2402 renderTarget->renderTargetPriv().getStencilAttachment());
2403 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
2404 GR_GL_RENDERBUFFER, stencilAttachment->renderbufferID()));
2405 }
2406
2407 if (readDstRowBytes != tightRowBytes) {
2408 SkASSERT(this->glCaps().packRowLengthSupport());
2409 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2410 }
2411
2412 if (readDst != buffer) {
2413 SkASSERT(readDst != buffer);
2414 SkASSERT(rowBytes != tightRowBytes);
2415 const char* src = reinterpret_cast<const char*>(readDst);
2416 char* dst = reinterpret_cast<char*>(buffer);
2417 SkRectMemcpy(dst, rowBytes, src, readDstRowBytes, tightRowBytes, height);
2418 }
2419 if (!renderTarget) {
2420 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, surface);
2421 }
2422 return true;
2423 }
2424
2425 GrGpuRTCommandBuffer* GrGLGpu::getCommandBuffer(
2426 GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds,
2427 const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
2428 const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
2429 if (!fCachedRTCommandBuffer) {
2430 fCachedRTCommandBuffer.reset(new GrGLGpuRTCommandBuffer(this));
2431 }
2432
2433 fCachedRTCommandBuffer->set(rt, origin, colorInfo, stencilInfo);
2434 return fCachedRTCommandBuffer.get();
2435 }
2436
2437 GrGpuTextureCommandBuffer* GrGLGpu::getCommandBuffer(GrTexture* texture, GrSurfaceOrigin origin) {
2438 if (!fCachedTexCommandBuffer) {
2439 fCachedTexCommandBuffer.reset(new GrGLGpuTextureCommandBuffer(this));
2440 }
2441
2442 fCachedTexCommandBuffer->set(texture, origin);
2443 return fCachedTexCommandBuffer.get();
2444 }
2445
2446 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, GrSurfaceOrigin origin,
2447 const SkIRect& bounds) {
2448 this->flushRenderTargetNoColorWrites(target);
2449 this->didWriteToSurface(target, origin, &bounds);
2450 }
2451
2452 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target) {
2453 this->flushRenderTargetNoColorWrites(target);
2454 this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr);
2455 }
2456
2457 void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target) {
2458 SkASSERT(target);
2459 GrGpuResource::UniqueID rtID = target->uniqueID();
2460 if (fHWBoundRenderTargetUniqueID != rtID) {
2461 this->bindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID());
2462 #ifdef SK_DEBUG
2463 // don't do this check in Chromium -- this is causing
2464 // lots of repeated command buffer flushes when the compositor is
2465 // rendering with Ganesh, which is really slow; even too slow for
2466 // Debug mode.
2467 if (kChromium_GrGLDriver != this->glContext().driver()) {
2468 GrGLenum status;
2469 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
2470 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
2471 SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
2472 }
2473 }
2474 #endif
2475 fHWBoundRenderTargetUniqueID = rtID;
2476 this->flushViewport(target->getViewport());
2477 }
2478
2479 if (this->glCaps().srgbWriteControl()) {
2480 this->flushFramebufferSRGB(GrPixelConfigIsSRGB(target->config()));
2481 }
2482 }
2483
2484 void GrGLGpu::flushFramebufferSRGB(bool enable) {
2485 if (enable && kYes_TriState != fHWSRGBFramebuffer) {
2486 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
2487 fHWSRGBFramebuffer = kYes_TriState;
2488 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
2489 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
2490 fHWSRGBFramebuffer = kNo_TriState;
2491 }
2492 }
2493
2494 void GrGLGpu::flushViewport(const GrGLIRect& viewport) {
2495 if (fHWViewport != viewport) {
2496 viewport.pushToGLViewport(this->glInterface());
2497 fHWViewport = viewport;
2498 }
2499 }
2500
2501 #define SWAP_PER_DRAW 0
2502
2503 #if SWAP_PER_DRAW
2504 #if defined(SK_BUILD_FOR_MAC)
2505 #include <AGL/agl.h>
2506 #elif defined(SK_BUILD_FOR_WIN)
2507 #include <gl/GL.h>
2508 void SwapBuf() {
2509 DWORD procID = GetCurrentProcessId();
2510 HWND hwnd = GetTopWindow(GetDesktopWindow());
2511 while(hwnd) {
2512 DWORD wndProcID = 0;
2513 GetWindowThreadProcessId(hwnd, &wndProcID);
2514 if(wndProcID == procID) {
2515 SwapBuffers(GetDC(hwnd));
2516 }
2517 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
2518 }
2519 }
2520 #endif
2521 #endif
2522
2523 void GrGLGpu::draw(GrRenderTarget* renderTarget, GrSurfaceOrigin origin,
2524 const GrPrimitiveProcessor& primProc,
2525 const GrPipeline& pipeline,
2526 const GrPipeline::FixedDynamicState* fixedDynamicState,
2527 const GrPipeline::DynamicStateArrays* dynamicStateArrays,
2528 const GrMesh meshes[],
2529 int meshCount) {
2530 this->handleDirtyContext();
2531
2532 bool hasPoints = false;
2533 for (int i = 0; i < meshCount; ++i) {
2534 if (meshes[i].primitiveType() == GrPrimitiveType::kPoints) {
2535 hasPoints = true;
2536 break;
2537 }
2538 }
2539 if (!this->flushGLState(renderTarget, origin, primProc, pipeline, fixedDynamicState,
2540 dynamicStateArrays, meshCount, hasPoints)) {
2541 return;
2542 }
2543
2544 bool dynamicScissor = false;
2545 bool dynamicPrimProcTextures = false;
2546 if (dynamicStateArrays) {
2547 dynamicScissor = pipeline.isScissorEnabled() && dynamicStateArrays->fScissorRects;
2548 dynamicPrimProcTextures = dynamicStateArrays->fPrimitiveProcessorTextures;
2549 }
2550 for (int m = 0; m < meshCount; ++m) {
2551 if (GrXferBarrierType barrierType = pipeline.xferBarrierType(renderTarget->asTexture(),
2552 *this->caps())) {
2553 this->xferBarrier(renderTarget, barrierType);
2554 }
2555
2556 if (dynamicScissor) {
2557 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
2558 this->flushScissor(GrScissorState(dynamicStateArrays->fScissorRects[m]),
2559 glRT->getViewport(), origin);
2560 }
2561 if (dynamicPrimProcTextures) {
2562 auto texProxyArray = dynamicStateArrays->fPrimitiveProcessorTextures +
2563 m * primProc.numTextureSamplers();
2564 fHWProgram->updatePrimitiveProcessorTextureBindings(primProc, texProxyArray);
2565 }
2566 if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
2567 GrIsPrimTypeLines(meshes[m].primitiveType()) &&
2568 !GrIsPrimTypeLines(fLastPrimitiveType)) {
2569 GL_CALL(Enable(GR_GL_CULL_FACE));
2570 GL_CALL(Disable(GR_GL_CULL_FACE));
2571 }
2572 meshes[m].sendToGpu(this);
2573 fLastPrimitiveType = meshes[m].primitiveType();
2574 }
2575
2576 #if SWAP_PER_DRAW
2577 glFlush();
2578 #if defined(SK_BUILD_FOR_MAC)
2579 aglSwapBuffers(aglGetCurrentContext());
2580 int set_a_break_pt_here = 9;
2581 aglSwapBuffers(aglGetCurrentContext());
2582 #elif defined(SK_BUILD_FOR_WIN)
2583 SwapBuf();
2584 int set_a_break_pt_here = 9;
2585 SwapBuf();
2586 #endif
2587 #endif
2588 }
2589
2590 static GrGLenum gr_primitive_type_to_gl_mode(GrPrimitiveType primitiveType) {
2591 switch (primitiveType) {
2592 case GrPrimitiveType::kTriangles:
2593 return GR_GL_TRIANGLES;
2594 case GrPrimitiveType::kTriangleStrip:
2595 return GR_GL_TRIANGLE_STRIP;
2596 case GrPrimitiveType::kPoints:
2597 return GR_GL_POINTS;
2598 case GrPrimitiveType::kLines:
2599 return GR_GL_LINES;
2600 case GrPrimitiveType::kLineStrip:
2601 return GR_GL_LINE_STRIP;
2602 case GrPrimitiveType::kLinesAdjacency:
2603 return GR_GL_LINES_ADJACENCY;
2604 }
2605 SK_ABORT("invalid GrPrimitiveType");
2606 return GR_GL_TRIANGLES;
2607 }
2608
2609 void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer,
2610 int vertexCount, int baseVertex) {
2611 const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
2612 if (this->glCaps().drawArraysBaseVertexIsBroken()) {
2613 this->setupGeometry(nullptr, vertexBuffer, baseVertex, nullptr, 0, GrPrimitiveRestart::kNo);
2614 GL_CALL(DrawArrays(glPrimType, 0, vertexCount));
2615 } else {
2616 this->setupGeometry(nullptr, vertexBuffer, 0, nullptr, 0, GrPrimitiveRestart::kNo);
2617 GL_CALL(DrawArrays(glPrimType, baseVertex, vertexCount));
2618 }
2619 fStats.incNumDraws();
2620 }
2621
2622 void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer,
2623 int indexCount, int baseIndex, uint16_t minIndexValue,
2624 uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
2625 int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) {
2626 const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
2627 GrGLvoid* const indices = reinterpret_cast<void*>(indexBuffer->baseOffset() +
2628 sizeof(uint16_t) * baseIndex);
2629
2630 this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart);
2631
2632 if (this->glCaps().drawRangeElementsSupport()) {
2633 GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount,
2634 GR_GL_UNSIGNED_SHORT, indices));
2635 } else {
2636 GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices));
2637 }
2638 fStats.incNumDraws();
2639 }
2640
2641 void GrGLGpu::sendInstancedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer,
2642 int vertexCount, int baseVertex,
2643 const GrBuffer* instanceBuffer, int instanceCount,
2644 int baseInstance) {
2645 GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
2646 int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
2647 for (int i = 0; i < instanceCount; i += maxInstances) {
2648 this->setupGeometry(nullptr, vertexBuffer, 0, instanceBuffer, baseInstance + i,
2649 GrPrimitiveRestart::kNo);
2650 GL_CALL(DrawArraysInstanced(glPrimType, baseVertex, vertexCount,
2651 SkTMin(instanceCount - i, maxInstances)));
2652 fStats.incNumDraws();
2653 }
2654 }
2655
2656 void GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
2657 const GrBuffer* indexBuffer, int indexCount,
2658 int baseIndex, const GrBuffer* vertexBuffer,
2659 int baseVertex, const GrBuffer* instanceBuffer,
2660 int instanceCount, int baseInstance,
2661 GrPrimitiveRestart enablePrimitiveRestart) {
2662 const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
2663 GrGLvoid* indices = reinterpret_cast<void*>(indexBuffer->baseOffset() +
2664 sizeof(uint16_t) * baseIndex);
2665 int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
2666 for (int i = 0; i < instanceCount; i += maxInstances) {
2667 this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i,
2668 enablePrimitiveRestart);
2669 GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices,
2670 SkTMin(instanceCount - i, maxInstances)));
2671 fStats.incNumDraws();
2672 }
2673 }
2674
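// Resolves the MSAA render FBO into the texture FBO. Apple's ES extension uses the scissor rect
// as the blit bounds; otherwise BlitFramebuffer is used, restricted to the dirty rect unless the
// driver requires full-surface resolves.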
2675 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
2676 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
2677 if (rt->needsResolve()) {
2678         // Some extensions automatically resolve the texture when it is read.
2679 if (this->glCaps().usesMSAARenderBuffers()) {
2680 SkASSERT(rt->textureFBOID() != rt->renderFBOID());
2681 SkASSERT(rt->textureFBOID() != 0 && rt->renderFBOID() != 0);
2682 this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID());
2683 this->bindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID());
2684
2685 // make sure we go through flushRenderTarget() since we've modified
2686 // the bound DRAW FBO ID.
2687 fHWBoundRenderTargetUniqueID.makeInvalid();
2688 const GrGLIRect& vp = rt->getViewport();
2689 const SkIRect dirtyRect = rt->getResolveRect();
2690 // The dirty rect tracked on the RT is always stored in the native coordinates of the
2691 // surface. Choose kTopLeft so no adjustments are made
2692 static constexpr auto kDirtyRectOrigin = kTopLeft_GrSurfaceOrigin;
2693 if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
2694 // Apple's extension uses the scissor as the blit bounds.
2695 GrScissorState scissorState;
2696 scissorState.set(dirtyRect);
2697 this->flushScissor(scissorState, vp, kDirtyRectOrigin);
2698 this->disableWindowRectangles();
2699 GL_CALL(ResolveMultisampleFramebuffer());
2700 } else {
2701 int l, b, r, t;
2702 if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag &
2703 this->glCaps().blitFramebufferSupportFlags()) {
2704 l = 0;
2705 b = 0;
2706 r = target->width();
2707 t = target->height();
2708 } else {
2709 GrGLIRect rect;
2710 rect.setRelativeTo(vp, dirtyRect, kDirtyRectOrigin);
2711 l = rect.fLeft;
2712 b = rect.fBottom;
2713 r = rect.fLeft + rect.fWidth;
2714 t = rect.fBottom + rect.fHeight;
2715 }
2716
2717 // BlitFrameBuffer respects the scissor, so disable it.
2718 this->disableScissor();
2719 this->disableWindowRectangles();
2720 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t,
2721 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2722 }
2723 }
2724 rt->flagAsResolved();
2725 }
2726 }
2727
2728 namespace {
2729
2730
2731 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2732 static const GrGLenum gTable[kGrStencilOpCount] = {
2733 GR_GL_KEEP, // kKeep
2734 GR_GL_ZERO, // kZero
2735 GR_GL_REPLACE, // kReplace
2736 GR_GL_INVERT, // kInvert
2737 GR_GL_INCR_WRAP, // kIncWrap
2738 GR_GL_DECR_WRAP, // kDecWrap
2739 GR_GL_INCR, // kIncClamp
2740 GR_GL_DECR, // kDecClamp
2741 };
2742 GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
2743 GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
2744 GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
2745 GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
2746 GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
2747 GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
2748 GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
2749 GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
2750 SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
2751 return gTable[(int)op];
2752 }
2753
2754 void set_gl_stencil(const GrGLInterface* gl,
2755 const GrStencilSettings::Face& face,
2756 GrGLenum glFace) {
2757 GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
2758 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
2759 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
2760
2761 GrGLint ref = face.fRef;
2762 GrGLint mask = face.fTestMask;
2763 GrGLint writeMask = face.fWriteMask;
2764
2765 if (GR_GL_FRONT_AND_BACK == glFace) {
2766 // we call the combined func just in case separate stencil is not
2767 // supported.
2768 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2769 GR_GL_CALL(gl, StencilMask(writeMask));
2770 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
2771 } else {
2772 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2773 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2774 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
2775 }
2776 }
2777 }
2778
2779 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) {
2780 if (stencilSettings.isDisabled()) {
2781 this->disableStencil();
2782 } else if (fHWStencilSettings != stencilSettings) {
2783 if (kYes_TriState != fHWStencilTestEnabled) {
2784 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2785
2786 fHWStencilTestEnabled = kYes_TriState;
2787 }
2788 if (stencilSettings.isTwoSided()) {
2789 set_gl_stencil(this->glInterface(),
2790 stencilSettings.front(),
2791 GR_GL_FRONT);
2792 set_gl_stencil(this->glInterface(),
2793 stencilSettings.back(),
2794 GR_GL_BACK);
2795 } else {
2796 set_gl_stencil(this->glInterface(),
2797 stencilSettings.front(),
2798 GR_GL_FRONT_AND_BACK);
2799 }
2800 fHWStencilSettings = stencilSettings;
2801 }
2802 }
2803
2804 void GrGLGpu::disableStencil() {
2805 if (kNo_TriState != fHWStencilTestEnabled) {
2806 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2807
2808 fHWStencilTestEnabled = kNo_TriState;
2809 fHWStencilSettings.invalidate();
2810 }
2811 }
2812
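// Toggles GL_MULTISAMPLE when the driver supports disabling it and, for mixed-samples render
// targets with stencil disabled, enables GL_RASTER_MULTISAMPLE with an explicit raster sample
// count.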
2813 void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled) {
2814 // rt is only optional if useHWAA is false.
2815 SkASSERT(rt || !useHWAA);
2816 SkASSERT(!useHWAA || rt->isStencilBufferMultisampled());
2817
2818 if (this->caps()->multisampleDisableSupport()) {
2819 if (useHWAA) {
2820 if (kYes_TriState != fMSAAEnabled) {
2821 GL_CALL(Enable(GR_GL_MULTISAMPLE));
2822 fMSAAEnabled = kYes_TriState;
2823 }
2824 } else {
2825 if (kNo_TriState != fMSAAEnabled) {
2826 GL_CALL(Disable(GR_GL_MULTISAMPLE));
2827 fMSAAEnabled = kNo_TriState;
2828 }
2829 }
2830 }
2831
2832 if (0 != this->caps()->maxRasterSamples()) {
2833 if (useHWAA && GrFSAAType::kMixedSamples == rt->fsaaType() && !stencilEnabled) {
2834 // Since stencil is disabled and we want more samples than are in the color buffer, we
2835 // need to tell the rasterizer explicitly how many to run.
2836 if (kYes_TriState != fHWRasterMultisampleEnabled) {
2837 GL_CALL(Enable(GR_GL_RASTER_MULTISAMPLE));
2838 fHWRasterMultisampleEnabled = kYes_TriState;
2839 }
2840 int numStencilSamples = rt->numStencilSamples();
2841 // convert to GL's understanding of sample counts where 0 means nonMSAA.
2842 numStencilSamples = 1 == numStencilSamples ? 0 : numStencilSamples;
2843 if (numStencilSamples != fHWNumRasterSamples) {
2844 SkASSERT(numStencilSamples <= this->caps()->maxRasterSamples());
2845 GL_CALL(RasterSamples(numStencilSamples, GR_GL_TRUE));
2846 fHWNumRasterSamples = numStencilSamples;
2847 }
2848 } else {
2849 if (kNo_TriState != fHWRasterMultisampleEnabled) {
2850 GL_CALL(Disable(GR_GL_RASTER_MULTISAMPLE));
2851 fHWRasterMultisampleEnabled = kNo_TriState;
2852 }
2853 }
2854 } else {
2855 SkASSERT(!useHWAA || GrFSAAType::kMixedSamples != rt->fsaaType() || stencilEnabled);
2856 }
2857 }
2858
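// Flushes blend state. Blending is disabled entirely for (add|subtract, 1, 0) or when color
// writes are off; otherwise the equation, the coefficients, and (when referenced) the blend
// constant, swizzled to match the shader's output, are flushed against the cached HW state.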
2859 void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
2860 // Any optimization to disable blending should have already been applied and
2861 // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2862
2863 GrBlendEquation equation = blendInfo.fEquation;
2864 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
2865 GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
2866 bool blendOff =
2867 ((kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
2868 kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff) ||
2869 !blendInfo.fWriteColor;
2870 if (blendOff) {
2871 if (kNo_TriState != fHWBlendState.fEnabled) {
2872 GL_CALL(Disable(GR_GL_BLEND));
2873
2874 // Workaround for the ARM KHR_blend_equation_advanced blacklist issue
2875 // https://code.google.com/p/skia/issues/detail?id=3943
2876 if (kARM_GrGLVendor == this->ctxInfo().vendor() &&
2877 GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
2878 SkASSERT(this->caps()->advancedBlendEquationSupport());
2879 // Set to any basic blending equation.
2880 GrBlendEquation blend_equation = kAdd_GrBlendEquation;
2881 GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
2882 fHWBlendState.fEquation = blend_equation;
2883 }
2884
2885 fHWBlendState.fEnabled = kNo_TriState;
2886 }
2887 return;
2888 }
2889
2890 if (kYes_TriState != fHWBlendState.fEnabled) {
2891 GL_CALL(Enable(GR_GL_BLEND));
2892
2893 fHWBlendState.fEnabled = kYes_TriState;
2894 }
2895
2896 if (fHWBlendState.fEquation != equation) {
2897 GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
2898 fHWBlendState.fEquation = equation;
2899 }
2900
2901 if (GrBlendEquationIsAdvanced(equation)) {
2902 SkASSERT(this->caps()->advancedBlendEquationSupport());
2903 // Advanced equations have no other blend state.
2904 return;
2905 }
2906
2907 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
2908 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
2909 gXfermodeCoeff2Blend[dstCoeff]));
2910 fHWBlendState.fSrcCoeff = srcCoeff;
2911 fHWBlendState.fDstCoeff = dstCoeff;
2912 }
2913
2914 if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) {
2915 SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
2916 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
2917 GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA));
2918 fHWBlendState.fConstColor = blendConst;
2919 fHWBlendState.fConstColorValid = true;
2920 }
2921 }
2922 }
2923
2924 static void get_gl_swizzle_values(const GrSwizzle& swizzle, GrGLenum glValues[4]) {
2925 for (int i = 0; i < 4; ++i) {
2926 switch (swizzle[i]) {
2927 case 'r': glValues[i] = GR_GL_RED; break;
2928 case 'g': glValues[i] = GR_GL_GREEN; break;
2929 case 'b': glValues[i] = GR_GL_BLUE; break;
2930 case 'a': glValues[i] = GR_GL_ALPHA; break;
2931 default: SK_ABORT("Unsupported component");
2932 }
2933 }
2934 }
2935
2936 void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, GrGLTexture* texture) {
2937 SkASSERT(texture);
2938
2939 #ifdef SK_DEBUG
2940 if (!this->caps()->npotTextureTileSupport()) {
2941 if (samplerState.isRepeated()) {
2942 const int w = texture->width();
2943 const int h = texture->height();
2944 SkASSERT(SkIsPow2(w) && SkIsPow2(h));
2945 }
2946 }
2947 #endif
2948
2949     // If we created an rt/tex and rendered to it without using a texture, and we're now texturing
2950     // from the rt, it will still be the last bound texture, but it needs resolving. So keep this
2951     // out of the "last != next" check.
2952 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
2953 if (texRT) {
2954 this->onResolveRenderTarget(texRT);
2955 }
2956
2957 GrGpuResource::UniqueID textureID = texture->uniqueID();
2958 GrGLenum target = texture->target();
2959 if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
2960 this->setTextureUnit(unitIdx);
2961 GL_CALL(BindTexture(target, texture->textureID()));
2962 fHWBoundTextureUniqueIDs[unitIdx] = textureID;
2963 }
2964
2965 if (samplerState.filter() == GrSamplerState::Filter::kMipMap) {
2966 if (!this->caps()->mipMapSupport() ||
2967 texture->texturePriv().mipMapped() == GrMipMapped::kNo) {
2968 samplerState.setFilterMode(GrSamplerState::Filter::kBilerp);
2969 }
2970 }
2971
2972 #ifdef SK_DEBUG
2973 // We were supposed to ensure MipMaps were up-to-date before getting here.
2974 if (samplerState.filter() == GrSamplerState::Filter::kMipMap) {
2975 SkASSERT(!texture->texturePriv().mipMapsAreDirty());
2976 }
2977 #endif
2978
2979 ResetTimestamp timestamp = texture->getCachedParamsTimestamp();
2980 bool setAll = timestamp < this->getResetTimestamp();
2981
2982 const GrGLTexture::SamplerParams* samplerParamsToRecord = nullptr;
2983 GrGLTexture::SamplerParams newSamplerParams;
2984 if (fSamplerObjectCache) {
2985 fSamplerObjectCache->bindSampler(unitIdx, samplerState);
2986 } else {
2987 const GrGLTexture::SamplerParams& oldSamplerParams = texture->getCachedSamplerParams();
2988 samplerParamsToRecord = &newSamplerParams;
2989
2990 newSamplerParams.fMinFilter = filter_to_gl_min_filter(samplerState.filter());
2991 newSamplerParams.fMagFilter = filter_to_gl_mag_filter(samplerState.filter());
2992
2993 newSamplerParams.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps());
2994 newSamplerParams.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps());
2995
2996 // These are the OpenGL default values.
2997 newSamplerParams.fMinLOD = -1000.f;
2998 newSamplerParams.fMaxLOD = 1000.f;
2999
3000 if (setAll || newSamplerParams.fMagFilter != oldSamplerParams.fMagFilter) {
3001 this->setTextureUnit(unitIdx);
3002 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerParams.fMagFilter));
3003 }
3004 if (setAll || newSamplerParams.fMinFilter != oldSamplerParams.fMinFilter) {
3005 this->setTextureUnit(unitIdx);
3006 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerParams.fMinFilter));
3007 }
3008 if (this->glCaps().mipMapLevelAndLodControlSupport()) {
3009 if (setAll || newSamplerParams.fMinLOD != oldSamplerParams.fMinLOD) {
3010 this->setTextureUnit(unitIdx);
3011 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerParams.fMinLOD));
3012 }
3013 if (setAll || newSamplerParams.fMaxLOD != oldSamplerParams.fMaxLOD) {
3014 this->setTextureUnit(unitIdx);
3015 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerParams.fMaxLOD));
3016 }
3017 }
3018 if (setAll || newSamplerParams.fWrapS != oldSamplerParams.fWrapS) {
3019 this->setTextureUnit(unitIdx);
3020 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerParams.fWrapS));
3021 }
3022 if (setAll || newSamplerParams.fWrapT != oldSamplerParams.fWrapT) {
3023 this->setTextureUnit(unitIdx);
3024 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerParams.fWrapT));
3025 }
3026 if (this->glCaps().clampToBorderSupport()) {
3027 // Make sure the border color is transparent black (the default)
3028 if (setAll || oldSamplerParams.fBorderColorInvalid) {
3029 this->setTextureUnit(unitIdx);
3030 static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f};
3031 GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack));
3032 }
3033 }
3034 }
3035 GrGLTexture::NonSamplerParams newNonSamplerParams;
3036 newNonSamplerParams.fBaseMipMapLevel = 0;
3037 newNonSamplerParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel();
3038
3039 const GrGLTexture::NonSamplerParams& oldNonSamplerParams = texture->getCachedNonSamplerParams();
3040 if (this->glCaps().textureSwizzleSupport()) {
3041 auto swizzle = this->glCaps().configSwizzle(texture->config());
3042 newNonSamplerParams.fSwizzleKey = swizzle.asKey();
3043 if (setAll || swizzle.asKey() != oldNonSamplerParams.fSwizzleKey) {
3044 GrGLenum glValues[4];
3045 get_gl_swizzle_values(swizzle, glValues);
3046 this->setTextureUnit(unitIdx);
3047 if (this->glStandard() == kGLES_GrGLStandard) {
3048 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
3049 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, glValues[0]));
3050 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, glValues[1]));
3051 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, glValues[2]));
3052 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, glValues[3]));
3053 } else {
3054 GR_STATIC_ASSERT(sizeof(glValues[0]) == sizeof(GrGLint));
3055 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
3056 reinterpret_cast<const GrGLint*>(glValues)));
3057 }
3058 }
3059 }
3060 // These are not supported in ES2 contexts
3061 if (this->glCaps().mipMapLevelAndLodControlSupport() &&
3062 (texture->texturePriv().textureType() != GrTextureType::kExternal ||
3063 !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) {
3064 if (newNonSamplerParams.fBaseMipMapLevel != oldNonSamplerParams.fBaseMipMapLevel) {
3065 this->setTextureUnit(unitIdx);
3066 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL,
3067 newNonSamplerParams.fBaseMipMapLevel));
3068 }
3069 if (newNonSamplerParams.fMaxMipMapLevel != oldNonSamplerParams.fMaxMipMapLevel) {
3070 this->setTextureUnit(unitIdx);
3071 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
3072 newNonSamplerParams.fMaxMipMapLevel));
3073 }
3074 }
3075 texture->setCachedParams(samplerParamsToRecord, newNonSamplerParams, this->getResetTimestamp());
3076 }
3077
3078 void GrGLGpu::flushColorWrite(bool writeColor) {
3079 if (!writeColor) {
3080 if (kNo_TriState != fHWWriteToColor) {
3081 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
3082 GR_GL_FALSE, GR_GL_FALSE));
3083 fHWWriteToColor = kNo_TriState;
3084 }
3085 } else {
3086 if (kYes_TriState != fHWWriteToColor) {
3087 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
3088 fHWWriteToColor = kYes_TriState;
3089 }
3090 }
3091 }
3092
3093 void GrGLGpu::flushClearColor(GrGLfloat r, GrGLfloat g, GrGLfloat b, GrGLfloat a) {
3094 if (r != fHWClearColor[0] || g != fHWClearColor[1] ||
3095 b != fHWClearColor[2] || a != fHWClearColor[3]) {
3096 GL_CALL(ClearColor(r, g, b, a));
3097 fHWClearColor[0] = r;
3098 fHWClearColor[1] = g;
3099 fHWClearColor[2] = b;
3100 fHWClearColor[3] = a;
3101 }
3102 }
3103
3104 void GrGLGpu::setTextureUnit(int unit) {
3105 SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
3106 if (unit != fHWActiveTextureUnitIdx) {
3107 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
3108 fHWActiveTextureUnitIdx = unit;
3109 }
3110 }
3111
3112 void GrGLGpu::setScratchTextureUnit() {
3113 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
3114 int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
3115 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
3116 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
3117 fHWActiveTextureUnitIdx = lastUnitIdx;
3118 }
3119     // Clear out this field so that if a program does use this unit it will rebind the correct
3120     // texture.
3121 fHWBoundTextureUniqueIDs[lastUnitIdx].makeInvalid();
3122 }
3123
3124 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
3125 static inline bool can_blit_framebuffer_for_copy_surface(
3126 const GrSurface* dst, GrSurfaceOrigin dstOrigin,
3127 const GrSurface* src, GrSurfaceOrigin srcOrigin,
3128 const SkIRect& srcRect,
3129 const SkIPoint& dstPoint,
3130 const GrGLCaps& caps) {
3131 int dstSampleCnt = 0;
3132 int srcSampleCnt = 0;
3133 if (const GrRenderTarget* rt = dst->asRenderTarget()) {
3134 dstSampleCnt = rt->numColorSamples();
3135 }
3136 if (const GrRenderTarget* rt = src->asRenderTarget()) {
3137 srcSampleCnt = rt->numColorSamples();
3138 }
3139 SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget()));
3140 SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget()));
3141
3142 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3143 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3144
3145 bool dstIsGLTexture2D = dstTex ? GR_GL_TEXTURE_2D == dstTex->target() : false;
3146 bool srcIsGLTexture2D = srcTex ? GR_GL_TEXTURE_2D == srcTex->target() : false;
3147
3148 return caps.canCopyAsBlit(dst->config(), dstSampleCnt, SkToBool(dstTex), dstIsGLTexture2D,
3149 dstOrigin, src->config(), srcSampleCnt, SkToBool(srcTex),
3150 srcIsGLTexture2D, srcOrigin, src->getBoundsRect(), srcRect, dstPoint);
3151 }
3152
3153 static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) {
3154 // A RT has a separate MSAA renderbuffer if:
3155 // 1) It's multisampled
3156 // 2) We're using an extension with separate MSAA renderbuffers
3157 // 3) It's not FBO 0, which is special and always auto-resolves
3158 return rt->numColorSamples() > 1 && glCaps.usesMSAARenderBuffers() && rt->renderFBOID() != 0;
3159 }
3160
3161 static inline bool can_copy_texsubimage(const GrSurface* dst, GrSurfaceOrigin dstOrigin,
3162 const GrSurface* src, GrSurfaceOrigin srcOrigin,
3163 const GrGLCaps& caps) {
3164
3165 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
3166 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
3167 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3168 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3169
3170 bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false;
3171 bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false;
3172
3173 bool dstIsGLTexture2D = dstTex ? GR_GL_TEXTURE_2D == dstTex->target() : false;
3174 bool srcIsGLTexture2D = srcTex ? GR_GL_TEXTURE_2D == srcTex->target() : false;
3175
3176 return caps.canCopyTexSubImage(dst->config(), dstHasMSAARenderBuffer, SkToBool(dstTex),
3177 dstIsGLTexture2D, dstOrigin, src->config(),
3178 srcHasMSAARenderBuffer, SkToBool(srcTex), srcIsGLTexture2D,
3179 srcOrigin);
3180 }
3181
3182 // Binds an FBO for pixel ops on the surface, creating and caching a temporary FBO if the surface
3183 // is not a render target. The viewport that the copy rect is relative to is output.
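// The copy paths below pair this with unbindTextureFBOForPixelOps() to detach the temporary
// attachment when the copy is done.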
3184 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
3185 TempFBOTarget tempFBOTarget) {
3186 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
3187 if (!rt) {
3188 SkASSERT(surface->asTexture());
3189 GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture());
3190 GrGLuint texID = texture->textureID();
3191 GrGLenum target = texture->target();
3192 GrGLuint* tempFBOID;
3193 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
3194
3195 if (0 == *tempFBOID) {
3196 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
3197 }
3198
3199 this->bindFramebuffer(fboTarget, *tempFBOID);
3200 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3201 GR_GL_COLOR_ATTACHMENT0,
3202 target,
3203 texID,
3204 0));
3205 texture->baseLevelWasBoundToFBO();
3206 viewport->fLeft = 0;
3207 viewport->fBottom = 0;
3208 viewport->fWidth = surface->width();
3209 viewport->fHeight = surface->height();
3210 } else {
3211 this->bindFramebuffer(fboTarget, rt->renderFBOID());
3212 *viewport = rt->getViewport();
3213 }
3214 }
3215
3216 void GrGLGpu::unbindTextureFBOForPixelOps(GrGLenum fboTarget, GrSurface* surface) {
3217     // bindSurfaceFBOForPixelOps temporarily binds non-render-target textures to an FBO; detach them here.
3218 if (!surface->asRenderTarget()) {
3219 SkASSERT(surface->asTexture());
3220 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
3221 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3222 GR_GL_COLOR_ATTACHMENT0,
3223 textureTarget,
3224 0,
3225 0));
3226 }
3227 }
3228
3229 void GrGLGpu::onFBOChanged() {
3230 if (this->caps()->workarounds().flush_on_framebuffer_change ||
3231 this->caps()->workarounds().restore_scissor_on_fbo_change) {
3232 GL_CALL(Flush());
3233 }
3234 }
3235
3236 void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) {
3237 fStats.incRenderTargetBinds();
3238 GL_CALL(BindFramebuffer(target, fboid));
3239 if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) {
3240 fBoundDrawFramebuffer = fboid;
3241 }
3242
3243 if (this->caps()->workarounds().restore_scissor_on_fbo_change) {
3244 // The driver forgets the correct scissor when modifying the FBO binding.
3245 if (!fHWScissorSettings.fRect.isInvalid()) {
3246 fHWScissorSettings.fRect.pushToGLScissor(this->glInterface());
3247 }
3248 }
3249
3250 this->onFBOChanged();
3251 }
3252
3253 void GrGLGpu::deleteFramebuffer(GrGLuint fboid) {
3254 if (fboid == fBoundDrawFramebuffer &&
3255 this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) {
3256 // This workaround only applies to deleting currently bound framebuffers
3257         // on Adreno 420. Because this is a somewhat rare case, instead of
3258         // tracking all the attachments of every framebuffer we just always
3259         // unbind all attachments.
3260 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3261 GR_GL_RENDERBUFFER, 0));
3262 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
3263 GR_GL_RENDERBUFFER, 0));
3264 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
3265 GR_GL_RENDERBUFFER, 0));
3266 }
3267
3268 GL_CALL(DeleteFramebuffers(1, &fboid));
3269
3270 // Deleting the currently bound framebuffer rebinds to 0.
3271 if (fboid == fBoundDrawFramebuffer) {
3272 this->onFBOChanged();
3273 }
3274 }
3275
3276 bool GrGLGpu::onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin,
3277 GrSurface* src, GrSurfaceOrigin srcOrigin,
3278 const SkIRect& srcRect, const SkIPoint& dstPoint,
3279 bool canDiscardOutsideDstRect) {
3280 // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the
3281 // swizzle.
3282 if (this->caps()->shaderCaps()->configOutputSwizzle(src->config()) !=
3283 this->caps()->shaderCaps()->configOutputSwizzle(dst->config())) {
3284 return false;
3285 }
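    // The strategies below are tried in order: copy as a draw (preferred when the dst already has
    // an FBO), glCopyTexSubImage2D, glBlitFramebuffer, and finally the draw path as a last resort.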
3286 // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
3287 // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites().
3288 bool preferCopy = SkToBool(dst->asRenderTarget());
3289 if (preferCopy && this->glCaps().canCopyAsDraw(dst->config(), SkToBool(src->asTexture()))) {
3290 if (this->copySurfaceAsDraw(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint)) {
3291 return true;
3292 }
3293 }
3294
3295 if (can_copy_texsubimage(dst, dstOrigin, src, srcOrigin, this->glCaps())) {
3296 this->copySurfaceAsCopyTexSubImage(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint);
3297 return true;
3298 }
3299
3300 if (can_blit_framebuffer_for_copy_surface(dst, dstOrigin, src, srcOrigin,
3301 srcRect, dstPoint, this->glCaps())) {
3302 return this->copySurfaceAsBlitFramebuffer(dst, dstOrigin, src, srcOrigin,
3303 srcRect, dstPoint);
3304 }
3305
3306 if (!preferCopy && this->glCaps().canCopyAsDraw(dst->config(), SkToBool(src->asTexture()))) {
3307 if (this->copySurfaceAsDraw(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint)) {
3308 return true;
3309 }
3310 }
3311
3312 return false;
3313 }
3314
3315 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
3316 TRACE_EVENT0("skia", TRACE_FUNC);
3317
3318 int progIdx = TextureToCopyProgramIdx(srcTex);
3319 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3320 GrSLType samplerType =
3321 GrSLCombinedSamplerTypeForTextureType(srcTex->texturePriv().textureType());
3322
3323 if (!fCopyProgramArrayBuffer) {
3324 static const GrGLfloat vdata[] = {
3325 0, 0,
3326 0, 1,
3327 1, 0,
3328 1, 1
3329 };
3330 fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), kVertex_GrBufferType,
3331 kStatic_GrAccessPattern, vdata);
3332 }
3333 if (!fCopyProgramArrayBuffer) {
3334 return false;
3335 }
3336
3337 SkASSERT(!fCopyPrograms[progIdx].fProgram);
3338 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
3339 if (!fCopyPrograms[progIdx].fProgram) {
3340 return false;
3341 }
3342
3343 const char* version = shaderCaps->versionDeclString();
3344 GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::kIn_TypeModifier);
3345 GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
3346 GrShaderVar::kUniform_TypeModifier);
3347 GrShaderVar uPosXform("u_posXform", kHalf4_GrSLType, GrShaderVar::kUniform_TypeModifier);
3348 GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::kUniform_TypeModifier);
3349 GrShaderVar vTexCoord("v_texCoord", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier);
3350 GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::kOut_TypeModifier);
3351
3352 SkString vshaderTxt(version);
3353 if (shaderCaps->noperspectiveInterpolationSupport()) {
3354 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3355 vshaderTxt.appendf("#extension %s : require\n", extension);
3356 }
3357 vTexCoord.addModifier("noperspective");
3358 }
3359
3360 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3361 vshaderTxt.append(";");
3362 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3363 vshaderTxt.append(";");
3364 uPosXform.appendDecl(shaderCaps, &vshaderTxt);
3365 vshaderTxt.append(";");
3366 vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
3367 vshaderTxt.append(";");
3368
3369 vshaderTxt.append(
3370 "// Copy Program VS\n"
3371 "void main() {"
3372 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
3373 " sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
3374 " sk_Position.zw = half2(0, 1);"
3375 "}"
3376 );
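    // Note: a_vertex is a unit quad. u_posXform holds a scale in .xy and an offset in .zw that map
    // it onto the dst rect in NDC, and u_texCoordXform does the same for the src rect in texture
    // coordinates; see the Uniform4f calls in copySurfaceAsDraw().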
3377
3378 SkString fshaderTxt(version);
3379 if (shaderCaps->noperspectiveInterpolationSupport()) {
3380 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3381 fshaderTxt.appendf("#extension %s : require\n", extension);
3382 }
3383 }
3384 vTexCoord.setTypeModifier(GrShaderVar::kIn_TypeModifier);
3385 vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
3386 fshaderTxt.append(";");
3387 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3388 fshaderTxt.append(";");
3389 fshaderTxt.appendf(
3390 "// Copy Program FS\n"
3391 "void main() {"
3392 " sk_FragColor = texture(u_texture, v_texCoord);"
3393 "}"
3394 );
3395
3396 const char* str;
3397 GrGLint length;
3398
3399 str = vshaderTxt.c_str();
3400 length = SkToInt(vshaderTxt.size());
3401 SkSL::Program::Settings settings;
3402 settings.fCaps = shaderCaps;
3403 SkSL::String glsl;
3404 std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, GR_GL_VERTEX_SHADER,
3405 &str, &length, 1, settings, &glsl);
3406 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
3407 GR_GL_VERTEX_SHADER, glsl.c_str(), glsl.size(),
3408 &fStats, settings);
3409 SkASSERT(program->fInputs.isEmpty());
3410
3411 str = fshaderTxt.c_str();
3412 length = SkToInt(fshaderTxt.size());
3413 program = GrSkSLtoGLSL(*fGLContext, GR_GL_FRAGMENT_SHADER, &str, &length, 1, settings, &glsl);
3414 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
3415 GR_GL_FRAGMENT_SHADER, glsl.c_str(), glsl.size(),
3416 &fStats, settings);
3417 SkASSERT(program->fInputs.isEmpty());
3418
3419 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
3420
3421 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
3422 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
3423 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
3424 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
3425 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
3426 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
3427
3428 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3429
3430 GL_CALL(DeleteShader(vshader));
3431 GL_CALL(DeleteShader(fshader));
3432
3433 return true;
3434 }
3435
3436 bool GrGLGpu::createMipmapProgram(int progIdx) {
3437 const bool oddWidth = SkToBool(progIdx & 0x2);
3438 const bool oddHeight = SkToBool(progIdx & 0x1);
3439 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
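    // The two low bits of progIdx select the filter shape: 0x2 indicates an odd source width and
    // 0x1 an odd source height, giving 1, 2, or 4 taps per output texel.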
3440
3441 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3442
3443 SkASSERT(!fMipmapPrograms[progIdx].fProgram);
3444 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
3445 if (!fMipmapPrograms[progIdx].fProgram) {
3446 return false;
3447 }
3448
3449 const char* version = shaderCaps->versionDeclString();
3450 GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::kIn_TypeModifier);
3451 GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
3452 GrShaderVar::kUniform_TypeModifier);
3453 GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
3454 GrShaderVar::kUniform_TypeModifier);
3455 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
3456 GrShaderVar vTexCoords[] = {
3457 GrShaderVar("v_texCoord0", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
3458 GrShaderVar("v_texCoord1", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
3459 GrShaderVar("v_texCoord2", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
3460 GrShaderVar("v_texCoord3", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
3461 };
3462     GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::kOut_TypeModifier);
3463
3464 SkString vshaderTxt(version);
3465 if (shaderCaps->noperspectiveInterpolationSupport()) {
3466 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3467 vshaderTxt.appendf("#extension %s : require\n", extension);
3468 }
3469 vTexCoords[0].addModifier("noperspective");
3470 vTexCoords[1].addModifier("noperspective");
3471 vTexCoords[2].addModifier("noperspective");
3472 vTexCoords[3].addModifier("noperspective");
3473 }
3474
3475 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3476 vshaderTxt.append(";");
3477 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3478 vshaderTxt.append(";");
3479 for (int i = 0; i < numTaps; ++i) {
3480 vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
3481 vshaderTxt.append(";");
3482 }
3483
3484 vshaderTxt.append(
3485 "// Mipmap Program VS\n"
3486 "void main() {"
3487 " sk_Position.xy = a_vertex * half2(2, 2) - half2(1, 1);"
3488 " sk_Position.zw = half2(0, 1);"
3489 );
3490
3491 // Insert texture coordinate computation:
3492 if (oddWidth && oddHeight) {
3493 vshaderTxt.append(
3494 " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
3495 " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
3496 " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
3497 " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
3498 );
3499 } else if (oddWidth) {
3500 vshaderTxt.append(
3501 " v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
3502 " v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
3503 );
3504 } else if (oddHeight) {
3505 vshaderTxt.append(
3506 " v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
3507 " v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
3508 );
3509 } else {
3510 vshaderTxt.append(
3511 " v_texCoord0 = a_vertex.xy;"
3512 );
3513 }
3514
3515 vshaderTxt.append("}");
3516
3517 SkString fshaderTxt(version);
3518 if (shaderCaps->noperspectiveInterpolationSupport()) {
3519 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3520 fshaderTxt.appendf("#extension %s : require\n", extension);
3521 }
3522 }
3523 for (int i = 0; i < numTaps; ++i) {
3524 vTexCoords[i].setTypeModifier(GrShaderVar::kIn_TypeModifier);
3525 vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
3526 fshaderTxt.append(";");
3527 }
3528 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3529 fshaderTxt.append(";");
3530 fshaderTxt.append(
3531 "// Mipmap Program FS\n"
3532 "void main() {"
3533 );
3534
3535 if (oddWidth && oddHeight) {
3536 fshaderTxt.append(
3537 " sk_FragColor = (texture(u_texture, v_texCoord0) + "
3538 " texture(u_texture, v_texCoord1) + "
3539 " texture(u_texture, v_texCoord2) + "
3540 " texture(u_texture, v_texCoord3)) * 0.25;"
3541 );
3542 } else if (oddWidth || oddHeight) {
3543 fshaderTxt.append(
3544 " sk_FragColor = (texture(u_texture, v_texCoord0) + "
3545 " texture(u_texture, v_texCoord1)) * 0.5;"
3546 );
3547 } else {
3548 fshaderTxt.append(
3549 " sk_FragColor = texture(u_texture, v_texCoord0);"
3550 );
3551 }
3552
3553 fshaderTxt.append("}");
3554
3555 const char* str;
3556 GrGLint length;
3557
3558 str = vshaderTxt.c_str();
3559 length = SkToInt(vshaderTxt.size());
3560 SkSL::Program::Settings settings;
3561 settings.fCaps = shaderCaps;
3562 SkSL::String glsl;
3563 std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, GR_GL_VERTEX_SHADER,
3564 &str, &length, 1, settings, &glsl);
3565 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
3566 GR_GL_VERTEX_SHADER, glsl.c_str(), glsl.size(),
3567 &fStats, settings);
3568 SkASSERT(program->fInputs.isEmpty());
3569
3570 str = fshaderTxt.c_str();
3571 length = SkToInt(fshaderTxt.size());
3572 program = GrSkSLtoGLSL(*fGLContext, GR_GL_FRAGMENT_SHADER, &str, &length, 1, settings, &glsl);
3573 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
3574 GR_GL_FRAGMENT_SHADER, glsl.c_str(), glsl.size(),
3575 &fStats, settings);
3576 SkASSERT(program->fInputs.isEmpty());
3577
3578 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
3579
3580 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
3581 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
3582 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3583 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
3584
3585 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3586
3587 GL_CALL(DeleteShader(vshader));
3588 GL_CALL(DeleteShader(fshader));
3589
3590 return true;
3591 }
3592
3593 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, GrSurfaceOrigin dstOrigin,
3594 GrSurface* src, GrSurfaceOrigin srcOrigin,
3595 const SkIRect& srcRect,
3596 const SkIPoint& dstPoint) {
3597 GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
3598 int progIdx = TextureToCopyProgramIdx(srcTex);
3599
3600 if (!this->glCaps().canConfigBeFBOColorAttachment(dst->config())) {
3601 return false;
3602 }
3603
3604 if (!fCopyPrograms[progIdx].fProgram) {
3605 if (!this->createCopyProgram(srcTex)) {
3606 SkDebugf("Failed to create copy program.\n");
3607 return false;
3608 }
3609 }
3610
3611 int w = srcRect.width();
3612 int h = srcRect.height();
3613
3614 this->bindTexture(0, GrSamplerState::ClampNearest(), srcTex);
3615
3616 GrGLIRect dstVP;
3617 this->bindSurfaceFBOForPixelOps(dst, GR_GL_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
3618 this->flushViewport(dstVP);
3619 fHWBoundRenderTargetUniqueID.makeInvalid();
3620
3621 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);
3622
3623 this->flushProgram(fCopyPrograms[progIdx].fProgram);
3624
3625 fHWVertexArrayState.setVertexArrayID(this, 0);
3626
3627 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3628 attribs->enableVertexArrays(this, 1);
3629 attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3630 kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);
3631
3632 // dst rect edges in NDC (-1 to 1)
3633 int dw = dst->width();
3634 int dh = dst->height();
3635 GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
3636 GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
3637 GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
3638 GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
3639 if (kBottomLeft_GrSurfaceOrigin == dstOrigin) {
3640 dy0 = -dy0;
3641 dy1 = -dy1;
3642 }
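    // Each dst edge is mapped to NDC as 2 * x / size - 1; the y values are negated for
    // bottom-left-origin surfaces so the quad covers the intended rows.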
3643
3644 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
3645 GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
3646 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
3647 GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
3648 int sw = src->width();
3649 int sh = src->height();
3650 if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
3651 sy0 = sh - sy0;
3652 sy1 = sh - sy1;
3653 }
3654 if (srcTex->texturePriv().textureType() != GrTextureType::kRectangle) {
3655 // src rect edges in normalized texture space (0 to 1)
3656 sx0 /= sw;
3657 sx1 /= sw;
3658 sy0 /= sh;
3659 sy1 /= sh;
3660 }
3661
3662 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
3663 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
3664 sx1 - sx0, sy1 - sy0, sx0, sy0));
3665 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
3666
3667 GrXferProcessor::BlendInfo blendInfo;
3668 blendInfo.reset();
3669 this->flushBlend(blendInfo, GrSwizzle::RGBA());
3670 this->flushColorWrite(true);
3671 this->flushHWAAState(nullptr, false, false);
3672 this->disableScissor();
3673 this->disableWindowRectangles();
3674 this->disableStencil();
3675 if (this->glCaps().srgbWriteControl()) {
3676 this->flushFramebufferSRGB(true);
3677 }
3678
3679 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3680 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, dst);
3681 this->didWriteToSurface(dst, dstOrigin, &dstRect);
3682
3683 return true;
3684 }
3685
3686 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
3687 GrSurface* src, GrSurfaceOrigin srcOrigin,
3688 const SkIRect& srcRect,
3689 const SkIPoint& dstPoint) {
3690 SkASSERT(can_copy_texsubimage(dst, dstOrigin, src, srcOrigin, this->glCaps()));
3691 GrGLIRect srcVP;
3692 this->bindSurfaceFBOForPixelOps(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
3693 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
3694 SkASSERT(dstTex);
3695 // We modified the bound FBO
3696 fHWBoundRenderTargetUniqueID.makeInvalid();
3697 GrGLIRect srcGLRect;
3698 srcGLRect.setRelativeTo(srcVP, srcRect, srcOrigin);
3699
3700 this->setScratchTextureUnit();
3701 GL_CALL(BindTexture(dstTex->target(), dstTex->textureID()));
3702 GrGLint dstY;
3703 if (kBottomLeft_GrSurfaceOrigin == dstOrigin) {
3704 dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
3705 } else {
3706 dstY = dstPoint.fY;
3707 }
3708 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
3709 dstPoint.fX, dstY,
3710 srcGLRect.fLeft, srcGLRect.fBottom,
3711 srcGLRect.fWidth, srcGLRect.fHeight));
3712 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, src);
3713 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3714 srcRect.width(), srcRect.height());
3715 this->didWriteToSurface(dst, dstOrigin, &dstRect);
3716 }
3717
3718 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurfaceOrigin dstOrigin,
3719 GrSurface* src, GrSurfaceOrigin srcOrigin,
3720 const SkIRect& srcRect,
3721 const SkIPoint& dstPoint) {
3722 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, dstOrigin, src, srcOrigin,
3723 srcRect, dstPoint, this->glCaps()));
3724 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3725 srcRect.width(), srcRect.height());
3726 if (dst == src) {
3727 if (SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
3728 return false;
3729 }
3730 }
3731
3732 GrGLIRect dstVP;
3733 GrGLIRect srcVP;
3734 this->bindSurfaceFBOForPixelOps(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
3735 this->bindSurfaceFBOForPixelOps(src, GR_GL_READ_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
3736 // We modified the bound FBO
3737 fHWBoundRenderTargetUniqueID.makeInvalid();
3738 GrGLIRect srcGLRect;
3739 GrGLIRect dstGLRect;
3740 srcGLRect.setRelativeTo(srcVP, srcRect, srcOrigin);
3741 dstGLRect.setRelativeTo(dstVP, dstRect, dstOrigin);
3742
3743 // BlitFrameBuffer respects the scissor, so disable it.
3744 this->disableScissor();
3745 this->disableWindowRectangles();
3746
3747 GrGLint srcY0;
3748 GrGLint srcY1;
3749 // Does the blit need to y-mirror or not?
3750 if (srcOrigin == dstOrigin) {
3751 srcY0 = srcGLRect.fBottom;
3752 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
3753 } else {
3754 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
3755 srcY1 = srcGLRect.fBottom;
3756 }
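    // Swapping srcY0 and srcY1 makes glBlitFramebuffer flip the copy vertically, compensating for
    // the differing surface origins.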
3757 GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
3758 srcY0,
3759 srcGLRect.fLeft + srcGLRect.fWidth,
3760 srcY1,
3761 dstGLRect.fLeft,
3762 dstGLRect.fBottom,
3763 dstGLRect.fLeft + dstGLRect.fWidth,
3764 dstGLRect.fBottom + dstGLRect.fHeight,
3765 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
3766 this->unbindTextureFBOForPixelOps(GR_GL_DRAW_FRAMEBUFFER, dst);
3767 this->unbindTextureFBOForPixelOps(GR_GL_READ_FRAMEBUFFER, src);
3768 this->didWriteToSurface(dst, dstOrigin, &dstRect);
3769 return true;
3770 }
3771
3772 bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
3773 auto glTex = static_cast<GrGLTexture*>(texture);
3774 // Mipmaps are only supported on 2D textures:
3775 if (GR_GL_TEXTURE_2D != glTex->target()) {
3776 return false;
3777 }
3778
3779 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
3780 // Uses draw calls to do a series of downsample operations to successive mips.
3781
3782 // The manual approach requires the ability to limit which level we're sampling and that the
3783 // destination can be bound to a FBO:
3784 if (!this->glCaps().doManualMipmapping() ||
3785 !this->glCaps().canConfigBeFBOColorAttachment(texture->config())) {
3786 GrGLenum target = glTex->target();
3787 this->setScratchTextureUnit();
3788 GL_CALL(BindTexture(target, glTex->textureID()));
3789 GL_CALL(GenerateMipmap(glTex->target()));
3790 return true;
3791 }
3792
3793 int width = texture->width();
3794 int height = texture->height();
3795 int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1;
3796 SkASSERT(levelCount == texture->texturePriv().maxMipMapLevel() + 1);
3797
3798 // Create (if necessary), then bind temporary FBO:
3799 if (0 == fTempDstFBOID) {
3800 GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
3801 }
3802 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
3803 fHWBoundRenderTargetUniqueID.makeInvalid();
3804
3805 // Bind the texture, to get things configured for filtering.
3806 // We'll be changing our base level further below:
3807 this->setTextureUnit(0);
3808 this->bindTexture(0, GrSamplerState::ClampBilerp(), glTex);
3809
3810 // Vertex data:
3811 if (!fMipmapProgramArrayBuffer) {
3812 static const GrGLfloat vdata[] = {
3813 0, 0,
3814 0, 1,
3815 1, 0,
3816 1, 1
3817 };
3818 fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), kVertex_GrBufferType,
3819 kStatic_GrAccessPattern, vdata);
3820 }
3821 if (!fMipmapProgramArrayBuffer) {
3822 return false;
3823 }
3824
3825 fHWVertexArrayState.setVertexArrayID(this, 0);
3826
3827 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3828 attribs->enableVertexArrays(this, 1);
3829 attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3830 kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);
3831
3832 // Set "simple" state once:
3833 GrXferProcessor::BlendInfo blendInfo;
3834 blendInfo.reset();
3835 this->flushBlend(blendInfo, GrSwizzle::RGBA());
3836 this->flushColorWrite(true);
3837 this->flushHWAAState(nullptr, false, false);
3838 this->disableScissor();
3839 this->disableWindowRectangles();
3840 this->disableStencil();
3841
3842 // Do all the blits:
3843 width = texture->width();
3844 height = texture->height();
3845 GrGLIRect viewport;
3846 viewport.fLeft = 0;
3847 viewport.fBottom = 0;
3848
3849 for (GrGLint level = 1; level < levelCount; ++level) {
3850 // Get and bind the program for this particular downsample (filter shape can vary):
3851 int progIdx = TextureSizeToMipmapProgramIdx(width, height);
3852 if (!fMipmapPrograms[progIdx].fProgram) {
3853 if (!this->createMipmapProgram(progIdx)) {
3854 SkDebugf("Failed to create mipmap program.\n");
3855 // Invalidate all params to cover base level change in a previous iteration.
3856 glTex->textureParamsModified();
3857 return false;
3858 }
3859 }
3860 this->flushProgram(fMipmapPrograms[progIdx].fProgram);
3861
3862 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
3863 const float invWidth = 1.0f / width;
3864 const float invHeight = 1.0f / height;
3865 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3866 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
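        // For example, when downsampling a 5-texel-wide level, u_texCoordXform.xy = (0.2, 0.8), so
        // v_texCoord0.x spans [0, 0.8] and v_texCoord1.x is offset by one source texel (0.2).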
3867 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
3868
3869 // Only sample from previous mip
3870 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
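        // Restricting the base level to level - 1 means the draw samples only the level populated on
        // the previous iteration, while 'level' itself is attached as the FBO color target below.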
3871
3872 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
3873 glTex->textureID(), level));
3874
3875 width = SkTMax(1, width / 2);
3876 height = SkTMax(1, height / 2);
3877 viewport.fWidth = width;
3878 viewport.fHeight = height;
3879 this->flushViewport(viewport);
3880
3881 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3882 }
3883
3884 // Unbind:
3885 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3886 GR_GL_TEXTURE_2D, 0, 0));
3887
3888 // We modified the base level param.
3889 GrGLTexture::NonSamplerParams params = glTex->getCachedNonSamplerParams();
3890 params.fBaseMipMapLevel = levelCount - 2; // we drew the 2nd to last level into the last level.
3891 glTex->setCachedParams(nullptr, params, this->getResetTimestamp());
3892
3893 return true;
3894 }
3895
3896 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
3897 SkASSERT(type);
3898 switch (type) {
3899 case kTexture_GrXferBarrierType: {
3900 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
3901 SkASSERT(glrt->textureFBOID() != 0 && glrt->renderFBOID() != 0);
3902 if (glrt->textureFBOID() != glrt->renderFBOID()) {
3903 // The render target uses separate storage so no need for glTextureBarrier.
3904 // FIXME: The render target will resolve automatically when its texture is bound,
3905 // but we could resolve only the bounds that will be read if we do it here instead.
3906 return;
3907 }
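            // The texture and render target share storage here, so a texture barrier is needed to
            // make prior rendering visible to subsequent texture fetches from the same image.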
3908 SkASSERT(this->caps()->textureBarrierSupport());
3909 GL_CALL(TextureBarrier());
3910 return;
3911 }
3912 case kBlend_GrXferBarrierType:
3913 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
3914 this->caps()->blendEquationSupport());
3915 GL_CALL(BlendBarrier());
3916 return;
3917 default: break; // placate compiler warnings that kNone not handled
3918 }
3919 }
3920
3921 #if GR_TEST_UTILS
3922 GrBackendTexture GrGLGpu::createTestingOnlyBackendTexture(const void* pixels, int w, int h,
3923 GrColorType colorType, bool /*isRT*/,
3924 GrMipMapped mipMapped,
3925 size_t rowBytes) {
3926 this->handleDirtyContext();
3927
3928 GrPixelConfig config = GrColorTypeToPixelConfig(colorType, GrSRGBEncoded::kNo);
3929 if (!this->caps()->isConfigTexturable(config)) {
3930 return GrBackendTexture(); // invalid
3931 }
3932
3933 if (w > this->caps()->maxTextureSize() || h > this->caps()->maxTextureSize()) {
3934 return GrBackendTexture(); // invalid
3935 }
3936
3937 // Currently we don't support uploading pixel data when mipped.
3938 if (pixels && GrMipMapped::kYes == mipMapped) {
3939 return GrBackendTexture(); // invalid
3940 }
3941
3942 int bpp = GrColorTypeBytesPerPixel(colorType);
3943 const size_t trimRowBytes = w * bpp;
3944 if (!rowBytes) {
3945 rowBytes = trimRowBytes;
3946 }
3947
3948 GrGLTextureInfo info;
3949 info.fTarget = GR_GL_TEXTURE_2D;
3950 info.fID = 0;
3951 GL_CALL(GenTextures(1, &info.fID));
3952 GL_CALL(ActiveTexture(GR_GL_TEXTURE0));
3953 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
3954 GL_CALL(BindTexture(info.fTarget, info.fID));
3955 fHWBoundTextureUniqueIDs[0].makeInvalid();
3956 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST));
3957 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST));
3958 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
3959 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
3960
3961 // we have to do something special for compressed textures
3962 if (GrPixelConfigIsCompressed(config)) {
3963 GrGLenum internalFormat;
3964 const GrGLInterface* interface = this->glInterface();
3965 const GrGLCaps& caps = this->glCaps();
3966 if (!caps.getCompressedTexImageFormats(config, &internalFormat)) {
3967 return GrBackendTexture();
3968 }
3969
3970 GrMipLevel mipLevel = { pixels, rowBytes };
3971 if (!allocate_and_populate_compressed_texture(config, *interface, caps, info.fTarget,
3972 internalFormat, &mipLevel, 1,
3973 w, h)) {
3974 return GrBackendTexture();
3975 }
3976 } else {
3977 bool restoreGLRowLength = false;
3978 if (trimRowBytes != rowBytes && this->glCaps().unpackRowLengthSupport()) {
3979 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
3980 restoreGLRowLength = true;
3981 }
3982
3983 GrGLenum internalFormat;
3984 GrGLenum externalFormat;
3985 GrGLenum externalType;
3986
3987 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
3988 &externalType)) {
3989 return GrBackendTexture(); // invalid
3990 }
3991
3992 info.fFormat = this->glCaps().configSizedInternalFormat(config);
3993
3994 this->unbindCpuToGpuXferBuffer();
3995
3996 // Figure out the number of mip levels.
3997 int mipLevels = 1;
3998 if (GrMipMapped::kYes == mipMapped) {
3999 mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
4000 }
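        // For example, a 256x256 texture gives ComputeLevelCount() == 8, so 9 levels total
        // (the base plus successive halvings down to 1x1).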
4001
4002 size_t baseLayerSize = bpp * w * h;
4003 SkAutoMalloc defaultStorage(baseLayerSize);
4004 if (!pixels) {
4005 // Fill in the texture with all zeros so we don't have random garbage
4006 pixels = defaultStorage.get();
4007 memset(defaultStorage.get(), 0, baseLayerSize);
4008 } else if (trimRowBytes != rowBytes && !restoreGLRowLength) {
4009 // We weren't able to use GR_GL_UNPACK_ROW_LENGTH so make a copy
4010 char* copy = (char*)defaultStorage.get();
4011 for (int y = 0; y < h; ++y) {
4012                 memcpy(&copy[y*trimRowBytes], &((const char*)pixels)[y*rowBytes], trimRowBytes);
4013 }
4014 pixels = copy;
4015 }
4016
4017 int width = w;
4018 int height = h;
4019 for (int i = 0; i < mipLevels; ++i) {
4020 GL_CALL(TexImage2D(info.fTarget, i, internalFormat, width, height, 0, externalFormat,
4021 externalType, pixels));
4022 width = SkTMax(1, width / 2);
4023 height = SkTMax(1, height / 2);
4024 }
4025 if (restoreGLRowLength) {
4026 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
4027 }
4028 }
4029
4030 // unbind the texture from the texture unit to avoid asserts
4031 GL_CALL(BindTexture(info.fTarget, 0));
4032
4033 GrBackendTexture beTex = GrBackendTexture(w, h, mipMapped, info);
4034     // Lots of tests don't go through Skia's public interface, which would set the config, so for
4035     // testing we make sure we set a config here.
4036 beTex.setPixelConfig(config);
4037 return beTex;
4038 }
4039
4040 bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
4041 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4042
4043 GrGLTextureInfo info;
4044 if (!tex.getGLTextureInfo(&info)) {
4045 return false;
4046 }
4047
4048 GrGLboolean result;
4049 GL_CALL_RET(result, IsTexture(info.fID));
4050
4051 return (GR_GL_TRUE == result);
4052 }
4053
4054 void GrGLGpu::deleteTestingOnlyBackendTexture(const GrBackendTexture& tex) {
4055 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4056
4057 GrGLTextureInfo info;
4058 if (tex.getGLTextureInfo(&info)) {
4059 GL_CALL(DeleteTextures(1, &info.fID));
4060 }
4061 }
4062
4063 GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(int w, int h,
4064 GrColorType colorType) {
4065 if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) {
4066 return GrBackendRenderTarget(); // invalid
4067 }
4068 this->handleDirtyContext();
4069 auto config = GrColorTypeToPixelConfig(colorType, GrSRGBEncoded::kNo);
4070 if (!this->glCaps().isConfigRenderable(config)) {
4071 return {};
4072 }
4073 bool useTexture = false;
4074 GrGLenum colorBufferFormat;
4075 GrGLenum externalFormat = 0, externalType = 0;
4076 if (config == kBGRA_8888_GrPixelConfig && this->glCaps().bgraIsInternalFormat()) {
4077 // BGRA render buffers are not supported.
4078 this->glCaps().getTexImageFormats(config, config, &colorBufferFormat, &externalFormat,
4079 &externalType);
4080 useTexture = true;
4081 } else {
4082 this->glCaps().getRenderbufferFormat(config, &colorBufferFormat);
4083 }
4084 int sFormatIdx = this->getCompatibleStencilIndex(config);
4085 if (sFormatIdx < 0) {
4086 return {};
4087 }
4088 GrGLuint colorID = 0;
4089 GrGLuint stencilID = 0;
4090 auto deleteIDs = [&] {
4091 if (colorID) {
4092 if (useTexture) {
4093 GL_CALL(DeleteTextures(1, &colorID));
4094 } else {
4095 GL_CALL(DeleteRenderbuffers(1, &colorID));
4096 }
4097 }
4098 if (stencilID) {
4099 GL_CALL(DeleteRenderbuffers(1, &stencilID));
4100 }
4101 };
4102
4103 if (useTexture) {
4104 GL_CALL(GenTextures(1, &colorID));
4105 } else {
4106 GL_CALL(GenRenderbuffers(1, &colorID));
4107 }
4108 GL_CALL(GenRenderbuffers(1, &stencilID));
4109 if (!stencilID || !colorID) {
4110 deleteIDs();
4111 return {};
4112 }
4113
4114 GrGLFramebufferInfo info;
4115 info.fFBOID = 0;
4116 this->glCaps().getSizedInternalFormat(config, &info.fFormat);
4117 GL_CALL(GenFramebuffers(1, &info.fFBOID));
4118 if (!info.fFBOID) {
4119 deleteIDs();
4120 return {};
4121 }
4122
4123 this->invalidateBoundRenderTarget();
4124
4125 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4126 if (useTexture) {
4127 this->setScratchTextureUnit();
4128 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID));
4129 GL_CALL(TexImage2D(GR_GL_TEXTURE_2D, 0, colorBufferFormat, w, h, 0, externalFormat,
4130 externalType, nullptr));
4131 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
4132 colorID, 0));
4133 } else {
4134 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
4135 GL_ALLOC_CALL(this->glInterface(),
4136 RenderbufferStorage(GR_GL_RENDERBUFFER, colorBufferFormat, w, h));
4137 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4138 GR_GL_RENDERBUFFER, colorID));
4139 }
4140 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
4141 auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx].fInternalFormat;
4142 GL_ALLOC_CALL(this->glInterface(),
4143 RenderbufferStorage(GR_GL_RENDERBUFFER, stencilBufferFormat, w, h));
4144 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER,
4145 stencilID));
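    // A packed depth/stencil format (e.g. DEPTH24_STENCIL8) backs both aspects, so the same
    // renderbuffer must also be attached at the depth attachment point.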
4146 if (this->glCaps().stencilFormats()[sFormatIdx].fPacked) {
4147 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
4148 GR_GL_RENDERBUFFER, stencilID));
4149 }
4150
4151     // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
4152     // delays deletion of a renderbuffer/texture while an FBO other than the currently bound one
4153     // still has it attached. So we unbind the FBO here and then delete the
4154     // renderbuffers/texture.
4155 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
4156 deleteIDs();
4157
4158 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4159 GrGLenum status;
4160 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
4161 if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
4162 this->deleteFramebuffer(info.fFBOID);
4163 return {};
4164 }
4165 auto stencilBits = SkToInt(this->glCaps().stencilFormats()[sFormatIdx].fStencilBits);
4166 GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, stencilBits, info);
4167     // Lots of tests don't go through Skia's public interface, which would set the config, so for
4168     // testing we make sure we set a config here.
4169 beRT.setPixelConfig(config);
4170 #ifdef SK_DEBUG
4171 SkColorType skColorType = GrColorTypeToSkColorType(colorType);
4172 if (skColorType != kUnknown_SkColorType) {
4173 SkASSERT(this->caps()->validateBackendRenderTarget(
4174 beRT, GrColorTypeToSkColorType(colorType)) != kUnknown_GrPixelConfig);
4175 }
4176 #endif
4177 return beRT;
4178 }
4179
4180 void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
4181 SkASSERT(GrBackendApi::kOpenGL == backendRT.backend());
4182 GrGLFramebufferInfo info;
4183 if (backendRT.getGLFramebufferInfo(&info)) {
4184 if (info.fFBOID) {
4185 this->deleteFramebuffer(info.fFBOID);
4186 }
4187 }
4188 }
4189
4190 void GrGLGpu::testingOnly_flushGpuAndSync() {
4191 GL_CALL(Finish());
4192 }
4193 #endif
4194
4195 ///////////////////////////////////////////////////////////////////////////////
4196
4197 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
4198 const GrBuffer* ibuf) {
4199 GrGLAttribArrayState* attribState;
4200
4201 if (gpu->glCaps().isCoreProfile()) {
4202 if (!fCoreProfileVertexArray) {
4203 GrGLuint arrayID;
4204 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
4205 int attrCount = gpu->glCaps().maxVertexAttributes();
4206 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
4207 }
4208 if (ibuf) {
4209 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
4210 } else {
4211 attribState = fCoreProfileVertexArray->bind(gpu);
4212 }
4213 } else {
4214 if (ibuf) {
4215 // bindBuffer implicitly binds VAO 0 when binding an index buffer.
4216 gpu->bindBuffer(kIndex_GrBufferType, ibuf);
4217 } else {
4218 this->setVertexArrayID(gpu, 0);
4219 }
4220 int attrCount = gpu->glCaps().maxVertexAttributes();
4221 if (fDefaultVertexArrayAttribState.count() != attrCount) {
4222 fDefaultVertexArrayAttribState.resize(attrCount);
4223 }
4224 attribState = &fDefaultVertexArrayAttribState;
4225 }
4226 return attribState;
4227 }
4228
4229 void GrGLGpu::onFinishFlush(bool insertedSemaphore) {
4230     // If we inserted semaphores during the flush, we need to call glFlush.
4231 if (insertedSemaphore) {
4232 GL_CALL(Flush());
4233 }
4234 }
4235
4236 void GrGLGpu::submit(GrGpuCommandBuffer* buffer) {
4237 if (buffer->asRTCommandBuffer()) {
4238 SkASSERT(fCachedRTCommandBuffer.get() == buffer);
4239 fCachedRTCommandBuffer->reset();
4240 } else {
4241 SkASSERT(fCachedTexCommandBuffer.get() == buffer);
4242 fCachedTexCommandBuffer->reset();
4243 }
4244 }
4245
4246 GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() {
4247 SkASSERT(this->caps()->fenceSyncSupport());
4248 GrGLsync sync;
4249 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4250 GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(GrGLsync));
4251 return (GrFence)sync;
4252 }
4253
4254 bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) {
4255 GrGLenum result;
4256 GL_CALL_RET(result, ClientWaitSync((GrGLsync)fence, GR_GL_SYNC_FLUSH_COMMANDS_BIT, timeout));
4257 return (GR_GL_CONDITION_SATISFIED == result);
4258 }
4259
4260 void GrGLGpu::deleteFence(GrFence fence) const {
4261 this->deleteSync((GrGLsync)fence);
4262 }
4263
4264 sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) {
4265 SkASSERT(this->caps()->fenceSyncSupport());
4266 return GrGLSemaphore::Make(this, isOwned);
4267 }
4268
4269 sk_sp<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
4270 GrResourceProvider::SemaphoreWrapType wrapType,
4271 GrWrapOwnership ownership) {
4272 SkASSERT(this->caps()->fenceSyncSupport());
4273 return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership);
4274 }
4275
4276 void GrGLGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
4277 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());
4278
4279 GrGLsync sync;
4280 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4281 glSem->setSync(sync);
4282 }
4283
4284 void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
4285 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());
4286
4287 GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
4288 }
4289
4290 void GrGLGpu::deleteSync(GrGLsync sync) const {
4291 GL_CALL(DeleteSync(sync));
4292 }
4293
4294 void GrGLGpu::insertEventMarker(const char* msg) {
4295 GL_CALL(InsertEventMarker(strlen(msg), msg));
4296 }
4297
4298 sk_sp<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
4299 // Set up a semaphore to be signaled once the data is ready, and flush GL
4300 sk_sp<GrSemaphore> semaphore = this->makeSemaphore(true);
4301 this->insertSemaphore(semaphore);
4302 // We must call flush here to make sure the GrGLSync object gets created and sent to the gpu.
4303 GL_CALL(Flush());
4304
4305 return semaphore;
4306 }
4307
4308 int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) {
4309 switch (GrSLCombinedSamplerTypeForTextureType(texture->texturePriv().textureType())) {
4310 case kTexture2DSampler_GrSLType:
4311 return 0;
4312 case kTexture2DRectSampler_GrSLType:
4313 return 1;
4314 case kTextureExternalSampler_GrSLType:
4315 return 2;
4316 default:
4317             SK_ABORT("Unexpected sampler type");
4318 return 0;
4319 }
4320 }
4321
4322 #ifdef SK_ENABLE_DUMP_GPU
4323 #include "SkJSONWriter.h"
4324 void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const {
4325 // We are called by the base class, which has already called beginObject(). We choose to nest
4326 // all of our caps information in a named sub-object.
4327 writer->beginObject("GL GPU");
4328
4329 const GrGLubyte* str;
4330 GL_CALL_RET(str, GetString(GR_GL_VERSION));
4331 writer->appendString("GL_VERSION", (const char*)(str));
4332 GL_CALL_RET(str, GetString(GR_GL_RENDERER));
4333 writer->appendString("GL_RENDERER", (const char*)(str));
4334 GL_CALL_RET(str, GetString(GR_GL_VENDOR));
4335 writer->appendString("GL_VENDOR", (const char*)(str));
4336 GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
4337 writer->appendString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));
4338
4339 writer->appendName("extensions");
4340 glInterface()->fExtensions.dumpJSON(writer);
4341
4342 writer->endObject();
4343 }
4344 #endif
4345