1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/gl/GrGLGpu.h"
9
10 #include "include/core/SkPixmap.h"
11 #include "include/core/SkTypes.h"
12 #include "include/gpu/GrBackendSemaphore.h"
13 #include "include/gpu/GrBackendSurface.h"
14 #include "include/gpu/GrDirectContext.h"
15 #include "include/gpu/GrTypes.h"
16 #include "include/private/SkHalf.h"
17 #include "include/private/SkTemplates.h"
18 #include "include/private/SkTo.h"
19 #include "src/core/SkAutoMalloc.h"
20 #include "src/core/SkCompressedDataUtils.h"
21 #include "src/core/SkMipmap.h"
22 #include "src/core/SkScopeExit.h"
23 #include "src/core/SkTraceEvent.h"
24 #include "src/gpu/GrBackendUtils.h"
25 #include "src/gpu/GrCpuBuffer.h"
26 #include "src/gpu/GrDataUtils.h"
27 #include "src/gpu/GrDirectContextPriv.h"
28 #include "src/gpu/GrGpuResourcePriv.h"
29 #include "src/gpu/GrPipeline.h"
30 #include "src/gpu/GrProgramInfo.h"
31 #include "src/gpu/GrRenderTarget.h"
32 #include "src/gpu/GrShaderCaps.h"
33 #include "src/gpu/GrSurfaceProxyPriv.h"
34 #include "src/gpu/GrTexture.h"
35 #include "src/gpu/gl/GrGLAttachment.h"
36 #include "src/gpu/gl/GrGLBuffer.h"
37 #include "src/gpu/gl/GrGLOpsRenderPass.h"
38 #include "src/gpu/gl/GrGLSemaphore.h"
39 #include "src/gpu/gl/GrGLTextureRenderTarget.h"
40 #include "src/gpu/gl/builders/GrGLShaderStringBuilder.h"
41 #include "src/sksl/SkSLCompiler.h"
42
43 #include <cmath>
44 #include <memory>
45
46 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
47 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
48
49 #define GL_ALLOC_CALL(call) \
50 [&] { \
51 if (this->glCaps().skipErrorChecks()) { \
52 GR_GL_CALL(this->glInterface(), call); \
53 return static_cast<GrGLenum>(GR_GL_NO_ERROR); \
54 } else { \
55 this->clearErrorsAndCheckForOOM(); \
56 GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \
57 return this->getErrorAndCheckForOOM(); \
58 } \
59 }()
60
61 //#define USE_NSIGHT
62
63 ///////////////////////////////////////////////////////////////////////////////
64
65 static const GrGLenum gXfermodeEquation2Blend[] = {
66 // Basic OpenGL blend equations.
67 GR_GL_FUNC_ADD,
68 GR_GL_FUNC_SUBTRACT,
69 GR_GL_FUNC_REVERSE_SUBTRACT,
70
71 // GL_KHR_blend_equation_advanced.
72 GR_GL_SCREEN,
73 GR_GL_OVERLAY,
74 GR_GL_DARKEN,
75 GR_GL_LIGHTEN,
76 GR_GL_COLORDODGE,
77 GR_GL_COLORBURN,
78 GR_GL_HARDLIGHT,
79 GR_GL_SOFTLIGHT,
80 GR_GL_DIFFERENCE,
81 GR_GL_EXCLUSION,
82 GR_GL_MULTIPLY,
83 GR_GL_HSL_HUE,
84 GR_GL_HSL_SATURATION,
85 GR_GL_HSL_COLOR,
86 GR_GL_HSL_LUMINOSITY,
87
88 // Illegal... needs to map to something.
89 GR_GL_FUNC_ADD,
90 };
91 static_assert(0 == kAdd_GrBlendEquation);
92 static_assert(1 == kSubtract_GrBlendEquation);
93 static_assert(2 == kReverseSubtract_GrBlendEquation);
94 static_assert(3 == kScreen_GrBlendEquation);
95 static_assert(4 == kOverlay_GrBlendEquation);
96 static_assert(5 == kDarken_GrBlendEquation);
97 static_assert(6 == kLighten_GrBlendEquation);
98 static_assert(7 == kColorDodge_GrBlendEquation);
99 static_assert(8 == kColorBurn_GrBlendEquation);
100 static_assert(9 == kHardLight_GrBlendEquation);
101 static_assert(10 == kSoftLight_GrBlendEquation);
102 static_assert(11 == kDifference_GrBlendEquation);
103 static_assert(12 == kExclusion_GrBlendEquation);
104 static_assert(13 == kMultiply_GrBlendEquation);
105 static_assert(14 == kHSLHue_GrBlendEquation);
106 static_assert(15 == kHSLSaturation_GrBlendEquation);
107 static_assert(16 == kHSLColor_GrBlendEquation);
108 static_assert(17 == kHSLLuminosity_GrBlendEquation);
109 static_assert(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
110
111 static const GrGLenum gXfermodeCoeff2Blend[] = {
112 GR_GL_ZERO,
113 GR_GL_ONE,
114 GR_GL_SRC_COLOR,
115 GR_GL_ONE_MINUS_SRC_COLOR,
116 GR_GL_DST_COLOR,
117 GR_GL_ONE_MINUS_DST_COLOR,
118 GR_GL_SRC_ALPHA,
119 GR_GL_ONE_MINUS_SRC_ALPHA,
120 GR_GL_DST_ALPHA,
121 GR_GL_ONE_MINUS_DST_ALPHA,
122 GR_GL_CONSTANT_COLOR,
123 GR_GL_ONE_MINUS_CONSTANT_COLOR,
124
125 // extended blend coeffs
126 GR_GL_SRC1_COLOR,
127 GR_GL_ONE_MINUS_SRC1_COLOR,
128 GR_GL_SRC1_ALPHA,
129 GR_GL_ONE_MINUS_SRC1_ALPHA,
130
131 // Illegal... needs to map to something.
132 GR_GL_ZERO,
133 };
134
135 //////////////////////////////////////////////////////////////////////////////
136
gl_target_to_binding_index(GrGLenum target)137 static int gl_target_to_binding_index(GrGLenum target) {
138 switch (target) {
139 case GR_GL_TEXTURE_2D:
140 return 0;
141 case GR_GL_TEXTURE_RECTANGLE:
142 return 1;
143 case GR_GL_TEXTURE_EXTERNAL:
144 return 2;
145 }
146 SK_ABORT("Unexpected GL texture target.");
147 }
148
boundID(GrGLenum target) const149 GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
150 return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
151 }
152
hasBeenModified(GrGLenum target) const153 bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
154 return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
155 }
156
setBoundID(GrGLenum target,GrGpuResource::UniqueID resourceID)157 void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) {
158 int targetIndex = gl_target_to_binding_index(target);
159 fTargetBindings[targetIndex].fBoundResourceID = resourceID;
160 fTargetBindings[targetIndex].fHasBeenModified = true;
161 }
162
invalidateForScratchUse(GrGLenum target)163 void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
164 this->setBoundID(target, GrGpuResource::UniqueID());
165 }
166
invalidateAllTargets(bool markUnmodified)167 void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) {
168 for (auto& targetBinding : fTargetBindings) {
169 targetBinding.fBoundResourceID.makeInvalid();
170 if (markUnmodified) {
171 targetBinding.fHasBeenModified = false;
172 }
173 }
174 }
175
176 //////////////////////////////////////////////////////////////////////////////
177
filter_to_gl_mag_filter(GrSamplerState::Filter filter)178 static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
179 switch (filter) {
180 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
181 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR;
182 }
183 SkUNREACHABLE;
184 }
185
filter_to_gl_min_filter(GrSamplerState::Filter filter,GrSamplerState::MipmapMode mm)186 static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter,
187 GrSamplerState::MipmapMode mm) {
188 switch (mm) {
189 case GrSamplerState::MipmapMode::kNone:
190 return filter_to_gl_mag_filter(filter);
191 case GrSamplerState::MipmapMode::kNearest:
192 switch (filter) {
193 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_NEAREST;
194 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_NEAREST;
195 }
196 SkUNREACHABLE;
197 case GrSamplerState::MipmapMode::kLinear:
198 switch (filter) {
199 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_LINEAR;
200 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_LINEAR;
201 }
202 SkUNREACHABLE;
203 }
204 SkUNREACHABLE;
205 }
206
wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,const GrCaps & caps)207 static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
208 const GrCaps& caps) {
209 switch (wrapMode) {
210 case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE;
211 case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT;
212 case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
213 case GrSamplerState::WrapMode::kClampToBorder:
214 // May not be supported but should have been caught earlier
215 SkASSERT(caps.clampToBorderSupport());
216 return GR_GL_CLAMP_TO_BORDER;
217 }
218 SkUNREACHABLE;
219 }
220
221 ///////////////////////////////////////////////////////////////////////////////
222
223 class GrGLGpu::SamplerObjectCache {
224 public:
SamplerObjectCache(GrGLGpu * gpu)225 SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
226 fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers();
227 fTextureUnitStates = std::make_unique<UnitState[]>(fNumTextureUnits);
228 std::fill_n(fSamplers, kNumSamplers, 0);
229 }
230
~SamplerObjectCache()231 ~SamplerObjectCache() {
232 if (!fNumTextureUnits) {
233 // We've already been abandoned.
234 return;
235 }
236 for (GrGLuint sampler : fSamplers) {
237 // The spec states that "zero" values should be silently ignored, however they still
238 // trigger GL errors on some NVIDIA platforms.
239 if (sampler) {
240 GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(1, &sampler));
241 }
242 }
243 }
244
bindSampler(int unitIdx,GrSamplerState state)245 void bindSampler(int unitIdx, GrSamplerState state) {
246 int index = state.asIndex();
247 if (!fSamplers[index]) {
248 GrGLuint s;
249 GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
250 if (!s) {
251 return;
252 }
253 fSamplers[index] = s;
254 GrGLenum minFilter = filter_to_gl_min_filter(state.filter(), state.mipmapMode());
255 GrGLenum magFilter = filter_to_gl_mag_filter(state.filter());
256 GrGLenum wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
257 GrGLenum wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
258 GR_GL_CALL(fGpu->glInterface(),
259 SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
260 GR_GL_CALL(fGpu->glInterface(),
261 SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
262 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
263 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
264 }
265 if (!fTextureUnitStates[unitIdx].fKnown ||
266 fTextureUnitStates[unitIdx].fSamplerIDIfKnown != fSamplers[index]) {
267 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index]));
268 fTextureUnitStates[unitIdx].fSamplerIDIfKnown = fSamplers[index];
269 fTextureUnitStates[unitIdx].fKnown = true;
270 }
271 }
272
unbindSampler(int unitIdx)273 void unbindSampler(int unitIdx) {
274 if (!fTextureUnitStates[unitIdx].fKnown ||
275 fTextureUnitStates[unitIdx].fSamplerIDIfKnown != 0) {
276 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, 0));
277 fTextureUnitStates[unitIdx].fSamplerIDIfKnown = 0;
278 fTextureUnitStates[unitIdx].fKnown = true;
279 }
280 }
281
invalidateBindings()282 void invalidateBindings() {
283 std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
284 }
285
abandon()286 void abandon() {
287 fTextureUnitStates.reset();
288 fNumTextureUnits = 0;
289 }
290
release()291 void release() {
292 if (!fNumTextureUnits) {
293 // We've already been abandoned.
294 return;
295 }
296 GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers));
297 std::fill_n(fSamplers, kNumSamplers, 0);
298 // Deleting a bound sampler implicitly binds sampler 0. We just invalidate all of our
299 // knowledge.
300 std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
301 }
302
303 private:
304 static constexpr int kNumSamplers = GrSamplerState::kNumUniqueSamplers;
305 struct UnitState {
306 bool fKnown = false;
307 GrGLuint fSamplerIDIfKnown = 0;
308 };
309 GrGLGpu* fGpu;
310 std::unique_ptr<UnitState[]> fTextureUnitStates;
311 GrGLuint fSamplers[kNumSamplers];
312 int fNumTextureUnits;
313 };
314
315 ///////////////////////////////////////////////////////////////////////////////
316
Make(sk_sp<const GrGLInterface> interface,const GrContextOptions & options,GrDirectContext * direct)317 sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options,
318 GrDirectContext* direct) {
319 if (!interface) {
320 interface = GrGLMakeNativeInterface();
321 // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated
322 // to GrGLMakeNativeInterface.
323 if (!interface) {
324 interface = sk_ref_sp(GrGLCreateNativeInterface());
325 }
326 if (!interface) {
327 return nullptr;
328 }
329 }
330 #ifdef USE_NSIGHT
331 const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
332 #endif
333 auto glContext = GrGLContext::Make(std::move(interface), options);
334 if (!glContext) {
335 return nullptr;
336 }
337 return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), direct));
338 }
339
GrGLGpu(std::unique_ptr<GrGLContext> ctx,GrDirectContext * dContext)340 GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrDirectContext* dContext)
341 : GrGpu(dContext)
342 , fGLContext(std::move(ctx))
343 , fProgramCache(new ProgramCache(dContext->priv().options().fRuntimeProgramCacheSize))
344 , fHWProgramID(0)
345 , fTempSrcFBOID(0)
346 , fTempDstFBOID(0)
347 , fStencilClearFBOID(0)
348 , fFinishCallbacks(this) {
349 SkASSERT(fGLContext);
350 // Clear errors so we don't get confused whether we caused an error.
351 this->clearErrorsAndCheckForOOM();
352 // Toss out any pre-existing OOM that was hanging around before we got started.
353 this->checkAndResetOOMed();
354
355 this->initCapsAndCompiler(sk_ref_sp(fGLContext->caps()));
356
357 fHWTextureUnitBindings.reset(this->numTextureUnits());
358
359 this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
360 this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
361 this->hwBufferState(GrGpuBufferType::kDrawIndirect)->fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
362 if (GrGLCaps::TransferBufferType::kChromium == this->glCaps().transferBufferType()) {
363 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
364 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
365 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
366 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
367 } else {
368 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
369 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
370 }
371 for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
372 fHWBufferState[i].invalidate();
373 }
374 static_assert(kGrGpuBufferTypeCount == SK_ARRAY_COUNT(fHWBufferState));
375
376 if (this->glCaps().useSamplerObjects()) {
377 fSamplerObjectCache = std::make_unique<SamplerObjectCache>(this);
378 }
379 }
380
~GrGLGpu()381 GrGLGpu::~GrGLGpu() {
382 // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
383 // to release the resources held by the objects themselves.
384 fCopyProgramArrayBuffer.reset();
385 fMipmapProgramArrayBuffer.reset();
386 if (fProgramCache) {
387 fProgramCache->reset();
388 }
389
390 fHWProgram.reset();
391 if (fHWProgramID) {
392 // detach the current program so there is no confusion on OpenGL's part
393 // that we want it to be deleted
394 GL_CALL(UseProgram(0));
395 }
396
397 if (fTempSrcFBOID) {
398 this->deleteFramebuffer(fTempSrcFBOID);
399 }
400 if (fTempDstFBOID) {
401 this->deleteFramebuffer(fTempDstFBOID);
402 }
403 if (fStencilClearFBOID) {
404 this->deleteFramebuffer(fStencilClearFBOID);
405 }
406
407 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
408 if (0 != fCopyPrograms[i].fProgram) {
409 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
410 }
411 }
412
413 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
414 if (0 != fMipmapPrograms[i].fProgram) {
415 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
416 }
417 }
418
419 fSamplerObjectCache.reset();
420
421 fFinishCallbacks.callAll(true);
422 }
423
disconnect(DisconnectType type)424 void GrGLGpu::disconnect(DisconnectType type) {
425 INHERITED::disconnect(type);
426 if (DisconnectType::kCleanup == type) {
427 if (fHWProgramID) {
428 GL_CALL(UseProgram(0));
429 }
430 if (fTempSrcFBOID) {
431 this->deleteFramebuffer(fTempSrcFBOID);
432 }
433 if (fTempDstFBOID) {
434 this->deleteFramebuffer(fTempDstFBOID);
435 }
436 if (fStencilClearFBOID) {
437 this->deleteFramebuffer(fStencilClearFBOID);
438 }
439 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
440 if (fCopyPrograms[i].fProgram) {
441 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
442 }
443 }
444 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
445 if (fMipmapPrograms[i].fProgram) {
446 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
447 }
448 }
449
450 if (fSamplerObjectCache) {
451 fSamplerObjectCache->release();
452 }
453 } else {
454 if (fProgramCache) {
455 fProgramCache->abandon();
456 }
457 if (fSamplerObjectCache) {
458 fSamplerObjectCache->abandon();
459 }
460 }
461
462 fHWProgram.reset();
463 fProgramCache->reset();
464 fProgramCache.reset();
465
466 fHWProgramID = 0;
467 fTempSrcFBOID = 0;
468 fTempDstFBOID = 0;
469 fStencilClearFBOID = 0;
470 fCopyProgramArrayBuffer.reset();
471 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
472 fCopyPrograms[i].fProgram = 0;
473 }
474 fMipmapProgramArrayBuffer.reset();
475 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
476 fMipmapPrograms[i].fProgram = 0;
477 }
478
479 fFinishCallbacks.callAll(/* doDelete */ DisconnectType::kCleanup == type);
480 }
481
pipelineBuilder()482 GrThreadSafePipelineBuilder* GrGLGpu::pipelineBuilder() {
483 return fProgramCache.get();
484 }
485
refPipelineBuilder()486 sk_sp<GrThreadSafePipelineBuilder> GrGLGpu::refPipelineBuilder() {
487 return fProgramCache;
488 }
489
490 ///////////////////////////////////////////////////////////////////////////////
491
onResetContext(uint32_t resetBits)492 void GrGLGpu::onResetContext(uint32_t resetBits) {
493 if (resetBits & kMisc_GrGLBackendState) {
494 // we don't use the zb at all
495 GL_CALL(Disable(GR_GL_DEPTH_TEST));
496 GL_CALL(DepthMask(GR_GL_FALSE));
497
498 // We don't use face culling.
499 GL_CALL(Disable(GR_GL_CULL_FACE));
500 // We do use separate stencil. Our algorithms don't care which face is front vs. back so
501 // just set this to the default for self-consistency.
502 GL_CALL(FrontFace(GR_GL_CCW));
503
504 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
505 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();
506
507 if (GR_IS_GR_GL(this->glStandard())) {
508 #ifndef USE_NSIGHT
509 // Desktop-only state that we never change
510 if (!this->glCaps().isCoreProfile()) {
511 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
512 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
513 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
514 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
515 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
516 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
517 }
518 // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
519 // core profile. This seems like a bug since the core spec removes any mention of
520 // GL_ARB_imaging.
521 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
522 GL_CALL(Disable(GR_GL_COLOR_TABLE));
523 }
524 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
525
526 fHWWireframeEnabled = kUnknown_TriState;
527 #endif
528 // Since ES doesn't support glPointSize at all we always use the VS to
529 // set the point size
530 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
531
532 }
533
534 if (GR_IS_GR_GL_ES(this->glStandard()) &&
535 this->glCaps().fbFetchRequiresEnablePerSample()) {
536 // The arm extension requires specifically enabling MSAA fetching per sample.
537 // On some devices this may have a perf hit. Also multiple render targets are disabled
538 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
539 }
540 fHWWriteToColor = kUnknown_TriState;
541 // we only ever use lines in hairline mode
542 GL_CALL(LineWidth(1));
543 GL_CALL(Disable(GR_GL_DITHER));
544
545 fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
546 }
547
548 if (resetBits & kMSAAEnable_GrGLBackendState) {
549 if (this->glCaps().clientCanDisableMultisample()) {
550 // Restore GL_MULTISAMPLE to its initial state. It being enabled has no effect on draws
551 // to non-MSAA targets.
552 GL_CALL(Enable(GR_GL_MULTISAMPLE));
553 }
554 fHWConservativeRasterEnabled = kUnknown_TriState;
555 }
556
557 fHWActiveTextureUnitIdx = -1; // invalid
558 fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);
559
560 if (resetBits & kTextureBinding_GrGLBackendState) {
561 for (int s = 0; s < this->numTextureUnits(); ++s) {
562 fHWTextureUnitBindings[s].invalidateAllTargets(false);
563 }
564 if (fSamplerObjectCache) {
565 fSamplerObjectCache->invalidateBindings();
566 }
567 }
568
569 if (resetBits & kBlend_GrGLBackendState) {
570 fHWBlendState.invalidate();
571 }
572
573 if (resetBits & kView_GrGLBackendState) {
574 fHWScissorSettings.invalidate();
575 fHWWindowRectsState.invalidate();
576 fHWViewport.invalidate();
577 }
578
579 if (resetBits & kStencil_GrGLBackendState) {
580 fHWStencilSettings.invalidate();
581 fHWStencilTestEnabled = kUnknown_TriState;
582 }
583
584 // Vertex
585 if (resetBits & kVertex_GrGLBackendState) {
586 fHWVertexArrayState.invalidate();
587 this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
588 this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
589 this->hwBufferState(GrGpuBufferType::kDrawIndirect)->invalidate();
590 fHWPatchVertexCount = 0;
591 }
592
593 if (resetBits & kRenderTarget_GrGLBackendState) {
594 fHWBoundRenderTargetUniqueID.makeInvalid();
595 fHWSRGBFramebuffer = kUnknown_TriState;
596 fBoundDrawFramebuffer = 0;
597 }
598
599 // we assume these values
600 if (resetBits & kPixelStore_GrGLBackendState) {
601 if (this->caps()->writePixelsRowBytesSupport() ||
602 this->caps()->transferPixelsToRowBytesSupport()) {
603 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
604 }
605 if (this->glCaps().readPixelsRowBytesSupport()) {
606 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
607 }
608 if (this->glCaps().packFlipYSupport()) {
609 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
610 }
611 }
612
613 if (resetBits & kProgram_GrGLBackendState) {
614 fHWProgramID = 0;
615 fHWProgram.reset();
616 }
617 ++fResetTimestampForTextureParameters;
618 }
619
check_backend_texture(const GrBackendTexture & backendTex,const GrGLCaps & caps,GrGLTexture::Desc * desc,bool skipRectTexSupportCheck=false)620 static bool check_backend_texture(const GrBackendTexture& backendTex,
621 const GrGLCaps& caps,
622 GrGLTexture::Desc* desc,
623 bool skipRectTexSupportCheck = false) {
624 GrGLTextureInfo info;
625 if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) {
626 return false;
627 }
628
629 desc->fSize = {backendTex.width(), backendTex.height()};
630 desc->fTarget = info.fTarget;
631 desc->fID = info.fID;
632 desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
633
634 if (desc->fFormat == GrGLFormat::kUnknown) {
635 return false;
636 }
637 if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) {
638 if (!caps.shaderCaps()->externalTextureSupport()) {
639 return false;
640 }
641 } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) {
642 if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) {
643 return false;
644 }
645 } else if (GR_GL_TEXTURE_2D != desc->fTarget) {
646 return false;
647 }
648 if (backendTex.isProtected()) {
649 // Not supported in GL backend at this time.
650 return false;
651 }
652
653 return true;
654 }
655
onWrapBackendTexture(const GrBackendTexture & backendTex,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)656 sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
657 GrWrapOwnership ownership,
658 GrWrapCacheable cacheable,
659 GrIOType ioType) {
660 GrGLTexture::Desc desc;
661 if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
662 return nullptr;
663 }
664
665 if (kBorrow_GrWrapOwnership == ownership) {
666 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
667 } else {
668 desc.fOwnership = GrBackendObjectOwnership::kOwned;
669 }
670
671 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
672 : GrMipmapStatus::kNotAllocated;
673
674 auto texture = GrGLTexture::MakeWrapped(this, mipmapStatus, desc,
675 backendTex.getGLTextureParams(), cacheable, ioType);
676 if (this->glCaps().isFormatRenderable(backendTex.getBackendFormat(), 1)) {
677 // Pessimistically assume this external texture may have been bound to a FBO.
678 texture->baseLevelWasBoundToFBO();
679 }
680 return std::move(texture);
681 }
682
check_compressed_backend_texture(const GrBackendTexture & backendTex,const GrGLCaps & caps,GrGLTexture::Desc * desc,bool skipRectTexSupportCheck=false)683 static bool check_compressed_backend_texture(const GrBackendTexture& backendTex,
684 const GrGLCaps& caps, GrGLTexture::Desc* desc,
685 bool skipRectTexSupportCheck = false) {
686 GrGLTextureInfo info;
687 if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) {
688 return false;
689 }
690
691 desc->fSize = {backendTex.width(), backendTex.height()};
692 desc->fTarget = info.fTarget;
693 desc->fID = info.fID;
694 desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
695
696 if (desc->fFormat == GrGLFormat::kUnknown) {
697 return false;
698 }
699
700 if (GR_GL_TEXTURE_2D != desc->fTarget) {
701 return false;
702 }
703 if (backendTex.isProtected()) {
704 // Not supported in GL backend at this time.
705 return false;
706 }
707
708 return true;
709 }
710
onWrapCompressedBackendTexture(const GrBackendTexture & backendTex,GrWrapOwnership ownership,GrWrapCacheable cacheable)711 sk_sp<GrTexture> GrGLGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
712 GrWrapOwnership ownership,
713 GrWrapCacheable cacheable) {
714 GrGLTexture::Desc desc;
715 if (!check_compressed_backend_texture(backendTex, this->glCaps(), &desc)) {
716 return nullptr;
717 }
718
719 if (kBorrow_GrWrapOwnership == ownership) {
720 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
721 } else {
722 desc.fOwnership = GrBackendObjectOwnership::kOwned;
723 }
724
725 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
726 : GrMipmapStatus::kNotAllocated;
727
728 auto texture = GrGLTexture::MakeWrapped(this, mipmapStatus, desc,
729 backendTex.getGLTextureParams(), cacheable,
730 kRead_GrIOType);
731 return std::move(texture);
732 }
733
onWrapRenderableBackendTexture(const GrBackendTexture & backendTex,int sampleCnt,GrWrapOwnership ownership,GrWrapCacheable cacheable)734 sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
735 int sampleCnt,
736 GrWrapOwnership ownership,
737 GrWrapCacheable cacheable) {
738 const GrGLCaps& caps = this->glCaps();
739
740 GrGLTexture::Desc desc;
741 if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
742 return nullptr;
743 }
744 SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt));
745 SkASSERT(caps.isFormatTexturable(desc.fFormat));
746
747 // We don't support rendering to a EXTERNAL texture.
748 if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) {
749 return nullptr;
750 }
751
752 if (kBorrow_GrWrapOwnership == ownership) {
753 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
754 } else {
755 desc.fOwnership = GrBackendObjectOwnership::kOwned;
756 }
757
758
759 sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat);
760 SkASSERT(sampleCnt);
761
762 GrGLRenderTarget::IDs rtIDs;
763 if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) {
764 return nullptr;
765 }
766
767 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kDirty
768 : GrMipmapStatus::kNotAllocated;
769
770 sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped(
771 this, sampleCnt, desc, backendTex.getGLTextureParams(), rtIDs, cacheable,
772 mipmapStatus));
773 texRT->baseLevelWasBoundToFBO();
774 return std::move(texRT);
775 }
776
onWrapBackendRenderTarget(const GrBackendRenderTarget & backendRT)777 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
778 GrGLFramebufferInfo info;
779 if (!backendRT.getGLFramebufferInfo(&info)) {
780 return nullptr;
781 }
782
783 if (backendRT.isProtected()) {
784 // Not supported in GL at this time.
785 return nullptr;
786 }
787
788 const auto format = backendRT.getBackendFormat().asGLFormat();
789 if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) {
790 return nullptr;
791 }
792
793 int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format);
794
795 GrGLRenderTarget::IDs rtIDs;
796 if (sampleCount <= 1) {
797 rtIDs.fSingleSampleFBOID = info.fFBOID;
798 rtIDs.fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
799 } else {
800 rtIDs.fSingleSampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
801 rtIDs.fMultisampleFBOID = info.fFBOID;
802 }
803 rtIDs.fMSColorRenderbufferID = 0;
804 rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
805 rtIDs.fTotalMemorySamplesPerPixel = sampleCount;
806
807 return GrGLRenderTarget::MakeWrapped(this, backendRT.dimensions(), format, sampleCount, rtIDs,
808 backendRT.stencilBits());
809 }
810
check_write_and_transfer_input(GrGLTexture * glTex)811 static bool check_write_and_transfer_input(GrGLTexture* glTex) {
812 if (!glTex) {
813 return false;
814 }
815
816 // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
817 if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
818 return false;
819 }
820
821 return true;
822 }
823
onWritePixels(GrSurface * surface,SkIRect rect,GrColorType surfaceColorType,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount,bool prepForTexSampling)824 bool GrGLGpu::onWritePixels(GrSurface* surface,
825 SkIRect rect,
826 GrColorType surfaceColorType,
827 GrColorType srcColorType,
828 const GrMipLevel texels[],
829 int mipLevelCount,
830 bool prepForTexSampling) {
831 auto glTex = static_cast<GrGLTexture*>(surface->asTexture());
832
833 if (!check_write_and_transfer_input(glTex)) {
834 return false;
835 }
836
837 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
838
839 // If we have mips make sure the base/max levels cover the full range so that the uploads go to
840 // the right levels. We've found some Radeons require this.
841 if (mipLevelCount && this->glCaps().mipmapLevelControlSupport()) {
842 auto params = glTex->parameters();
843 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
844 int maxLevel = glTex->maxMipmapLevel();
845 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
846 GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_BASE_LEVEL, 0));
847 nonsamplerState.fBaseMipMapLevel = 0;
848 }
849 if (params->nonsamplerState().fMaxMipmapLevel != maxLevel) {
850 GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_MAX_LEVEL, maxLevel));
851 nonsamplerState.fBaseMipMapLevel = maxLevel;
852 }
853 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
854 }
855
856 SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
857 return this->uploadColorTypeTexData(glTex->format(),
858 surfaceColorType,
859 glTex->dimensions(),
860 glTex->target(),
861 rect,
862 srcColorType,
863 texels,
864 mipLevelCount);
865 }
866
onTransferPixelsTo(GrTexture * texture,SkIRect rect,GrColorType textureColorType,GrColorType bufferColorType,sk_sp<GrGpuBuffer> transferBuffer,size_t offset,size_t rowBytes)867 bool GrGLGpu::onTransferPixelsTo(GrTexture* texture,
868 SkIRect rect,
869 GrColorType textureColorType,
870 GrColorType bufferColorType,
871 sk_sp<GrGpuBuffer> transferBuffer,
872 size_t offset,
873 size_t rowBytes) {
874 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
875
876 // Can't transfer compressed data
877 SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
878
879 if (!check_write_and_transfer_input(glTex)) {
880 return false;
881 }
882
883 static_assert(sizeof(int) == sizeof(int32_t), "");
884
885 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
886
887 SkASSERT(!transferBuffer->isMapped());
888 SkASSERT(!transferBuffer->isCpuBuffer());
889 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer.get());
890 this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
891
892 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
893
894 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
895 const size_t trimRowBytes = rect.width() * bpp;
896 const void* pixels = (void*)offset;
897
898 bool restoreGLRowLength = false;
899 if (trimRowBytes != rowBytes) {
900 // we should have checked for this support already
901 SkASSERT(this->glCaps().transferPixelsToRowBytesSupport());
902 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
903 restoreGLRowLength = true;
904 }
905
906 GrGLFormat textureFormat = glTex->format();
907 // External format and type come from the upload data.
908 GrGLenum externalFormat = 0;
909 GrGLenum externalType = 0;
910 this->glCaps().getTexSubImageExternalFormatAndType(
911 textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType);
912 if (!externalFormat || !externalType) {
913 return false;
914 }
915
916 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
917 GL_CALL(TexSubImage2D(glTex->target(),
918 0,
919 rect.left(),
920 rect.top(),
921 rect.width(),
922 rect.height(),
923 externalFormat,
924 externalType,
925 pixels));
926
927 if (restoreGLRowLength) {
928 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
929 }
930
931 return true;
932 }
933
onTransferPixelsFrom(GrSurface * surface,SkIRect rect,GrColorType surfaceColorType,GrColorType dstColorType,sk_sp<GrGpuBuffer> transferBuffer,size_t offset)934 bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface,
935 SkIRect rect,
936 GrColorType surfaceColorType,
937 GrColorType dstColorType,
938 sk_sp<GrGpuBuffer> transferBuffer,
939 size_t offset) {
940 auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer.get());
941 this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer);
942 auto offsetAsPtr = reinterpret_cast<void*>(offset);
943 return this->readOrTransferPixelsFrom(surface,
944 rect,
945 surfaceColorType,
946 dstColorType,
947 offsetAsPtr,
948 rect.width());
949 }
950
unbindXferBuffer(GrGpuBufferType type)951 void GrGLGpu::unbindXferBuffer(GrGpuBufferType type) {
952 if (this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kARB_PBO &&
953 this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kNV_PBO) {
954 return;
955 }
956 SkASSERT(type == GrGpuBufferType::kXferCpuToGpu || type == GrGpuBufferType::kXferGpuToCpu);
957 auto* xferBufferState = this->hwBufferState(type);
958 if (!xferBufferState->fBufferZeroKnownBound) {
959 GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
960 xferBufferState->fBoundBufferUniqueID.makeInvalid();
961 xferBufferState->fBufferZeroKnownBound = true;
962 }
963 }
964
uploadColorTypeTexData(GrGLFormat textureFormat,GrColorType textureColorType,SkISize texDims,GrGLenum target,SkIRect dstRect,GrColorType srcColorType,const GrMipLevel texels[],int mipLevelCount)965 bool GrGLGpu::uploadColorTypeTexData(GrGLFormat textureFormat,
966 GrColorType textureColorType,
967 SkISize texDims,
968 GrGLenum target,
969 SkIRect dstRect,
970 GrColorType srcColorType,
971 const GrMipLevel texels[],
972 int mipLevelCount) {
973 // If we're uploading compressed data then we should be using uploadCompressedTexData
974 SkASSERT(!GrGLFormatIsCompressed(textureFormat));
975
976 SkASSERT(this->glCaps().isFormatTexturable(textureFormat));
977
978 size_t bpp = GrColorTypeBytesPerPixel(srcColorType);
979
980 // External format and type come from the upload data.
981 GrGLenum externalFormat;
982 GrGLenum externalType;
983 this->glCaps().getTexSubImageExternalFormatAndType(
984 textureFormat, textureColorType, srcColorType, &externalFormat, &externalType);
985 if (!externalFormat || !externalType) {
986 return false;
987 }
988 this->uploadTexData(texDims, target, dstRect, externalFormat, externalType, bpp, texels,
989 mipLevelCount);
990 return true;
991 }
992
uploadColorToTex(GrGLFormat textureFormat,SkISize texDims,GrGLenum target,std::array<float,4> color,uint32_t levelMask)993 bool GrGLGpu::uploadColorToTex(GrGLFormat textureFormat,
994 SkISize texDims,
995 GrGLenum target,
996 std::array<float, 4> color,
997 uint32_t levelMask) {
998 GrColorType colorType;
999 GrGLenum externalFormat, externalType;
1000 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(textureFormat, &externalFormat,
1001 &externalType, &colorType);
1002 if (colorType == GrColorType::kUnknown) {
1003 return false;
1004 }
1005
1006 std::unique_ptr<char[]> pixelStorage;
1007 size_t bpp = 0;
1008 int numLevels = SkMipmap::ComputeLevelCount(texDims) + 1;
1009 SkSTArray<16, GrMipLevel> levels;
1010 levels.resize(numLevels);
1011 SkISize levelDims = texDims;
1012 for (int i = 0; i < numLevels; ++i, levelDims = {std::max(levelDims.width() >> 1, 1),
1013 std::max(levelDims.height() >> 1, 1)}) {
1014 if (levelMask & (1 << i)) {
1015 if (!pixelStorage) {
1016 // Make one tight image at the first size and reuse it for smaller levels.
1017 GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, levelDims);
1018 size_t rb = ii.minRowBytes();
1019 pixelStorage.reset(new char[rb * levelDims.height()]);
1020 if (!GrClearImage(ii, pixelStorage.get(), ii.minRowBytes(), color)) {
1021 return false;
1022 }
1023 bpp = ii.bpp();
1024 }
1025 levels[i] = {pixelStorage.get(), levelDims.width()*bpp, nullptr};
1026 }
1027 }
1028 this->uploadTexData(texDims, target, SkIRect::MakeSize(texDims), externalFormat, externalType,
1029 bpp, levels.begin(), levels.count());
1030 return true;
1031 }
1032
uploadTexData(SkISize texDims,GrGLenum target,SkIRect dstRect,GrGLenum externalFormat,GrGLenum externalType,size_t bpp,const GrMipLevel texels[],int mipLevelCount)1033 void GrGLGpu::uploadTexData(SkISize texDims,
1034 GrGLenum target,
1035 SkIRect dstRect,
1036 GrGLenum externalFormat,
1037 GrGLenum externalType,
1038 size_t bpp,
1039 const GrMipLevel texels[],
1040 int mipLevelCount) {
1041 SkASSERT(!texDims.isEmpty());
1042 SkASSERT(!dstRect.isEmpty());
1043 SkASSERT(SkIRect::MakeSize(texDims).contains(dstRect));
1044 SkASSERT(mipLevelCount > 0 && mipLevelCount <= SkMipmap::ComputeLevelCount(texDims) + 1);
1045 SkASSERT(mipLevelCount == 1 || dstRect == SkIRect::MakeSize(texDims));
1046
1047 const GrGLCaps& caps = this->glCaps();
1048
1049 bool restoreGLRowLength = false;
1050
1051 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1052 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
1053
1054 SkISize dims = dstRect.size();
1055 for (int level = 0; level < mipLevelCount; ++level, dims = {std::max(dims.width() >> 1, 1),
1056 std::max(dims.height() >> 1, 1)}) {
1057 if (!texels[level].fPixels) {
1058 continue;
1059 }
1060 const size_t trimRowBytes = dims.width() * bpp;
1061 const size_t rowBytes = texels[level].fRowBytes;
1062
1063 if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) {
1064 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
1065 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
1066 restoreGLRowLength = true;
1067 } else {
1068 SkASSERT(rowBytes == trimRowBytes);
1069 }
1070
1071 GL_CALL(TexSubImage2D(target, level, dstRect.x(), dstRect.y(), dims.width(), dims.height(),
1072 externalFormat, externalType, texels[level].fPixels));
1073 }
1074 if (restoreGLRowLength) {
1075 SkASSERT(caps.writePixelsRowBytesSupport());
1076 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1077 }
1078 }
1079
uploadCompressedTexData(SkImage::CompressionType compressionType,GrGLFormat format,SkISize dimensions,GrMipmapped mipMapped,GrGLenum target,const void * data,size_t dataSize)1080 bool GrGLGpu::uploadCompressedTexData(SkImage::CompressionType compressionType,
1081 GrGLFormat format,
1082 SkISize dimensions,
1083 GrMipmapped mipMapped,
1084 GrGLenum target,
1085 const void* data, size_t dataSize) {
1086 SkASSERT(format != GrGLFormat::kUnknown);
1087 const GrGLCaps& caps = this->glCaps();
1088
1089 // We only need the internal format for compressed 2D textures.
1090 GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format);
1091 if (!internalFormat) {
1092 return false;
1093 }
1094
1095 SkASSERT(compressionType != SkImage::CompressionType::kNone);
1096
1097 bool useTexStorage = caps.formatSupportsTexStorage(format);
1098
1099 int numMipLevels = 1;
1100 if (mipMapped == GrMipmapped::kYes) {
1101 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1102 }
1103
1104 // TODO: Make sure that the width and height that we pass to OpenGL
1105 // is a multiple of the block size.
1106
1107 if (useTexStorage) {
1108 // We never resize or change formats of textures.
1109 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, numMipLevels, internalFormat,
1110 dimensions.width(), dimensions.height()));
1111 if (error != GR_GL_NO_ERROR) {
1112 return false;
1113 }
1114
1115 size_t offset = 0;
1116 for (int level = 0; level < numMipLevels; ++level) {
1117
1118 size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1119 nullptr, false);
1120
1121 error = GL_ALLOC_CALL(CompressedTexSubImage2D(target,
1122 level,
1123 0, // left
1124 0, // top
1125 dimensions.width(),
1126 dimensions.height(),
1127 internalFormat,
1128 SkToInt(levelDataSize),
1129 &((char*)data)[offset]));
1130
1131 if (error != GR_GL_NO_ERROR) {
1132 return false;
1133 }
1134
1135 offset += levelDataSize;
1136 dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
1137 }
1138 } else {
1139 size_t offset = 0;
1140
1141 for (int level = 0; level < numMipLevels; ++level) {
1142 size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1143 nullptr, false);
1144
1145 const char* rawLevelData = &((char*)data)[offset];
1146 GrGLenum error = GL_ALLOC_CALL(CompressedTexImage2D(target,
1147 level,
1148 internalFormat,
1149 dimensions.width(),
1150 dimensions.height(),
1151 0, // border
1152 SkToInt(levelDataSize),
1153 rawLevelData));
1154
1155 if (error != GR_GL_NO_ERROR) {
1156 return false;
1157 }
1158
1159 offset += levelDataSize;
1160 dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
1161 }
1162 }
1163 return true;
1164 }
1165
renderbufferStorageMSAA(const GrGLContext & ctx,int sampleCount,GrGLenum format,int width,int height)1166 bool GrGLGpu::renderbufferStorageMSAA(const GrGLContext& ctx, int sampleCount, GrGLenum format,
1167 int width, int height) {
1168 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
1169 GrGLenum error;
1170 switch (ctx.caps()->msFBOType()) {
1171 case GrGLCaps::kStandard_MSFBOType:
1172 error = GL_ALLOC_CALL(RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, sampleCount,
1173 format, width, height));
1174 break;
1175 case GrGLCaps::kES_Apple_MSFBOType:
1176 error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2APPLE(
1177 GR_GL_RENDERBUFFER, sampleCount, format, width, height));
1178 break;
1179 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
1180 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
1181 error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2EXT(
1182 GR_GL_RENDERBUFFER, sampleCount, format, width, height));
1183 break;
1184 case GrGLCaps::kNone_MSFBOType:
1185 SkUNREACHABLE;
1186 break;
1187 }
1188 return error == GR_GL_NO_ERROR;
1189 }
1190
createRenderTargetObjects(const GrGLTexture::Desc & desc,int sampleCount,GrGLRenderTarget::IDs * rtIDs)1191 bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc,
1192 int sampleCount,
1193 GrGLRenderTarget::IDs* rtIDs) {
1194 rtIDs->fMSColorRenderbufferID = 0;
1195 rtIDs->fMultisampleFBOID = 0;
1196 rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
1197 rtIDs->fSingleSampleFBOID = 0;
1198 rtIDs->fTotalMemorySamplesPerPixel = 0;
1199
1200 SkScopeExit cleanupOnFail([&] {
1201 if (rtIDs->fMSColorRenderbufferID) {
1202 GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1203 }
1204 if (rtIDs->fMultisampleFBOID != rtIDs->fSingleSampleFBOID) {
1205 this->deleteFramebuffer(rtIDs->fMultisampleFBOID);
1206 }
1207 if (rtIDs->fSingleSampleFBOID) {
1208 this->deleteFramebuffer(rtIDs->fSingleSampleFBOID);
1209 }
1210 });
1211
1212 GrGLenum colorRenderbufferFormat = 0; // suppress warning
1213
1214 if (desc.fFormat == GrGLFormat::kUnknown) {
1215 return false;
1216 }
1217
1218 if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
1219 return false;
1220 }
1221
1222 GL_CALL(GenFramebuffers(1, &rtIDs->fSingleSampleFBOID));
1223 if (!rtIDs->fSingleSampleFBOID) {
1224 return false;
1225 }
1226
1227 // If we are using multisampling we will create two FBOS. We render to one and then resolve to
1228 // the texture bound to the other. The exception is the IMG multisample extension. With this
1229 // extension the texture is multisampled when rendered to and then auto-resolves it when it is
1230 // rendered from.
1231 if (sampleCount <= 1) {
1232 rtIDs->fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
1233 } else if (this->glCaps().usesImplicitMSAAResolve()) {
1234 // GrGLRenderTarget target will configure the FBO as multisample or not base on need.
1235 rtIDs->fMultisampleFBOID = rtIDs->fSingleSampleFBOID;
1236 } else {
1237 GL_CALL(GenFramebuffers(1, &rtIDs->fMultisampleFBOID));
1238 if (!rtIDs->fMultisampleFBOID) {
1239 return false;
1240 }
1241 GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1242 if (!rtIDs->fMSColorRenderbufferID) {
1243 return false;
1244 }
1245 colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat);
1246 }
1247
1248 #if defined(__has_feature)
1249 #define IS_TSAN __has_feature(thread_sanitizer)
1250 #else
1251 #define IS_TSAN 0
1252 #endif
1253
1254 // below here we may bind the FBO
1255 fHWBoundRenderTargetUniqueID.makeInvalid();
1256 if (rtIDs->fMSColorRenderbufferID) {
1257 SkASSERT(sampleCount > 1);
1258 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID));
1259 if (!this->renderbufferStorageMSAA(*fGLContext, sampleCount, colorRenderbufferFormat,
1260 desc.fSize.width(), desc.fSize.height())) {
1261 return false;
1262 }
1263 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fMultisampleFBOID);
1264 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1265 GR_GL_COLOR_ATTACHMENT0,
1266 GR_GL_RENDERBUFFER,
1267 rtIDs->fMSColorRenderbufferID));
1268 // See skbug.com/12644
1269 #if !IS_TSAN
1270 if (!this->glCaps().skipErrorChecks()) {
1271 GrGLenum status;
1272 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1273 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1274 return false;
1275 }
1276 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1277 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1278 GR_GL_COLOR_ATTACHMENT0,
1279 GR_GL_RENDERBUFFER,
1280 0));
1281 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1282 GR_GL_COLOR_ATTACHMENT0,
1283 GR_GL_RENDERBUFFER,
1284 rtIDs->fMSColorRenderbufferID));
1285 }
1286 }
1287 #endif
1288 rtIDs->fTotalMemorySamplesPerPixel += sampleCount;
1289 }
1290 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fSingleSampleFBOID);
1291 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1292 GR_GL_COLOR_ATTACHMENT0,
1293 desc.fTarget,
1294 desc.fID,
1295 0));
1296 // See skbug.com/12644
1297 #if !IS_TSAN
1298 if (!this->glCaps().skipErrorChecks()) {
1299 GrGLenum status;
1300 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1301 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1302 return false;
1303 }
1304 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1305 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1306 GR_GL_COLOR_ATTACHMENT0,
1307 desc.fTarget,
1308 0,
1309 0));
1310 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1311 GR_GL_COLOR_ATTACHMENT0,
1312 desc.fTarget,
1313 desc.fID,
1314 0));
1315 }
1316 }
1317 #endif
1318
1319 #undef IS_TSAN
1320 ++rtIDs->fTotalMemorySamplesPerPixel;
1321
1322 // We did it!
1323 cleanupOnFail.clear();
1324 return true;
1325 }
1326
1327 // good to set a break-point here to know when createTexture fails
return_null_texture()1328 static sk_sp<GrTexture> return_null_texture() {
1329 // SkDEBUGFAIL("null texture");
1330 return nullptr;
1331 }
1332
set_initial_texture_params(const GrGLInterface * interface,GrGLenum target)1333 static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params(
1334 const GrGLInterface* interface, GrGLenum target) {
1335 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1336 // drivers have a bug where an FBO won't be complete if it includes a
1337 // texture that is not mipmap complete (considering the filter in use).
1338 GrGLTextureParameters::SamplerOverriddenState state;
1339 state.fMinFilter = GR_GL_NEAREST;
1340 state.fMagFilter = GR_GL_NEAREST;
1341 state.fWrapS = GR_GL_CLAMP_TO_EDGE;
1342 state.fWrapT = GR_GL_CLAMP_TO_EDGE;
1343 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter));
1344 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter));
1345 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS));
1346 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT));
1347 return state;
1348 }
1349
onCreateTexture(SkISize dimensions,const GrBackendFormat & format,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected isProtected,int mipLevelCount,uint32_t levelClearMask)1350 sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions,
1351 const GrBackendFormat& format,
1352 GrRenderable renderable,
1353 int renderTargetSampleCnt,
1354 SkBudgeted budgeted,
1355 GrProtected isProtected,
1356 int mipLevelCount,
1357 uint32_t levelClearMask) {
1358 // We don't support protected textures in GL.
1359 if (isProtected == GrProtected::kYes) {
1360 return nullptr;
1361 }
1362 SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1);
1363
1364 SkASSERT(mipLevelCount > 0);
1365 GrMipmapStatus mipmapStatus =
1366 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1367 GrGLTextureParameters::SamplerOverriddenState initialState;
1368 GrGLTexture::Desc texDesc;
1369 texDesc.fSize = dimensions;
1370 switch (format.textureType()) {
1371 case GrTextureType::kExternal:
1372 case GrTextureType::kNone:
1373 return nullptr;
1374 case GrTextureType::k2D:
1375 texDesc.fTarget = GR_GL_TEXTURE_2D;
1376 break;
1377 case GrTextureType::kRectangle:
1378 if (mipLevelCount > 1 || !this->glCaps().rectangleTextureSupport()) {
1379 return nullptr;
1380 }
1381 texDesc.fTarget = GR_GL_TEXTURE_RECTANGLE;
1382 break;
1383 }
1384 texDesc.fFormat = format.asGLFormat();
1385 texDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1386 SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown);
1387 SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat));
1388
1389 texDesc.fID = this->createTexture(dimensions, texDesc.fFormat, texDesc.fTarget, renderable,
1390 &initialState, mipLevelCount);
1391
1392 if (!texDesc.fID) {
1393 return return_null_texture();
1394 }
1395
1396 sk_sp<GrGLTexture> tex;
1397 if (renderable == GrRenderable::kYes) {
1398 // unbind the texture from the texture unit before binding it to the frame buffer
1399 GL_CALL(BindTexture(texDesc.fTarget, 0));
1400 GrGLRenderTarget::IDs rtIDDesc;
1401
1402 if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) {
1403 GL_CALL(DeleteTextures(1, &texDesc.fID));
1404 return return_null_texture();
1405 }
1406 tex = sk_make_sp<GrGLTextureRenderTarget>(
1407 this, budgeted, renderTargetSampleCnt, texDesc, rtIDDesc, mipmapStatus);
1408 tex->baseLevelWasBoundToFBO();
1409 } else {
1410 tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipmapStatus);
1411 }
1412 // The non-sampler params are still at their default values.
1413 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1414 fResetTimestampForTextureParameters);
1415 if (levelClearMask) {
1416 if (this->glCaps().clearTextureSupport()) {
1417 GrGLenum externalFormat, externalType;
1418 GrColorType colorType;
1419 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(
1420 texDesc.fFormat, &externalFormat, &externalType, &colorType);
1421 for (int i = 0; i < mipLevelCount; ++i) {
1422 if (levelClearMask & (1U << i)) {
1423 GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType,
1424 nullptr));
1425 }
1426 }
1427 } else if (this->glCaps().canFormatBeFBOColorAttachment(format.asGLFormat()) &&
1428 !this->glCaps().performColorClearsAsDraws()) {
1429 this->flushScissorTest(GrScissorTest::kDisabled);
1430 this->disableWindowRectangles();
1431 this->flushColorWrite(true);
1432 this->flushClearColor({0, 0, 0, 0});
1433 for (int i = 0; i < mipLevelCount; ++i) {
1434 if (levelClearMask & (1U << i)) {
1435 this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER,
1436 kDst_TempFBOTarget);
1437 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1438 this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER);
1439 }
1440 }
1441 fHWBoundRenderTargetUniqueID.makeInvalid();
1442 } else {
1443 this->bindTextureToScratchUnit(texDesc.fTarget, tex->textureID());
1444 std::array<float, 4> zeros = {};
1445 this->uploadColorToTex(texDesc.fFormat,
1446 texDesc.fSize,
1447 texDesc.fTarget,
1448 zeros,
1449 levelClearMask);
1450 }
1451 }
1452 return std::move(tex);
1453 }
1454
onCreateCompressedTexture(SkISize dimensions,const GrBackendFormat & format,SkBudgeted budgeted,GrMipmapped mipMapped,GrProtected isProtected,const void * data,size_t dataSize)1455 sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions,
1456 const GrBackendFormat& format,
1457 SkBudgeted budgeted,
1458 GrMipmapped mipMapped,
1459 GrProtected isProtected,
1460 const void* data, size_t dataSize) {
1461 // We don't support protected textures in GL.
1462 if (isProtected == GrProtected::kYes) {
1463 return nullptr;
1464 }
1465 SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1466
1467 GrGLTextureParameters::SamplerOverriddenState initialState;
1468 GrGLTexture::Desc desc;
1469 desc.fSize = dimensions;
1470 desc.fTarget = GR_GL_TEXTURE_2D;
1471 desc.fOwnership = GrBackendObjectOwnership::kOwned;
1472 desc.fFormat = format.asGLFormat();
1473 desc.fID = this->createCompressedTexture2D(desc.fSize, compression, desc.fFormat,
1474 mipMapped, &initialState);
1475 if (!desc.fID) {
1476 return nullptr;
1477 }
1478
1479 if (data) {
1480 if (!this->uploadCompressedTexData(compression, desc.fFormat, dimensions, mipMapped,
1481 GR_GL_TEXTURE_2D, data, dataSize)) {
1482 GL_CALL(DeleteTextures(1, &desc.fID));
1483 return nullptr;
1484 }
1485 }
1486
1487 // Unbind this texture from the scratch texture unit.
1488 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1489
1490 GrMipmapStatus mipmapStatus = mipMapped == GrMipmapped::kYes
1491 ? GrMipmapStatus::kValid
1492 : GrMipmapStatus::kNotAllocated;
1493
1494 auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, mipmapStatus);
1495 // The non-sampler params are still at their default values.
1496 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1497 fResetTimestampForTextureParameters);
1498 return std::move(tex);
1499 }
1500
onCreateCompressedTexture(SkISize dimensions,const GrBackendFormat & format,SkBudgeted budgeted,GrMipmapped mipMapped,GrProtected isProtected,OH_NativeBuffer * nativeBuffer,size_t bufferSize)1501 sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions,
1502 const GrBackendFormat& format,
1503 SkBudgeted budgeted,
1504 GrMipmapped mipMapped,
1505 GrProtected isProtected,
1506 OH_NativeBuffer* nativeBuffer,
1507 size_t bufferSize) {
1508 SkASSERT(!"unimplemented");
1509 return nullptr;
1510 }
1511
onCreateCompressedBackendTexture(SkISize dimensions,const GrBackendFormat & format,GrMipmapped mipMapped,GrProtected isProtected)1512 GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture(
1513 SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped,
1514 GrProtected isProtected) {
1515 // We don't support protected textures in GL.
1516 if (isProtected == GrProtected::kYes) {
1517 return {};
1518 }
1519
1520 this->handleDirtyContext();
1521
1522 GrGLFormat glFormat = format.asGLFormat();
1523 if (glFormat == GrGLFormat::kUnknown) {
1524 return {};
1525 }
1526
1527 SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1528
1529 GrGLTextureInfo info;
1530 GrGLTextureParameters::SamplerOverriddenState initialState;
1531
1532 info.fTarget = GR_GL_TEXTURE_2D;
1533 info.fFormat = GrGLFormatToEnum(glFormat);
1534 info.fID = this->createCompressedTexture2D(dimensions, compression, glFormat,
1535 mipMapped, &initialState);
1536 if (!info.fID) {
1537 return {};
1538 }
1539
1540 // Unbind this texture from the scratch texture unit.
1541 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1542
1543 auto parameters = sk_make_sp<GrGLTextureParameters>();
1544 // The non-sampler params are still at their default values.
1545 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1546 fResetTimestampForTextureParameters);
1547
1548 return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info,
1549 std::move(parameters));
1550 }
1551
1552 bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1553 sk_sp<GrRefCntedCallback> finishedCallback,
1554 const void* data,
1555 size_t length) {
1556 GrGLTextureInfo info;
1557 SkAssertResult(backendTexture.getGLTextureInfo(&info));
1558
1559 GrBackendFormat format = backendTexture.getBackendFormat();
1560 GrGLFormat glFormat = format.asGLFormat();
1561 if (glFormat == GrGLFormat::kUnknown) {
1562 return false;
1563 }
1564 SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1565
1566 GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;
1567
1568 this->bindTextureToScratchUnit(info.fTarget, info.fID);
1569
1570 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
1571 // so that the uploads go to the right levels.
1572 if (backendTexture.hasMipMaps() && this->glCaps().mipmapLevelControlSupport()) {
1573 auto params = backendTexture.getGLTextureParams();
1574 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
1575 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
1576 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
1577 nonsamplerState.fBaseMipMapLevel = 0;
1578 }
1579 int numMipLevels =
1580 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
1581 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
1582 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
1583             nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
1584 }
1585 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
1586 }
1587
1588 bool result = this->uploadCompressedTexData(compression,
1589 glFormat,
1590 backendTexture.dimensions(),
1591 mipMapped,
1592 GR_GL_TEXTURE_2D,
1593 data,
1594 length);
1595
1596 // Unbind this texture from the scratch texture unit.
1597 this->bindTextureToScratchUnit(info.fTarget, 0);
1598
1599 return result;
1600 }
1601
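// Lazily determines which of the caps' stencil formats can share an FBO with 'format' as the
// color attachment. A small test texture and renderbuffer are attached to a temporary FBO and
// each candidate stencil format is tried until the framebuffer reports complete; the resulting
// index is cached on the caps so the probe only runs once per format.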
1602 int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) {
1603 static const int kSize = 16;
1604 SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format));
1605
1606 if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) {
1607 // Default to unsupported, set this if we find a stencil format that works.
1608 int firstWorkingStencilFormatIndex = -1;
1609
1610 GrGLuint colorID = this->createTexture({kSize, kSize}, format, GR_GL_TEXTURE_2D,
1611 GrRenderable::kYes, nullptr, 1);
1612 if (!colorID) {
1613 return -1;
1614 }
1615 // unbind the texture from the texture unit before binding it to the frame buffer
1616 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1617
1618 // Create Framebuffer
1619 GrGLuint fb = 0;
1620 GL_CALL(GenFramebuffers(1, &fb));
1621 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb);
1622 fHWBoundRenderTargetUniqueID.makeInvalid();
1623 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1624 GR_GL_COLOR_ATTACHMENT0,
1625 GR_GL_TEXTURE_2D,
1626 colorID,
1627 0));
1628 GrGLuint sbRBID = 0;
1629 GL_CALL(GenRenderbuffers(1, &sbRBID));
1630
1631         // Look over the stencil formats until we find a compatible one.
1632 int stencilFmtCnt = this->glCaps().stencilFormats().count();
1633 if (sbRBID) {
1634 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1635 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1636 GrGLFormat sFmt = this->glCaps().stencilFormats()[i];
1637 GrGLenum error = GL_ALLOC_CALL(RenderbufferStorage(
1638 GR_GL_RENDERBUFFER, GrGLFormatToEnum(sFmt), kSize, kSize));
1639 if (error == GR_GL_NO_ERROR) {
1640 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1641 GR_GL_STENCIL_ATTACHMENT,
1642 GR_GL_RENDERBUFFER, sbRBID));
1643 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1644 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1645 GR_GL_DEPTH_ATTACHMENT,
1646 GR_GL_RENDERBUFFER, sbRBID));
1647 } else {
1648 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1649 GR_GL_DEPTH_ATTACHMENT,
1650 GR_GL_RENDERBUFFER, 0));
1651 }
1652 GrGLenum status;
1653 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1654 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1655 firstWorkingStencilFormatIndex = i;
1656 break;
1657 }
1658 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1659 GR_GL_STENCIL_ATTACHMENT,
1660 GR_GL_RENDERBUFFER, 0));
1661 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1662 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1663 GR_GL_DEPTH_ATTACHMENT,
1664 GR_GL_RENDERBUFFER, 0));
1665 }
1666 }
1667 }
1668 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1669 }
1670 GL_CALL(DeleteTextures(1, &colorID));
1671 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
1672 this->deleteFramebuffer(fb);
1673 fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex);
1674 }
1675 return this->glCaps().getStencilFormatIndexForFormat(format);
1676 }
1677
1678 static void set_khr_debug_label(GrGLGpu* gpu, const GrGLuint id) {
1679 if (gpu->glCaps().debugSupport()) {
1680 SkString label = SkStringPrintf("Skia_Texture_%d", id);
1681 GR_GL_CALL(gpu->glInterface(), ObjectLabel(GR_GL_TEXTURE, id, -1, label.c_str()));
1682 }
1683 }
1684
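// Creates and binds a 2D texture object for compressed data and applies the default sampler
// parameters. The compressed storage/data upload happens separately (see uploadCompressedTexData()).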
1685 GrGLuint GrGLGpu::createCompressedTexture2D(
1686 SkISize dimensions,
1687 SkImage::CompressionType compression,
1688 GrGLFormat format,
1689 GrMipmapped mipMapped,
1690 GrGLTextureParameters::SamplerOverriddenState* initialState) {
1691 if (format == GrGLFormat::kUnknown) {
1692 return 0;
1693 }
1694 GrGLuint id = 0;
1695 GL_CALL(GenTextures(1, &id));
1696 if (!id) {
1697 return 0;
1698 }
1699
1700 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id);
1701
1702 set_khr_debug_label(this, id);
1703
1704 *initialState = set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D);
1705
1706 return id;
1707 }
1708
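// Creates a 2D texture object and allocates storage for 'mipLevelCount' levels, preferring
// glTexStorage2D when the format supports it and otherwise allocating each level with a null
// glTexImage2D. Returns 0 (and deletes the texture) if allocation fails.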
1709 GrGLuint GrGLGpu::createTexture(SkISize dimensions,
1710 GrGLFormat format,
1711 GrGLenum target,
1712 GrRenderable renderable,
1713 GrGLTextureParameters::SamplerOverriddenState* initialState,
1714 int mipLevelCount) {
1715 SkASSERT(format != GrGLFormat::kUnknown);
1716 SkASSERT(!GrGLFormatIsCompressed(format));
1717
1718 GrGLuint id = 0;
1719 GL_CALL(GenTextures(1, &id));
1720
1721 if (!id) {
1722 return 0;
1723 }
1724
1725 this->bindTextureToScratchUnit(target, id);
1726
1727 set_khr_debug_label(this, id);
1728
1729 if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) {
1730 // provides a hint about how this texture will be used
1731 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT));
1732 }
1733
1734 if (initialState) {
1735 *initialState = set_initial_texture_params(this->glInterface(), target);
1736 } else {
1737 set_initial_texture_params(this->glInterface(), target);
1738 }
1739
1740 GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format);
1741
1742 bool success = false;
1743 if (internalFormat) {
1744 if (this->glCaps().formatSupportsTexStorage(format)) {
1745 auto levelCount = std::max(mipLevelCount, 1);
1746 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, levelCount, internalFormat,
1747 dimensions.width(), dimensions.height()));
1748 success = (error == GR_GL_NO_ERROR);
1749 } else {
1750 GrGLenum externalFormat, externalType;
1751 this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType);
1752 GrGLenum error = GR_GL_NO_ERROR;
1753 if (externalFormat && externalType) {
1754 for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) {
1755 const int twoToTheMipLevel = 1 << level;
1756 const int currentWidth = std::max(1, dimensions.width() / twoToTheMipLevel);
1757 const int currentHeight = std::max(1, dimensions.height() / twoToTheMipLevel);
1758 error = GL_ALLOC_CALL(TexImage2D(target, level, internalFormat, currentWidth,
1759 currentHeight, 0, externalFormat, externalType,
1760 nullptr));
1761 }
1762 success = (error == GR_GL_NO_ERROR);
1763 }
1764 }
1765 }
1766 if (success) {
1767 return id;
1768 }
1769 GL_CALL(DeleteTextures(1, &id));
1770 return 0;
1771 }
1772
1773 sk_sp<GrAttachment> GrGLGpu::makeStencilAttachment(const GrBackendFormat& colorFormat,
1774 SkISize dimensions, int numStencilSamples) {
1775 int sIdx = this->getCompatibleStencilIndex(colorFormat.asGLFormat());
1776 if (sIdx < 0) {
1777 return nullptr;
1778 }
1779 GrGLFormat sFmt = this->glCaps().stencilFormats()[sIdx];
1780
1781 auto stencil = GrGLAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1782 if (stencil) {
1783 fStats.incStencilAttachmentCreates();
1784 }
1785 return std::move(stencil);
1786 }
1787
1788 sk_sp<GrAttachment> GrGLGpu::makeMSAAAttachment(SkISize dimensions, const GrBackendFormat& format,
1789 int numSamples, GrProtected isProtected,
1790 GrMemoryless isMemoryless) {
1791 SkASSERT(isMemoryless == GrMemoryless::kNo);
1792 return GrGLAttachment::MakeMSAA(this, dimensions, numSamples, format.asGLFormat());
1793 }
1794
1795 ////////////////////////////////////////////////////////////////////////////////
1796
1797 sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
1798 GrAccessPattern accessPattern, const void* data) {
1799 return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
1800 }
1801
1802 void GrGLGpu::flushScissorTest(GrScissorTest scissorTest) {
1803 if (GrScissorTest::kEnabled == scissorTest) {
1804 if (kYes_TriState != fHWScissorSettings.fEnabled) {
1805 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1806 fHWScissorSettings.fEnabled = kYes_TriState;
1807 }
1808 } else {
1809 if (kNo_TriState != fHWScissorSettings.fEnabled) {
1810 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
1811 fHWScissorSettings.fEnabled = kNo_TriState;
1812 }
1813 }
1814 }
1815
1816 void GrGLGpu::flushScissorRect(const SkIRect& scissor, int rtHeight, GrSurfaceOrigin rtOrigin) {
1817 SkASSERT(fHWScissorSettings.fEnabled == TriState::kYes_TriState);
1818 auto nativeScissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissor);
1819 if (fHWScissorSettings.fRect != nativeScissor) {
1820 GL_CALL(Scissor(nativeScissor.fX, nativeScissor.fY, nativeScissor.fWidth,
1821 nativeScissor.fHeight));
1822 fHWScissorSettings.fRect = nativeScissor;
1823 }
1824 }
1825
1826 void GrGLGpu::flushViewport(const SkIRect& viewport, int rtHeight, GrSurfaceOrigin rtOrigin) {
1827 auto nativeViewport = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, viewport);
1828 if (fHWViewport != nativeViewport) {
1829 GL_CALL(Viewport(nativeViewport.fX, nativeViewport.fY,
1830 nativeViewport.fWidth, nativeViewport.fHeight));
1831 fHWViewport = nativeViewport;
1832 }
1833 }
1834
1835 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
1836 const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
1837 #ifndef USE_NSIGHT
1838 typedef GrWindowRectsState::Mode Mode;
1839 // Window rects can't be used on-screen.
1840 SkASSERT(!windowState.enabled() || !rt->glRTFBOIDis0());
1841 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
1842
1843 if (!this->caps()->maxWindowRectangles() ||
1844 fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) {
1845 return;
1846 }
1847
1848 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
1849 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
1850 int numWindows = std::min(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
1851 SkASSERT(windowState.numWindows() == numWindows);
1852
1853 GrNativeRect glwindows[GrWindowRectangles::kMaxWindows];
1854 const SkIRect* skwindows = windowState.windows().data();
1855 for (int i = 0; i < numWindows; ++i) {
1856 glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]);
1857 }
1858
1859 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
1860 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
1861
1862 fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState);
1863 #endif
1864 }
1865
1866 void GrGLGpu::disableWindowRectangles() {
1867 #ifndef USE_NSIGHT
1868 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
1869 return;
1870 }
1871 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
1872 fHWWindowRectsState.setDisabled();
1873 #endif
1874 }
1875
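// Binds the GrGLProgram for 'programInfo' (compiling it if necessary) and flushes the GL state
// it depends on: blend, uniforms, stencil, scissor, window rectangles, conservative raster,
// wireframe, and finally the render target binding itself.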
1876 bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget, bool useMultisampleFBO,
1877 const GrProgramInfo& programInfo) {
1878 this->handleDirtyContext();
1879
1880 sk_sp<GrGLProgram> program = fProgramCache->findOrCreateProgram(this->getContext(),
1881 programInfo);
1882 if (!program) {
1883 GrCapsDebugf(this->caps(), "Failed to create program!\n");
1884 return false;
1885 }
1886
1887 this->flushProgram(std::move(program));
1888
1889 if (GrPrimitiveType::kPatches == programInfo.primitiveType()) {
1890 this->flushPatchVertexCount(programInfo.tessellationPatchVertexCount());
1891 }
1892
1893 // Swizzle the blend to match what the shader will output.
1894 this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(),
1895 programInfo.pipeline().writeSwizzle());
1896
1897 fHWProgram->updateUniforms(renderTarget, programInfo);
1898
1899 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
1900 GrStencilSettings stencil;
1901 if (programInfo.isStencilEnabled()) {
1902 SkASSERT(glRT->getStencilAttachment(useMultisampleFBO));
1903 stencil.reset(*programInfo.userStencilSettings(),
1904 programInfo.pipeline().hasStencilClip(),
1905 glRT->numStencilBits(useMultisampleFBO));
1906 }
1907 this->flushStencil(stencil, programInfo.origin());
1908 this->flushScissorTest(GrScissorTest(programInfo.pipeline().isScissorTestEnabled()));
1909 this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(),
1910 glRT, programInfo.origin());
1911 this->flushConservativeRasterState(programInfo.pipeline().usesConservativeRaster());
1912 this->flushWireframeState(programInfo.pipeline().isWireframe());
1913
1914 // This must come after textures are flushed because a texture may need
1915 // to be msaa-resolved (which will modify bound FBO state).
1916 this->flushRenderTarget(glRT, useMultisampleFBO);
1917
1918 return true;
1919 }
1920
1921 void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
1922 if (!program) {
1923 fHWProgram.reset();
1924 fHWProgramID = 0;
1925 return;
1926 }
1927 SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
1928 if (program == fHWProgram) {
1929 return;
1930 }
1931 auto id = program->programID();
1932 SkASSERT(id);
1933 GL_CALL(UseProgram(id));
1934 fHWProgram = std::move(program);
1935 fHWProgramID = id;
1936 }
1937
1938 void GrGLGpu::flushProgram(GrGLuint id) {
1939 SkASSERT(id);
1940 if (fHWProgramID == id) {
1941 SkASSERT(!fHWProgram);
1942 return;
1943 }
1944 fHWProgram.reset();
1945 GL_CALL(UseProgram(id));
1946 fHWProgramID = id;
1947 }
1948
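// Binds 'buffer' to the GL target associated with 'type', skipping redundant binds via the
// cached buffer state, and returns that GL target. CPU-backed buffers only ensure that buffer
// object 0 is bound to the target.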
1949 GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
1950 this->handleDirtyContext();
1951
1952 // Index buffer state is tied to the vertex array.
1953 if (GrGpuBufferType::kIndex == type) {
1954 this->bindVertexArray(0);
1955 }
1956
1957 auto* bufferState = this->hwBufferState(type);
1958 if (buffer->isCpuBuffer()) {
1959 if (!bufferState->fBufferZeroKnownBound) {
1960 GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
1961 bufferState->fBufferZeroKnownBound = true;
1962 bufferState->fBoundBufferUniqueID.makeInvalid();
1963 }
1964 } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
1965 bufferState->fBoundBufferUniqueID) {
1966 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
1967 GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
1968 bufferState->fBufferZeroKnownBound = false;
1969 bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
1970 }
1971
1972 return bufferState->fGLTarget;
1973 }
1974
1975 void GrGLGpu::clear(const GrScissorState& scissor,
1976 std::array<float, 4> color,
1977 GrRenderTarget* target,
1978 bool useMultisampleFBO,
1979 GrSurfaceOrigin origin) {
1980 // parent class should never let us get here with no RT
1981 SkASSERT(target);
1982 SkASSERT(!this->caps()->performColorClearsAsDraws());
1983 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
1984
1985 this->handleDirtyContext();
1986
1987 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
1988
1989 if (scissor.enabled()) {
1990 this->flushRenderTarget(glRT, useMultisampleFBO, origin, scissor.rect());
1991 } else {
1992 this->flushRenderTarget(glRT, useMultisampleFBO);
1993 }
1994 this->flushScissor(scissor, glRT->height(), origin);
1995 this->disableWindowRectangles();
1996 this->flushColorWrite(true);
1997 this->flushClearColor(color);
1998 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1999 }
2000
2001 static bool use_tiled_rendering(const GrGLCaps& glCaps,
2002 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2003 // Only use the tiled rendering extension if we can explicitly clear and discard the stencil.
2004 // Otherwise it's faster to just not use it.
2005 return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp &&
2006 GrStoreOp::kDiscard == stencilLoadStore.fStoreOp;
2007 }
2008
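// Begins a render pass: binds the target framebuffer, starts tiled rendering when the caps and
// the stencil load/store ops allow it, and performs any requested load-op clears for color and
// stencil with scissor and window rectangles disabled.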
2009 void GrGLGpu::beginCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2010 const SkIRect& bounds, GrSurfaceOrigin origin,
2011 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2012 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2013 SkASSERT(!fIsExecutingCommandBuffer_DebugOnly);
2014
2015 this->handleDirtyContext();
2016
2017 this->flushRenderTarget(rt, useMultisampleFBO);
2018 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true);
2019
2020 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2021 auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, rt->height(), bounds);
2022 GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp)
2023 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2024 SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering().
2025 GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth,
2026 nativeBounds.fHeight, preserveMask));
2027 }
2028
2029 GrGLbitfield clearMask = 0;
2030 if (GrLoadOp::kClear == colorLoadStore.fLoadOp) {
2031 SkASSERT(!this->caps()->performColorClearsAsDraws());
2032 this->flushClearColor(colorLoadStore.fClearColor);
2033 this->flushColorWrite(true);
2034 clearMask |= GR_GL_COLOR_BUFFER_BIT;
2035 }
2036 if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) {
2037 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2038 GL_CALL(StencilMask(0xffffffff));
2039 GL_CALL(ClearStencil(0));
2040 clearMask |= GR_GL_STENCIL_BUFFER_BIT;
2041 }
2042 if (clearMask) {
2043 this->flushScissorTest(GrScissorTest::kDisabled);
2044 this->disableWindowRectangles();
2045 GL_CALL(Clear(clearMask));
2046 }
2047 }
2048
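// Ends a render pass: invalidates or discards attachments whose store op is kDiscard and, when
// tiled rendering was started in beginCommandBuffer, ends tiling, preserving the color buffer
// only if its store op is kStore.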
2049 void GrGLGpu::endCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2050 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2051 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2052 SkASSERT(fIsExecutingCommandBuffer_DebugOnly);
2053
2054 this->handleDirtyContext();
2055
2056 if (rt->uniqueID() != fHWBoundRenderTargetUniqueID ||
2057 useMultisampleFBO != fHWBoundFramebufferIsMSAA) {
2058 // The framebuffer binding changed in the middle of a command buffer. We should have already
2059 // printed a warning during onFBOChanged.
2060 return;
2061 }
2062
2063 if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) {
2064 SkSTArray<2, GrGLenum> discardAttachments;
2065 if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) {
2066 discardAttachments.push_back(
2067 rt->isFBO0(useMultisampleFBO) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0);
2068 }
2069 if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) {
2070 discardAttachments.push_back(
2071 rt->isFBO0(useMultisampleFBO) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT);
2072
2073 }
2074
2075 if (!discardAttachments.empty()) {
2076 if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) {
2077 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(),
2078 discardAttachments.begin()));
2079 } else {
2080 SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType());
2081 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(),
2082 discardAttachments.begin()));
2083 }
2084 }
2085 }
2086
2087 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2088 GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp)
2089 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2090 // Handled by use_tiled_rendering().
2091 SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp);
2092 GL_CALL(EndTiling(preserveMask));
2093 }
2094
2095 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false);
2096 }
2097
2098 void GrGLGpu::clearStencilClip(const GrScissorState& scissor, bool insideStencilMask,
2099 GrRenderTarget* target, bool useMultisampleFBO,
2100 GrSurfaceOrigin origin) {
2101 SkASSERT(target);
2102 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2103 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2104 this->handleDirtyContext();
2105
2106 GrAttachment* sb = target->getStencilAttachment(useMultisampleFBO);
2107 if (!sb) {
2108 // We should only get here if we marked a proxy as requiring a SB. However,
2109 // the SB creation could later fail. Likely clipping is going to go awry now.
2110 return;
2111 }
2112
2113 GrGLint stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
2114 #if 0
2115 SkASSERT(stencilBitCount > 0);
2116 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
2117 #else
2118 // we could just clear the clip bit but when we go through
2119 // ANGLE a partial stencil mask will cause clears to be
2120 // turned into draws. Our contract on OpsTask says that
2121 // changing the clip between stencil passes may or may not
2122 // zero the client's clip bits. So we just clear the whole thing.
2123 static const GrGLint clipStencilMask = ~0;
2124 #endif
2125 GrGLint value;
2126 if (insideStencilMask) {
2127 value = (1 << (stencilBitCount - 1));
2128 } else {
2129 value = 0;
2130 }
2131 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2132 this->flushRenderTargetNoColorWrites(glRT, useMultisampleFBO);
2133
2134 this->flushScissor(scissor, glRT->height(), origin);
2135 this->disableWindowRectangles();
2136
2137 GL_CALL(StencilMask((uint32_t) clipStencilMask));
2138 GL_CALL(ClearStencil(value));
2139 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2140 fHWStencilSettings.invalidate();
2141 }
2142
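// Shared implementation for glReadPixels-based reads. Binds the surface's FBO (or a temporary
// one for non-render-targets), applies GR_GL_PACK_ROW_LENGTH when the destination row width
// differs from the read rect, and reads into 'offsetOrPtr', which is either a client pointer or
// an offset into a bound transfer buffer.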
2143 bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface,
2144 SkIRect rect,
2145 GrColorType surfaceColorType,
2146 GrColorType dstColorType,
2147 void* offsetOrPtr,
2148 int rowWidthInPixels) {
2149 SkASSERT(surface);
2150
2151 auto format = surface->backendFormat().asGLFormat();
2152 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2153 if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) {
2154 return false;
2155 }
2156 GrGLenum externalFormat = 0;
2157 GrGLenum externalType = 0;
2158 this->glCaps().getReadPixelsFormat(surface->backendFormat().asGLFormat(),
2159 surfaceColorType,
2160 dstColorType,
2161 &externalFormat,
2162 &externalType);
2163 if (!externalFormat || !externalType) {
2164 return false;
2165 }
2166
2167 if (renderTarget) {
2168 // Always bind the single sample FBO since we can't read pixels from an MSAA framebuffer.
2169 constexpr bool useMultisampleFBO = false;
2170 if (renderTarget->numSamples() > 1 && renderTarget->isFBO0(useMultisampleFBO)) {
2171 return false;
2172 }
2173 this->flushRenderTargetNoColorWrites(renderTarget, useMultisampleFBO);
2174 } else {
2175 // Use a temporary FBO.
2176 this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
2177 fHWBoundRenderTargetUniqueID.makeInvalid();
2178 }
2179
2180 // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
2181 if (rowWidthInPixels != rect.width()) {
2182 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2183 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels));
2184 }
2185 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1));
2186
2187 GL_CALL(ReadPixels(rect.left(),
2188 rect.top(),
2189 rect.width(),
2190 rect.height(),
2191 externalFormat,
2192 externalType,
2193 offsetOrPtr));
2194
2195 if (rowWidthInPixels != rect.width()) {
2196 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2197 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2198 }
2199
2200 if (!renderTarget) {
2201 this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER);
2202 }
2203 return true;
2204 }
2205
2206 bool GrGLGpu::onReadPixels(GrSurface* surface,
2207 SkIRect rect,
2208 GrColorType surfaceColorType,
2209 GrColorType dstColorType,
2210 void* buffer,
2211 size_t rowBytes) {
2212 SkASSERT(surface);
2213
2214 size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType);
2215
2216 // GL_PACK_ROW_LENGTH is in terms of pixels not bytes.
2217 int rowPixelWidth;
2218
2219 if (rowBytes == SkToSizeT(rect.width()*bytesPerPixel)) {
2220 rowPixelWidth = rect.width();
2221 } else {
2222 SkASSERT(!(rowBytes % bytesPerPixel));
2223 rowPixelWidth = rowBytes / bytesPerPixel;
2224 }
2225 this->unbindXferBuffer(GrGpuBufferType::kXferGpuToCpu);
2226 return this->readOrTransferPixelsFrom(surface,
2227 rect,
2228 surfaceColorType,
2229 dstColorType,
2230 buffer,
2231 rowPixelWidth);
2232 }
2233
2234 GrOpsRenderPass* GrGLGpu::onGetOpsRenderPass(
2235 GrRenderTarget* rt,
2236 bool useMultisampleFBO,
2237 GrAttachment*,
2238 GrSurfaceOrigin origin,
2239 const SkIRect& bounds,
2240 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
2241 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
2242 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
2243 GrXferBarrierFlags renderPassXferBarriers) {
2244 if (!fCachedOpsRenderPass) {
2245 fCachedOpsRenderPass = std::make_unique<GrGLOpsRenderPass>(this);
2246 }
2247 if (useMultisampleFBO && rt->numSamples() == 1) {
2248 // We will be using dynamic msaa. Ensure there is an attachment.
2249 auto glRT = static_cast<GrGLRenderTarget*>(rt);
2250 if (!glRT->ensureDynamicMSAAAttachment()) {
2251 SkDebugf("WARNING: Failed to make dmsaa attachment. Render pass will be dropped.");
2252 return nullptr;
2253 }
2254 }
2255 fCachedOpsRenderPass->set(rt, useMultisampleFBO, bounds, origin, colorInfo, stencilInfo);
2256 return fCachedOpsRenderPass.get();
2257 }
2258
2259 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, bool useMultisampleFBO,
2260 GrSurfaceOrigin origin, const SkIRect& bounds) {
2261 this->flushRenderTargetNoColorWrites(target, useMultisampleFBO);
2262 this->didWriteToSurface(target, origin, &bounds);
2263 }
2264
2265 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, bool useMultisampleFBO) {
2266 this->flushRenderTargetNoColorWrites(target, useMultisampleFBO);
2267 this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr);
2268 }
2269
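// Binds 'target' (single sample or MSAA FBO) if it differs from the cached binding, updating the
// viewport and applying driver workarounds and sRGB write state as needed.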
2270 void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target, bool useMultisampleFBO) {
2271 SkASSERT(target);
2272 GrGpuResource::UniqueID rtID = target->uniqueID();
2273 if (fHWBoundRenderTargetUniqueID != rtID || fHWBoundFramebufferIsMSAA != useMultisampleFBO) {
2274 target->bind(useMultisampleFBO);
2275 #ifdef SK_DEBUG
2276 // don't do this check in Chromium -- this is causing
2277 // lots of repeated command buffer flushes when the compositor is
2278 // rendering with Ganesh, which is really slow; even too slow for
2279 // Debug mode.
2280 // Also don't do this when we know glCheckFramebufferStatus() may have side effects.
2281 if (!this->glCaps().skipErrorChecks() &&
2282 !this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
2283 GrGLenum status;
2284 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
2285 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
2286 SkDebugf("GrGLGpu::flushRenderTargetNoColorWrites glCheckFramebufferStatus %x\n",
2287 status);
2288 }
2289 }
2290 #endif
2291 fHWBoundRenderTargetUniqueID = rtID;
2292 fHWBoundFramebufferIsMSAA = useMultisampleFBO;
2293 this->flushViewport(SkIRect::MakeSize(target->dimensions()),
2294 target->height(),
2295 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
2296 }
2297 if (this->caps()->workarounds().force_update_scissor_state_when_binding_fbo0) {
2298 // The driver forgets the correct scissor state when using FBO 0.
2299 if (!fHWScissorSettings.fRect.isInvalid()) {
2300 const GrNativeRect& r = fHWScissorSettings.fRect;
2301 GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight));
2302 }
2303 if (fHWScissorSettings.fEnabled == kYes_TriState) {
2304 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2305 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2306 } else if (fHWScissorSettings.fEnabled == kNo_TriState) {
2307 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2308 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2309 }
2310 }
2311
2312 if (this->glCaps().srgbWriteControl()) {
2313 this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat()));
2314 }
2315
2316 if (this->glCaps().shouldQueryImplementationReadSupport(target->format())) {
2317 GrGLint format;
2318 GrGLint type;
2319 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format);
2320 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type);
2321 this->glCaps().didQueryImplementationReadSupport(target->format(), format, type);
2322 }
2323 }
2324
2325 void GrGLGpu::flushFramebufferSRGB(bool enable) {
2326 if (enable && kYes_TriState != fHWSRGBFramebuffer) {
2327 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
2328 fHWSRGBFramebuffer = kYes_TriState;
2329 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
2330 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
2331 fHWSRGBFramebuffer = kNo_TriState;
2332 }
2333 }
2334
2335 GrGLenum GrGLGpu::prepareToDraw(GrPrimitiveType primitiveType) {
2336 fStats.incNumDraws();
2337
2338 if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
2339 GrIsPrimTypeLines(primitiveType) && !GrIsPrimTypeLines(fLastPrimitiveType)) {
2340 GL_CALL(Enable(GR_GL_CULL_FACE));
2341 GL_CALL(Disable(GR_GL_CULL_FACE));
2342 }
2343 fLastPrimitiveType = primitiveType;
2344
2345 switch (primitiveType) {
2346 case GrPrimitiveType::kTriangles:
2347 return GR_GL_TRIANGLES;
2348 case GrPrimitiveType::kTriangleStrip:
2349 return GR_GL_TRIANGLE_STRIP;
2350 case GrPrimitiveType::kPoints:
2351 return GR_GL_POINTS;
2352 case GrPrimitiveType::kLines:
2353 return GR_GL_LINES;
2354 case GrPrimitiveType::kLineStrip:
2355 return GR_GL_LINE_STRIP;
2356 case GrPrimitiveType::kPatches:
2357 return GR_GL_PATCHES;
2358 case GrPrimitiveType::kPath:
2359 SK_ABORT("non-mesh-based GrPrimitiveType");
2360 return 0;
2361 }
2362 SK_ABORT("invalid GrPrimitiveType");
2363 }
2364
2365 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
2366 auto glRT = static_cast<GrGLRenderTarget*>(target);
2367 if (this->glCaps().framebufferResolvesMustBeFullSize()) {
2368 this->resolveRenderFBOs(glRT, SkIRect::MakeSize(glRT->dimensions()),
2369 ResolveDirection::kMSAAToSingle);
2370 } else {
2371 this->resolveRenderFBOs(glRT, resolveRect, ResolveDirection::kMSAAToSingle);
2372 }
2373 }
2374
2375 void GrGLGpu::resolveRenderFBOs(GrGLRenderTarget* rt, const SkIRect& resolveRect,
2376 ResolveDirection resolveDirection,
2377 bool invalidateReadBufferAfterBlit) {
2378 this->handleDirtyContext();
2379 rt->bindForResolve(resolveDirection);
2380
2381 const GrGLCaps& caps = this->glCaps();
2382
2383 // make sure we go through flushRenderTarget() since we've modified
2384 // the bound DRAW FBO ID.
2385 fHWBoundRenderTargetUniqueID.makeInvalid();
2386 if (GrGLCaps::kES_Apple_MSFBOType == caps.msFBOType()) {
2387 // The Apple extension doesn't support blitting from single to multisample.
2388 SkASSERT(resolveDirection != ResolveDirection::kSingleToMSAA);
2389 SkASSERT(resolveRect == SkIRect::MakeSize(rt->dimensions()));
2390 // Apple's extension uses the scissor as the blit bounds.
2391 // Passing in kTopLeft_GrSurfaceOrigin will make sure no transformation of the rect
2392 // happens inside flushScissor since resolveRect is already in native device coordinates.
2393 GrScissorState scissor(rt->dimensions());
2394 SkAssertResult(scissor.set(resolveRect));
2395 this->flushScissor(scissor, rt->height(), kTopLeft_GrSurfaceOrigin);
2396 this->disableWindowRectangles();
2397 GL_CALL(ResolveMultisampleFramebuffer());
2398 } else {
2399 SkASSERT(!caps.framebufferResolvesMustBeFullSize() ||
2400 resolveRect == SkIRect::MakeSize(rt->dimensions()));
2401 int l = resolveRect.x();
2402 int b = resolveRect.y();
2403 int r = resolveRect.x() + resolveRect.width();
2404 int t = resolveRect.y() + resolveRect.height();
2405
2406 // BlitFrameBuffer respects the scissor, so disable it.
2407 this->flushScissorTest(GrScissorTest::kDisabled);
2408 this->disableWindowRectangles();
2409 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2410 }
2411
2412 if (caps.invalidateFBType() != GrGLCaps::kNone_InvalidateFBType &&
2413 invalidateReadBufferAfterBlit) {
2414 // Invalidate the read FBO attachment after the blit, in hopes that this allows the driver
2415 // to perform tiling optimizations.
2416 bool readBufferIsMSAA = resolveDirection == ResolveDirection::kMSAAToSingle;
2417 GrGLenum colorDiscardAttachment = rt->isFBO0(readBufferIsMSAA) ? GR_GL_COLOR
2418 : GR_GL_COLOR_ATTACHMENT0;
2419 if (caps.invalidateFBType() == GrGLCaps::kInvalidate_InvalidateFBType) {
2420 GL_CALL(InvalidateFramebuffer(GR_GL_READ_FRAMEBUFFER, 1, &colorDiscardAttachment));
2421 } else {
2422 SkASSERT(caps.invalidateFBType() == GrGLCaps::kDiscard_InvalidateFBType);
2423 // glDiscardFramebuffer only accepts GL_FRAMEBUFFER.
2424 rt->bind(readBufferIsMSAA);
2425 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, 1, &colorDiscardAttachment));
2426 }
2427 }
2428 }
2429
2430 namespace {
2431
2432
2433 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2434 static const GrGLenum gTable[kGrStencilOpCount] = {
2435 GR_GL_KEEP, // kKeep
2436 GR_GL_ZERO, // kZero
2437 GR_GL_REPLACE, // kReplace
2438 GR_GL_INVERT, // kInvert
2439 GR_GL_INCR_WRAP, // kIncWrap
2440 GR_GL_DECR_WRAP, // kDecWrap
2441 GR_GL_INCR, // kIncClamp
2442 GR_GL_DECR, // kDecClamp
2443 };
2444 static_assert(0 == (int)GrStencilOp::kKeep);
2445 static_assert(1 == (int)GrStencilOp::kZero);
2446 static_assert(2 == (int)GrStencilOp::kReplace);
2447 static_assert(3 == (int)GrStencilOp::kInvert);
2448 static_assert(4 == (int)GrStencilOp::kIncWrap);
2449 static_assert(5 == (int)GrStencilOp::kDecWrap);
2450 static_assert(6 == (int)GrStencilOp::kIncClamp);
2451 static_assert(7 == (int)GrStencilOp::kDecClamp);
2452 SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
2453 return gTable[(int)op];
2454 }
2455
2456 void set_gl_stencil(const GrGLInterface* gl,
2457 const GrStencilSettings::Face& face,
2458 GrGLenum glFace) {
2459 GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
2460 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
2461 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
2462
2463 GrGLint ref = face.fRef;
2464 GrGLint mask = face.fTestMask;
2465 GrGLint writeMask = face.fWriteMask;
2466
2467 if (GR_GL_FRONT_AND_BACK == glFace) {
2468 // we call the combined func just in case separate stencil is not
2469 // supported.
2470 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2471 GR_GL_CALL(gl, StencilMask(writeMask));
2472 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
2473 } else {
2474 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2475 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2476 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
2477 }
2478 }
2479 } // namespace
2480
2481 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) {
2482 if (stencilSettings.isDisabled()) {
2483 this->disableStencil();
2484 } else if (fHWStencilSettings != stencilSettings ||
2485 (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) {
2486 if (kYes_TriState != fHWStencilTestEnabled) {
2487 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2488
2489 fHWStencilTestEnabled = kYes_TriState;
2490 }
2491 if (!stencilSettings.isTwoSided()) {
2492 set_gl_stencil(this->glInterface(), stencilSettings.singleSidedFace(),
2493 GR_GL_FRONT_AND_BACK);
2494 } else {
2495 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCWFace(origin),
2496 GR_GL_FRONT);
2497 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCCWFace(origin),
2498 GR_GL_BACK);
2499 }
2500 fHWStencilSettings = stencilSettings;
2501 fHWStencilOrigin = origin;
2502 }
2503 }
2504
2505 void GrGLGpu::disableStencil() {
2506 if (kNo_TriState != fHWStencilTestEnabled) {
2507 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2508
2509 fHWStencilTestEnabled = kNo_TriState;
2510 fHWStencilSettings.invalidate();
2511 }
2512 }
2513
2514 void GrGLGpu::flushConservativeRasterState(bool enabled) {
2515 if (this->caps()->conservativeRasterSupport()) {
2516 if (enabled) {
2517 if (kYes_TriState != fHWConservativeRasterEnabled) {
2518 GL_CALL(Enable(GR_GL_CONSERVATIVE_RASTERIZATION));
2519 fHWConservativeRasterEnabled = kYes_TriState;
2520 }
2521 } else {
2522 if (kNo_TriState != fHWConservativeRasterEnabled) {
2523 GL_CALL(Disable(GR_GL_CONSERVATIVE_RASTERIZATION));
2524 fHWConservativeRasterEnabled = kNo_TriState;
2525 }
2526 }
2527 }
2528 }
2529
2530 void GrGLGpu::flushWireframeState(bool enabled) {
2531 if (this->caps()->wireframeSupport()) {
2532 if (this->caps()->wireframeMode() || enabled) {
2533 if (kYes_TriState != fHWWireframeEnabled) {
2534 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
2535 fHWWireframeEnabled = kYes_TriState;
2536 }
2537 } else {
2538 if (kNo_TriState != fHWWireframeEnabled) {
2539 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
2540 fHWWireframeEnabled = kNo_TriState;
2541 }
2542 }
2543 }
2544 }
2545
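// Applies the xfer processor's blend state, working around drivers that cannot disable color
// writes or that mishandle advanced blend equations and dual-source coefficients, then flushes
// the color write mask.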
2546 void GrGLGpu::flushBlendAndColorWrite(
2547 const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
2548 if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWriteColor) {
2549 // We need to work around a driver bug by using a blend state that preserves the dst color,
2550 // rather than disabling color writes.
2551 GrXferProcessor::BlendInfo preserveDstBlend;
2552 preserveDstBlend.fSrcBlend = kZero_GrBlendCoeff;
2553 preserveDstBlend.fDstBlend = kOne_GrBlendCoeff;
2554 this->flushBlendAndColorWrite(preserveDstBlend, swizzle);
2555 return;
2556 }
2557
2558 GrBlendEquation equation = blendInfo.fEquation;
2559 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
2560 GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
2561
2562 // Any optimization to disable blending should have already been applied and
2563 // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2564 bool blendOff = GrBlendShouldDisable(equation, srcCoeff, dstCoeff) ||
2565 !blendInfo.fWriteColor;
2566
2567 if (blendOff) {
2568 if (kNo_TriState != fHWBlendState.fEnabled) {
2569 GL_CALL(Disable(GR_GL_BLEND));
2570
2571 // Workaround for the ARM KHR_blend_equation_advanced disable flags issue
2572 // https://code.google.com/p/skia/issues/detail?id=3943
2573 if (this->ctxInfo().vendor() == GrGLVendor::kARM &&
2574 GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
2575 SkASSERT(this->caps()->advancedBlendEquationSupport());
2576 // Set to any basic blending equation.
2577 GrBlendEquation blend_equation = kAdd_GrBlendEquation;
2578 GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
2579 fHWBlendState.fEquation = blend_equation;
2580 }
2581
2582 // Workaround for Adreno 5xx BlendFunc bug. See crbug.com/1241134.
2583 // We must also check to see if the blend coeffs are invalid because the client may have
2584 // reset our gl state and thus we will have forgotten if the previous use was a coeff
2585 // that referenced src2.
2586 if (this->glCaps().mustResetBlendFuncBetweenDualSourceAndDisable() &&
2587 (GrBlendCoeffRefsSrc2(fHWBlendState.fSrcCoeff) ||
2588 GrBlendCoeffRefsSrc2(fHWBlendState.fDstCoeff) ||
2589 fHWBlendState.fSrcCoeff == kIllegal_GrBlendCoeff ||
2590 fHWBlendState.fDstCoeff == kIllegal_GrBlendCoeff)) {
2591 // We just reset the blend func to anything that doesn't reference src2
2592 GL_CALL(BlendFunc(GR_GL_ONE, GR_GL_ZERO));
2593 fHWBlendState.fSrcCoeff = kOne_GrBlendCoeff;
2594 fHWBlendState.fDstCoeff = kZero_GrBlendCoeff;
2595 }
2596
2597 fHWBlendState.fEnabled = kNo_TriState;
2598 }
2599 } else {
2600 if (kYes_TriState != fHWBlendState.fEnabled) {
2601 GL_CALL(Enable(GR_GL_BLEND));
2602
2603 fHWBlendState.fEnabled = kYes_TriState;
2604 }
2605
2606 if (fHWBlendState.fEquation != equation) {
2607 GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
2608 fHWBlendState.fEquation = equation;
2609 }
2610
2611 if (GrBlendEquationIsAdvanced(equation)) {
2612 SkASSERT(this->caps()->advancedBlendEquationSupport());
2613 // Advanced equations have no other blend state.
2614 return;
2615 }
2616
2617 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
2618 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
2619 gXfermodeCoeff2Blend[dstCoeff]));
2620 fHWBlendState.fSrcCoeff = srcCoeff;
2621 fHWBlendState.fDstCoeff = dstCoeff;
2622 }
2623
2624 if (GrBlendCoeffRefsConstant(srcCoeff) || GrBlendCoeffRefsConstant(dstCoeff)) {
2625 SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
2626 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
2627 GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA));
2628 fHWBlendState.fConstColor = blendConst;
2629 fHWBlendState.fConstColorValid = true;
2630 }
2631 }
2632 }
2633
2634 this->flushColorWrite(blendInfo.fWriteColor);
2635 }
2636
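// Binds 'texture' to texture unit 'unitIdx' and reconciles its sampler and non-sampler
// parameters with 'samplerState', using sampler objects when supported and otherwise setting
// only the texture parameters that have changed since the last reset timestamp.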
2637 void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const GrSwizzle& swizzle,
2638 GrGLTexture* texture) {
2639 SkASSERT(texture);
2640
2641 #ifdef SK_DEBUG
2642 if (!this->caps()->npotTextureTileSupport()) {
2643 if (samplerState.isRepeatedX()) {
2644 const int w = texture->width();
2645 SkASSERT(SkIsPow2(w));
2646 }
2647 if (samplerState.isRepeatedY()) {
2648 const int h = texture->height();
2649 SkASSERT(SkIsPow2(h));
2650 }
2651 }
2652 #endif
2653
2654 GrGpuResource::UniqueID textureID = texture->uniqueID();
2655 GrGLenum target = texture->target();
2656 if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) {
2657 this->setTextureUnit(unitIdx);
2658 GL_CALL(BindTexture(target, texture->textureID()));
2659 fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID);
2660 }
2661
2662 if (samplerState.mipmapped() == GrMipmapped::kYes) {
2663 if (!this->caps()->mipmapSupport() || texture->mipmapped() == GrMipmapped::kNo) {
2664 samplerState.setMipmapMode(GrSamplerState::MipmapMode::kNone);
2665 } else {
2666 SkASSERT(!texture->mipmapsAreDirty());
2667 }
2668 }
2669
2670 auto timestamp = texture->parameters()->resetTimestamp();
2671 bool setAll = timestamp < fResetTimestampForTextureParameters;
2672 const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr;
2673 GrGLTextureParameters::SamplerOverriddenState newSamplerState;
2674 if (this->glCaps().useSamplerObjects()) {
2675 fSamplerObjectCache->bindSampler(unitIdx, samplerState);
2676 if (this->glCaps().mustSetAnyTexParameterToEnableMipmapping()) {
2677 if (samplerState.mipmapped() == GrMipmapped::kYes) {
2678 GrGLenum minFilter = filter_to_gl_min_filter(samplerState.filter(),
2679 samplerState.mipmapMode());
2680 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2681 texture->parameters()->samplerOverriddenState();
2682 this->setTextureUnit(unitIdx);
2683 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, minFilter));
2684 newSamplerState = oldSamplerState;
2685 newSamplerState.fMinFilter = minFilter;
2686 samplerStateToRecord = &newSamplerState;
2687 }
2688 }
2689 } else {
2690 if (fSamplerObjectCache) {
2691 fSamplerObjectCache->unbindSampler(unitIdx);
2692 }
2693 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2694 texture->parameters()->samplerOverriddenState();
2695 samplerStateToRecord = &newSamplerState;
2696
2697 newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter(),
2698 samplerState.mipmapMode());
2699 newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter());
2700
2701 newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps());
2702 newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps());
2703
2704 // These are the OpenGL default values.
2705 newSamplerState.fMinLOD = -1000.f;
2706 newSamplerState.fMaxLOD = 1000.f;
2707
2708 if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) {
2709 this->setTextureUnit(unitIdx);
2710 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter));
2711 }
2712 if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) {
2713 this->setTextureUnit(unitIdx);
2714 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter));
2715 }
2716 if (this->glCaps().mipmapLodControlSupport()) {
2717 if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) {
2718 this->setTextureUnit(unitIdx);
2719 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD));
2720 }
2721 if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) {
2722 this->setTextureUnit(unitIdx);
2723 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD));
2724 }
2725 }
2726 if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) {
2727 this->setTextureUnit(unitIdx);
2728 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS));
2729 }
2730 if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) {
2731 this->setTextureUnit(unitIdx);
2732 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT));
2733 }
2734 if (this->glCaps().clampToBorderSupport()) {
2735 // Make sure the border color is transparent black (the default)
2736 if (setAll || oldSamplerState.fBorderColorInvalid) {
2737 this->setTextureUnit(unitIdx);
2738 static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f};
2739 GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack));
2740 }
2741 }
2742 }
2743 GrGLTextureParameters::NonsamplerState newNonsamplerState;
2744 newNonsamplerState.fBaseMipMapLevel = 0;
2745 newNonsamplerState.fMaxMipmapLevel = texture->maxMipmapLevel();
2746 newNonsamplerState.fSwizzleIsRGBA = true;
2747
2748 const GrGLTextureParameters::NonsamplerState& oldNonsamplerState =
2749 texture->parameters()->nonsamplerState();
2750 if (this->glCaps().textureSwizzleSupport()) {
2751 if (setAll || !oldNonsamplerState.fSwizzleIsRGBA) {
2752 static constexpr GrGLenum kRGBA[4] {
2753 GR_GL_RED,
2754 GR_GL_GREEN,
2755 GR_GL_BLUE,
2756 GR_GL_ALPHA
2757 };
2758 this->setTextureUnit(unitIdx);
2759 if (GR_IS_GR_GL(this->glStandard())) {
2760 static_assert(sizeof(kRGBA[0]) == sizeof(GrGLint));
2761 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
2762 reinterpret_cast<const GrGLint*>(kRGBA)));
2763 } else if (GR_IS_GR_GL_ES(this->glStandard())) {
2764 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
2765 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, kRGBA[0]));
2766 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, kRGBA[1]));
2767 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, kRGBA[2]));
2768 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, kRGBA[3]));
2769 }
2770 }
2771 }
2772 // These are not supported in ES2 contexts
2773 if (this->glCaps().mipmapLevelControlSupport() &&
2774 (texture->textureType() != GrTextureType::kExternal ||
2775 !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) {
2776 if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) {
2777 this->setTextureUnit(unitIdx);
2778 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL,
2779 newNonsamplerState.fBaseMipMapLevel));
2780 }
2781 if (newNonsamplerState.fMaxMipmapLevel != oldNonsamplerState.fMaxMipmapLevel) {
2782 this->setTextureUnit(unitIdx);
2783 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
2784 newNonsamplerState.fMaxMipmapLevel));
2785 }
2786 }
2787 texture->parameters()->set(samplerStateToRecord, newNonsamplerState,
2788 fResetTimestampForTextureParameters);
2789 }
2790
2791 void GrGLGpu::onResetTextureBindings() {
2792 static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE,
2793 GR_GL_TEXTURE_EXTERNAL};
2794 for (int i = 0; i < this->numTextureUnits(); ++i) {
2795 this->setTextureUnit(i);
2796 for (auto target : kTargets) {
2797 if (fHWTextureUnitBindings[i].hasBeenModified(target)) {
2798 GL_CALL(BindTexture(target, 0));
2799 }
2800 }
2801 fHWTextureUnitBindings[i].invalidateAllTargets(true);
2802 }
2803 }
2804
2805 void GrGLGpu::flushPatchVertexCount(uint8_t count) {
2806 SkASSERT(this->caps()->shaderCaps()->tessellationSupport());
2807 if (fHWPatchVertexCount != count) {
2808 GL_CALL(PatchParameteri(GR_GL_PATCH_VERTICES, count));
2809 fHWPatchVertexCount = count;
2810 }
2811 }
2812
2813 void GrGLGpu::flushColorWrite(bool writeColor) {
2814 if (!writeColor) {
2815 if (kNo_TriState != fHWWriteToColor) {
2816 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
2817 GR_GL_FALSE, GR_GL_FALSE));
2818 fHWWriteToColor = kNo_TriState;
2819 }
2820 } else {
2821 if (kYes_TriState != fHWWriteToColor) {
2822 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
2823 fHWWriteToColor = kYes_TriState;
2824 }
2825 }
2826 }
2827
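// Sets the GL clear color, nudging alpha just off exact 0/1 values on drivers where clears to
// boundary values are broken (see clearToBoundaryValuesIsBroken in the caps).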
2828 void GrGLGpu::flushClearColor(std::array<float, 4> color) {
2829 GrGLfloat r = color[0], g = color[1], b = color[2], a = color[3];
2830 if (this->glCaps().clearToBoundaryValuesIsBroken() &&
2831 (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
2832 static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
2833 static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
2834 a = (1 == a) ? safeAlpha1 : safeAlpha0;
2835 }
2836 if (r != fHWClearColor[0] || g != fHWClearColor[1] ||
2837 b != fHWClearColor[2] || a != fHWClearColor[3]) {
2838 GL_CALL(ClearColor(r, g, b, a));
2839 fHWClearColor[0] = r;
2840 fHWClearColor[1] = g;
2841 fHWClearColor[2] = b;
2842 fHWClearColor[3] = a;
2843 }
2844 }
2845
2846 void GrGLGpu::setTextureUnit(int unit) {
2847 SkASSERT(unit >= 0 && unit < this->numTextureUnits());
2848 if (unit != fHWActiveTextureUnitIdx) {
2849 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
2850 fHWActiveTextureUnitIdx = unit;
2851 }
2852 }
2853
2854 void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) {
2855 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
2856 int lastUnitIdx = this->numTextureUnits() - 1;
2857 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
2858 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
2859 fHWActiveTextureUnitIdx = lastUnitIdx;
2860 }
2861     // Clear out this field so that if a GrGLProgram does use this unit it will rebind the
2862 // correct texture.
2863 fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target);
2864 GL_CALL(BindTexture(target, textureID));
2865 }
2866
2867 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
2868 static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst,
2869 const GrSurface* src,
2870 const SkIRect& srcRect,
2871 const SkIPoint& dstPoint,
2872 const GrGLCaps& caps) {
2873 int dstSampleCnt = 0;
2874 int srcSampleCnt = 0;
2875 if (const GrRenderTarget* rt = dst->asRenderTarget()) {
2876 dstSampleCnt = rt->numSamples();
2877 }
2878 if (const GrRenderTarget* rt = src->asRenderTarget()) {
2879 srcSampleCnt = rt->numSamples();
2880 }
2881 SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget()));
2882 SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget()));
2883
2884 GrGLFormat dstFormat = dst->backendFormat().asGLFormat();
2885 GrGLFormat srcFormat = src->backendFormat().asGLFormat();
2886
2887 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
2888 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
2889
2890 GrTextureType dstTexType;
2891 GrTextureType* dstTexTypePtr = nullptr;
2892 GrTextureType srcTexType;
2893 GrTextureType* srcTexTypePtr = nullptr;
2894 if (dstTex) {
2895 dstTexType = dstTex->textureType();
2896 dstTexTypePtr = &dstTexType;
2897 }
2898 if (srcTex) {
2899 srcTexType = srcTex->textureType();
2900 srcTexTypePtr = &srcTexType;
2901 }
2902
2903 return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr,
2904 srcFormat, srcSampleCnt, srcTexTypePtr,
2905 src->getBoundsRect(), true, srcRect, dstPoint);
2906 }
2907
2908 static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) {
2909 // A RT has a separate MSAA renderbuffer if:
2910 // 1) It's multisampled
2911 // 2) We're using an extension with separate MSAA renderbuffers
2912 // 3) It's not FBO 0, which is special and always auto-resolves
2913 return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && !rt->isFBO0(true/*msaa*/);
2914 }
2915
2916 static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src,
2917 const GrGLCaps& caps) {
2918
2919 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
2920 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
2921 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
2922 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
2923
2924 bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false;
2925 bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false;
2926
2927 GrGLFormat dstFormat = dst->backendFormat().asGLFormat();
2928 GrGLFormat srcFormat = src->backendFormat().asGLFormat();
2929
2930 GrTextureType dstTexType;
2931 GrTextureType* dstTexTypePtr = nullptr;
2932 GrTextureType srcTexType;
2933 GrTextureType* srcTexTypePtr = nullptr;
2934 if (dstTex) {
2935 dstTexType = dstTex->textureType();
2936 dstTexTypePtr = &dstTexType;
2937 }
2938 if (srcTex) {
2939 srcTexType = srcTex->textureType();
2940 srcTexTypePtr = &srcTexType;
2941 }
2942
2943 return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr,
2944 srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr);
2945 }
2946
2947 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget,
2948 TempFBOTarget tempFBOTarget) {
2949 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2950 if (!rt || mipLevel > 0) {
2951 SkASSERT(surface->asTexture());
2952 GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture());
2953 GrGLuint texID = texture->textureID();
2954 GrGLenum target = texture->target();
2955 GrGLuint* tempFBOID;
2956 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
2957
2958 if (0 == *tempFBOID) {
2959 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
2960 }
2961
2962 this->bindFramebuffer(fboTarget, *tempFBOID);
2963 GR_GL_CALL(
2964 this->glInterface(),
2965 FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel));
2966 if (mipLevel == 0) {
2967 texture->baseLevelWasBoundToFBO();
2968 }
2969 } else {
2970 rt->bindForPixelOps(fboTarget);
2971 }
2972 }
2973
2974 void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) {
2975 // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets to an FBO, so detach them from that FBO here.
2976 if (mipLevel > 0 || !surface->asRenderTarget()) {
2977 SkASSERT(surface->asTexture());
2978 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
2979 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
2980 GR_GL_COLOR_ATTACHMENT0,
2981 textureTarget,
2982 0,
2983 0));
2984 }
2985 }
2986
2987 void GrGLGpu::onFBOChanged() {
2988 if (this->caps()->workarounds().flush_on_framebuffer_change) {
2989 this->flush(FlushType::kForce);
2990 }
2991 #ifdef SK_DEBUG
2992 if (fIsExecutingCommandBuffer_DebugOnly) {
2993 SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
2994 "This will severely hurt performance.\n");
2995 }
2996 #endif
2997 }
2998
2999 void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) {
3000 GL_CALL(BindFramebuffer(target, fboid));
3001 if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) {
3002 fBoundDrawFramebuffer = fboid;
3003 }
3004 this->onFBOChanged();
3005 }
3006
3007 void GrGLGpu::deleteFramebuffer(GrGLuint fboid) {
3008 // We're relying on the GL state shadowing being correct in the workaround code below so we
3009 // need to handle a dirty context.
3010 this->handleDirtyContext();
3011 if (fboid == fBoundDrawFramebuffer &&
3012 this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) {
3013 // This workaround only applies to deleting currently bound framebuffers
3014 // on Adreno 420. Because this is a somewhat rare case, rather than
3015 // tracking all the attachments of every framebuffer we just always
3016 // unbind all attachments.
3017 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3018 GR_GL_RENDERBUFFER, 0));
3019 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
3020 GR_GL_RENDERBUFFER, 0));
3021 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
3022 GR_GL_RENDERBUFFER, 0));
3023 }
3024
3025 GL_CALL(DeleteFramebuffers(1, &fboid));
3026
3027 // Deleting the currently bound framebuffer rebinds to 0.
3028 if (fboid == fBoundDrawFramebuffer) {
3029 this->onFBOChanged();
3030 }
3031 }
3032
3033 bool GrGLGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3034 const SkIPoint& dstPoint) {
3035 // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
3036 // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites().
3037 bool preferCopy = SkToBool(dst->asRenderTarget());
3038 auto dstFormat = dst->backendFormat().asGLFormat();
3039 if (preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) {
3040 GrRenderTarget* dstRT = dst->asRenderTarget();
3041 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3042 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstPoint)) {
3043 return true;
3044 }
3045 }
3046
3047 if (can_copy_texsubimage(dst, src, this->glCaps())) {
3048 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
3049 return true;
3050 }
3051
3052 if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps())) {
3053 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint);
3054 }
3055
3056 if (!preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) {
3057 GrRenderTarget* dstRT = dst->asRenderTarget();
3058 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3059 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstPoint)) {
3060 return true;
3061 }
3062 }
3063
3064 return false;
3065 }
3066
3067 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
3068 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
3069
3070 int progIdx = TextureToCopyProgramIdx(srcTex);
3071 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3072 GrSLType samplerType = GrSLCombinedSamplerTypeForTextureType(srcTex->textureType());
3073
3074 if (!fCopyProgramArrayBuffer) {
3075 static const GrGLfloat vdata[] = {
3076 0, 0,
3077 0, 1,
3078 1, 0,
3079 1, 1
3080 };
3081 fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
3082 kStatic_GrAccessPattern, vdata);
3083 }
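// The four vertices above describe a unit square drawn as a triangle strip;
// the copy VS built below maps it onto the dst rect via u_posXform and onto
// the src rect via u_texCoordXform, so this one buffer serves every
// copy-as-draw.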
3084 if (!fCopyProgramArrayBuffer) {
3085 return false;
3086 }
3087
3088 SkASSERT(!fCopyPrograms[progIdx].fProgram);
3089 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
3090 if (!fCopyPrograms[progIdx].fProgram) {
3091 return false;
3092 }
3093
3094 GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::TypeModifier::In);
3095 GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
3096 GrShaderVar::TypeModifier::Uniform);
3097 GrShaderVar uPosXform("u_posXform", kHalf4_GrSLType, GrShaderVar::TypeModifier::Uniform);
3098 GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::TypeModifier::Uniform);
3099 GrShaderVar vTexCoord("v_texCoord", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out);
3100 GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::TypeModifier::Out);
3101
3102 SkString vshaderTxt;
3103 if (shaderCaps->noperspectiveInterpolationSupport()) {
3104 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3105 vshaderTxt.appendf("#extension %s : require\n", extension);
3106 }
3107 vTexCoord.addModifier("noperspective");
3108 }
3109
3110 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3111 vshaderTxt.append(";");
3112 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3113 vshaderTxt.append(";");
3114 uPosXform.appendDecl(shaderCaps, &vshaderTxt);
3115 vshaderTxt.append(";");
3116 vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
3117 vshaderTxt.append(";");
3118
3119 vshaderTxt.append(
3120 "// Copy Program VS\n"
3121 "void main() {"
3122 " v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);"
3123 " sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
3124 " sk_Position.zw = half2(0, 1);"
3125 "}"
3126 );
3127
3128 SkString fshaderTxt;
3129 if (shaderCaps->noperspectiveInterpolationSupport()) {
3130 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3131 fshaderTxt.appendf("#extension %s : require\n", extension);
3132 }
3133 }
3134 vTexCoord.setTypeModifier(GrShaderVar::TypeModifier::In);
3135 vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
3136 fshaderTxt.append(";");
3137 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3138 fshaderTxt.append(";");
3139 fshaderTxt.appendf(
3140 "// Copy Program FS\n"
3141 "void main() {"
3142 " sk_FragColor = sample(u_texture, v_texCoord);"
3143 "}"
3144 );
3145
3146 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3147 SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size());
3148 SkSL::Program::Settings settings;
3149 SkSL::String glsl;
3150 std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(this, SkSL::ProgramKind::kVertex,
3151 sksl, settings, &glsl, errorHandler);
3152 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
3153 GR_GL_VERTEX_SHADER, glsl, fProgramCache->stats(),
3154 errorHandler);
3155 SkASSERT(program->fInputs == SkSL::Program::Inputs());
3156
3157 sksl.assign(fshaderTxt.c_str(), fshaderTxt.size());
3158 program = GrSkSLtoGLSL(this, SkSL::ProgramKind::kFragment, sksl, settings, &glsl,
3159 errorHandler);
3160 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
3161 GR_GL_FRAGMENT_SHADER, glsl,
3162 fProgramCache->stats(), errorHandler);
3163 SkASSERT(program->fInputs == SkSL::Program::Inputs());
3164
3165 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
3166
3167 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
3168 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
3169 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
3170 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
3171 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
3172 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
3173
3174 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3175
3176 GL_CALL(DeleteShader(vshader));
3177 GL_CALL(DeleteShader(fshader));
3178
3179 return true;
3180 }
3181
3182 bool GrGLGpu::createMipmapProgram(int progIdx) {
3183 const bool oddWidth = SkToBool(progIdx & 0x2);
3184 const bool oddHeight = SkToBool(progIdx & 0x1);
3185 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
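// Worked example of the progIdx encoding: progIdx 0b11 means both dimensions
// are odd, giving 4 taps; 0b10 or 0b01 gives 2 taps; 0b00 gives a single tap.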
3186
3187 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3188
3189 SkASSERT(!fMipmapPrograms[progIdx].fProgram);
3190 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
3191 if (!fMipmapPrograms[progIdx].fProgram) {
3192 return false;
3193 }
3194
3195 GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::TypeModifier::In);
3196 GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
3197 GrShaderVar::TypeModifier::Uniform);
3198 GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
3199 GrShaderVar::TypeModifier::Uniform);
3200 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
3201 GrShaderVar vTexCoords[] = {
3202 GrShaderVar("v_texCoord0", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
3203 GrShaderVar("v_texCoord1", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
3204 GrShaderVar("v_texCoord2", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
3205 GrShaderVar("v_texCoord3", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
3206 };
3207 GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType,GrShaderVar::TypeModifier::Out);
3208
3209 SkString vshaderTxt;
3210 if (shaderCaps->noperspectiveInterpolationSupport()) {
3211 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3212 vshaderTxt.appendf("#extension %s : require\n", extension);
3213 }
3214 vTexCoords[0].addModifier("noperspective");
3215 vTexCoords[1].addModifier("noperspective");
3216 vTexCoords[2].addModifier("noperspective");
3217 vTexCoords[3].addModifier("noperspective");
3218 }
3219
3220 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3221 vshaderTxt.append(";");
3222 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3223 vshaderTxt.append(";");
3224 for (int i = 0; i < numTaps; ++i) {
3225 vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
3226 vshaderTxt.append(";");
3227 }
3228
3229 vshaderTxt.append(
3230 "// Mipmap Program VS\n"
3231 "void main() {"
3232 " sk_Position.xy = a_vertex * half2(2) - half2(1);"
3233 " sk_Position.zw = half2(0, 1);"
3234 );
3235
3236 // Insert texture coordinate computation:
3237 if (oddWidth && oddHeight) {
3238 vshaderTxt.append(
3239 " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
3240 " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
3241 " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
3242 " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
3243 );
3244 } else if (oddWidth) {
3245 vshaderTxt.append(
3246 " v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
3247 " v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
3248 );
3249 } else if (oddHeight) {
3250 vshaderTxt.append(
3251 " v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
3252 " v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
3253 );
3254 } else {
3255 vshaderTxt.append(
3256 " v_texCoord0 = a_vertex.xy;"
3257 );
3258 }
3259
3260 vshaderTxt.append("}");
3261
3262 SkString fshaderTxt;
3263 if (shaderCaps->noperspectiveInterpolationSupport()) {
3264 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3265 fshaderTxt.appendf("#extension %s : require\n", extension);
3266 }
3267 }
3268 for (int i = 0; i < numTaps; ++i) {
3269 vTexCoords[i].setTypeModifier(GrShaderVar::TypeModifier::In);
3270 vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
3271 fshaderTxt.append(";");
3272 }
3273 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3274 fshaderTxt.append(";");
3275 fshaderTxt.append(
3276 "// Mipmap Program FS\n"
3277 "void main() {"
3278 );
3279
3280 if (oddWidth && oddHeight) {
3281 fshaderTxt.append(
3282 " sk_FragColor = (sample(u_texture, v_texCoord0) + "
3283 " sample(u_texture, v_texCoord1) + "
3284 " sample(u_texture, v_texCoord2) + "
3285 " sample(u_texture, v_texCoord3)) * 0.25;"
3286 );
3287 } else if (oddWidth || oddHeight) {
3288 fshaderTxt.append(
3289 " sk_FragColor = (sample(u_texture, v_texCoord0) + "
3290 " sample(u_texture, v_texCoord1)) * 0.5;"
3291 );
3292 } else {
3293 fshaderTxt.append(
3294 " sk_FragColor = sample(u_texture, v_texCoord0);"
3295 );
3296 }
3297
3298 fshaderTxt.append("}");
3299
3300 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3301 SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size());
3302 SkSL::Program::Settings settings;
3303 SkSL::String glsl;
3304 std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(this, SkSL::ProgramKind::kVertex,
3305 sksl, settings, &glsl, errorHandler);
3306 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
3307 GR_GL_VERTEX_SHADER, glsl,
3308 fProgramCache->stats(), errorHandler);
3309 SkASSERT(program->fInputs == SkSL::Program::Inputs());
3310
3311 sksl.assign(fshaderTxt.c_str(), fshaderTxt.size());
3312 program = GrSkSLtoGLSL(this, SkSL::ProgramKind::kFragment, sksl, settings, &glsl,
3313 errorHandler);
3314 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
3315 GR_GL_FRAGMENT_SHADER, glsl,
3316 fProgramCache->stats(), errorHandler);
3317 SkASSERT(program->fInputs == SkSL::Program::Inputs());
3318
3319 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
3320
3321 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
3322 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
3323 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3324 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
3325
3326 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3327
3328 GL_CALL(DeleteShader(vshader));
3329 GL_CALL(DeleteShader(fshader));
3330
3331 return true;
3332 }
3333
3334 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, bool drawToMultisampleFBO, GrSurface* src,
3335 const SkIRect& srcRect, const SkIPoint& dstPoint) {
3336 auto* srcTex = static_cast<GrGLTexture*>(src->asTexture());
3337 if (!srcTex) {
3338 return false;
3339 }
3340 // We don't swizzle at all in our copies.
3341 this->bindTexture(0, GrSamplerState::Filter::kNearest, GrSwizzle::RGBA(), srcTex);
3342 if (auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget())) {
3343 this->flushRenderTargetNoColorWrites(dstRT, drawToMultisampleFBO);
3344 } else {
3345 auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
3346 SkASSERT(dstTex);
3347 SkASSERT(!drawToMultisampleFBO);
3348 if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) {
3349 return false;
3350 }
3351 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
3352 fHWBoundRenderTargetUniqueID.makeInvalid();
3353 }
3354 int progIdx = TextureToCopyProgramIdx(srcTex);
3355 if (!fCopyPrograms[progIdx].fProgram) {
3356 if (!this->createCopyProgram(srcTex)) {
3357 SkDebugf("Failed to create copy program.\n");
3358 return false;
3359 }
3360 }
3361 this->flushViewport(SkIRect::MakeSize(dst->dimensions()),
3362 dst->height(),
3363 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
3364 int w = srcRect.width();
3365 int h = srcRect.height();
3366 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);
3367 this->flushProgram(fCopyPrograms[progIdx].fProgram);
3368 fHWVertexArrayState.setVertexArrayID(this, 0);
3369 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3370 attribs->enableVertexArrays(this, 1);
3371 attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3372 kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);
3373 // dst rect edges in NDC (-1 to 1)
3374 int dw = dst->width();
3375 int dh = dst->height();
3376 GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
3377 GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
3378 GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
3379 GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
3380 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
3381 GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
3382 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
3383 GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
3384 int sw = src->width();
3385 int sh = src->height();
3386 if (srcTex->textureType() != GrTextureType::kRectangle) {
3387 // src rect edges in normalized texture space (0 to 1)
3388 sx0 /= sw;
3389 sx1 /= sw;
3390 sy0 /= sh;
3391 sy1 /= sh;
3392 }
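// Example of the mapping above: with a 100px-wide dst, dstPoint.fX = 25 and
// w = 50 give dx0 = 2*25/100 - 1 = -0.5 and dx1 = 2*75/100 - 1 = 0.5; the VS
// then applies u_posXform as (scale, offset) = (dx1-dx0, dy1-dy0, dx0, dy0)
// to the unit-square vertices.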
3393 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
3394 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
3395 sx1 - sx0, sy1 - sy0, sx0, sy0));
3396 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
3397 this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA());
3398 this->flushConservativeRasterState(false);
3399 this->flushWireframeState(false);
3400 this->flushScissorTest(GrScissorTest::kDisabled);
3401 this->disableWindowRectangles();
3402 this->disableStencil();
3403 if (this->glCaps().srgbWriteControl()) {
3404 this->flushFramebufferSRGB(true);
3405 }
3406 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3407 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER);
3408 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3409 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3410 return true;
3411 }
3412
3413 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3414 const SkIPoint& dstPoint) {
3415 SkASSERT(can_copy_texsubimage(dst, src, this->glCaps()));
3416 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
3417 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
3418 SkASSERT(dstTex);
3419 // We modified the bound FBO
3420 fHWBoundRenderTargetUniqueID.makeInvalid();
3421
3422 this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID());
3423 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
3424 dstPoint.fX, dstPoint.fY,
3425 srcRect.fLeft, srcRect.fTop,
3426 srcRect.width(), srcRect.height()));
3427 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER);
3428 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3429 srcRect.width(), srcRect.height());
3430 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3431 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3432 }
3433
3434 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3435 const SkIPoint& dstPoint) {
3436 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps()));
3437 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3438 srcRect.width(), srcRect.height());
3439 if (dst == src) {
3440 if (SkIRect::Intersects(dstRect, srcRect)) {
3441 return false;
3442 }
3443 }
3444
3445 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget);
3446 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget);
3447 // We modified the bound FBO
3448 fHWBoundRenderTargetUniqueID.makeInvalid();
3449
3450 // BlitFramebuffer respects the scissor, so disable it.
3451 this->flushScissorTest(GrScissorTest::kDisabled);
3452 this->disableWindowRectangles();
3453
3454 GL_CALL(BlitFramebuffer(srcRect.fLeft,
3455 srcRect.fTop,
3456 srcRect.fRight,
3457 srcRect.fBottom,
3458 dstRect.fLeft,
3459 dstRect.fTop,
3460 dstRect.fRight,
3461 dstRect.fBottom,
3462 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
3463 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER);
3464 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER);
3465
3466 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3467 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3468 return true;
3469 }
3470
3471 bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
3472 auto glTex = static_cast<GrGLTexture*>(texture);
3473 // Mipmaps are only supported on 2D textures:
3474 if (GR_GL_TEXTURE_2D != glTex->target()) {
3475 return false;
3476 }
3477 GrGLFormat format = glTex->format();
3478 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
3479 // Uses draw calls to do a series of downsample operations to successive mips.
3480
3481 // The manual approach requires the ability to limit which level we're sampling and that the
3482 // destination can be bound to a FBO:
3483 if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) {
3484 GrGLenum target = glTex->target();
3485 this->bindTextureToScratchUnit(target, glTex->textureID());
3486 GL_CALL(GenerateMipmap(glTex->target()));
3487 return true;
3488 }
3489
3490 int width = texture->width();
3491 int height = texture->height();
3492 int levelCount = SkMipmap::ComputeLevelCount(width, height) + 1;
3493 SkASSERT(levelCount == texture->maxMipmapLevel() + 1);
3494
3495 // Create (if necessary), then bind temporary FBO:
3496 if (0 == fTempDstFBOID) {
3497 GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
3498 }
3499 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
3500 fHWBoundRenderTargetUniqueID.makeInvalid();
3501
3502 // Bind the texture, to get things configured for filtering.
3503 // We'll be changing our base level further below:
3504 this->setTextureUnit(0);
3505 // The mipmap program does not do any swizzling.
3506 this->bindTexture(0, GrSamplerState::Filter::kLinear, GrSwizzle::RGBA(), glTex);
3507
3508 // Vertex data:
3509 if (!fMipmapProgramArrayBuffer) {
3510 static const GrGLfloat vdata[] = {
3511 0, 0,
3512 0, 1,
3513 1, 0,
3514 1, 1
3515 };
3516 fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
3517 kStatic_GrAccessPattern, vdata);
3518 }
3519 if (!fMipmapProgramArrayBuffer) {
3520 return false;
3521 }
3522
3523 fHWVertexArrayState.setVertexArrayID(this, 0);
3524
3525 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3526 attribs->enableVertexArrays(this, 1);
3527 attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3528 kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);
3529
3530 // Set "simple" state once:
3531 this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA());
3532 this->flushScissorTest(GrScissorTest::kDisabled);
3533 this->disableWindowRectangles();
3534 this->disableStencil();
3535
3536 // Do all the blits:
3537 width = texture->width();
3538 height = texture->height();
3539
3540 for (GrGLint level = 1; level < levelCount; ++level) {
3541 // Get and bind the program for this particular downsample (filter shape can vary):
3542 int progIdx = TextureSizeToMipmapProgramIdx(width, height);
3543 if (!fMipmapPrograms[progIdx].fProgram) {
3544 if (!this->createMipmapProgram(progIdx)) {
3545 SkDebugf("Failed to create mipmap program.\n");
3546 // Invalidate all params to cover base level change in a previous iteration.
3547 glTex->textureParamsModified();
3548 return false;
3549 }
3550 }
3551 this->flushProgram(fMipmapPrograms[progIdx].fProgram);
3552
3553 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
3554 const float invWidth = 1.0f / width;
3555 const float invHeight = 1.0f / height;
3556 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3557 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
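// Worked example of that packing: a 5-texel-wide source level gives
// u_texCoordXform.xy = (1/5, 4/5) = (0.2, 0.8), so the odd-width VS variant
// samples at x*0.8 and x*0.8 + 0.2 -- two taps one source texel apart in
// normalized coordinates.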
3558 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
3559
3560 // Only sample from previous mip
3561 SkASSERT(this->glCaps().mipmapLevelControlSupport());
3562 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
3563
3564 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
3565 glTex->textureID(), level));
3566
3567 width = std::max(1, width / 2);
3568 height = std::max(1, height / 2);
3569 this->flushViewport(SkIRect::MakeWH(width, height), height, kTopLeft_GrSurfaceOrigin);
3570
3571 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3572 }
3573
3574 // Unbind:
3575 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3576 GR_GL_TEXTURE_2D, 0, 0));
3577
3578 // We modified the base level param.
3579 GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState();
3580 // We drew the 2nd to last level into the last level.
3581 nonsamplerState.fBaseMipMapLevel = levelCount - 2;
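// e.g. a 16x16 texture has levelCount = 5; the final loop iteration sampled
// level 3 while rendering level 4, so the shadowed base level is recorded as
// levelCount - 2 = 3 to match what was left in GL above.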
3582 glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
3583
3584 return true;
3585 }
3586
3587 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
3588 SkASSERT(type);
3589 switch (type) {
3590 case kTexture_GrXferBarrierType: {
3591 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
3592 SkASSERT(glrt->asTexture());
3593 SkASSERT(!glrt->isFBO0(false/*multisample*/));
3594 if (glrt->requiresManualMSAAResolve()) {
3595 // The render target uses separate storage so no need for glTextureBarrier.
3596 // FIXME: The render target will resolve automatically when its texture is bound,
3597 // but we could resolve only the bounds that will be read if we do it here instead.
3598 return;
3599 }
3600 SkASSERT(this->caps()->textureBarrierSupport());
3601 GL_CALL(TextureBarrier());
3602 return;
3603 }
3604 case kBlend_GrXferBarrierType:
3605 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
3606 this->caps()->blendEquationSupport());
3607 GL_CALL(BlendBarrier());
3608 return;
3609 default: break; // placate compiler warnings that kNone not handled
3610 }
3611 }
3612
3613 void GrGLGpu::insertManualFramebufferBarrier() {
3614 SkASSERT(this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw());
3615 GL_CALL(MemoryBarrier(GR_GL_FRAMEBUFFER_BARRIER_BIT));
3616 }
3617
3618 GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions,
3619 const GrBackendFormat& format,
3620 GrRenderable renderable,
3621 GrMipmapped mipMapped,
3622 GrProtected isProtected) {
3623 // We don't support protected textures in GL.
3624 if (isProtected == GrProtected::kYes) {
3625 return {};
3626 }
3627
3628 this->handleDirtyContext();
3629
3630 GrGLFormat glFormat = format.asGLFormat();
3631 if (glFormat == GrGLFormat::kUnknown) {
3632 return {};
3633 }
3634
3635 int numMipLevels = 1;
3636 if (mipMapped == GrMipmapped::kYes) {
3637 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
3638 }
3639
3640 // Compressed formats go through onCreateCompressedBackendTexture
3641 SkASSERT(!GrGLFormatIsCompressed(glFormat));
3642
3643 GrGLTextureInfo info;
3644 GrGLTextureParameters::SamplerOverriddenState initialState;
3645
3646 if (glFormat == GrGLFormat::kUnknown) {
3647 return {};
3648 }
3649 switch (format.textureType()) {
3650 case GrTextureType::kNone:
3651 case GrTextureType::kExternal:
3652 return {};
3653 case GrTextureType::k2D:
3654 info.fTarget = GR_GL_TEXTURE_2D;
3655 break;
3656 case GrTextureType::kRectangle:
3657 if (!this->glCaps().rectangleTextureSupport() || mipMapped == GrMipmapped::kYes) {
3658 return {};
3659 }
3660 info.fTarget = GR_GL_TEXTURE_RECTANGLE;
3661 break;
3662 }
3663 info.fFormat = GrGLFormatToEnum(glFormat);
3664 info.fID = this->createTexture(dimensions, glFormat, info.fTarget, renderable, &initialState,
3665 numMipLevels);
3666 if (!info.fID) {
3667 return {};
3668 }
3669
3670 // Unbind this texture from the scratch texture unit.
3671 this->bindTextureToScratchUnit(info.fTarget, 0);
3672
3673 auto parameters = sk_make_sp<GrGLTextureParameters>();
3674 // The non-sampler params are still at their default values.
3675 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
3676 fResetTimestampForTextureParameters);
3677
3678 return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info,
3679 std::move(parameters));
3680 }
3681
3682 bool GrGLGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
3683 sk_sp<GrRefCntedCallback> finishedCallback,
3684 std::array<float, 4> color) {
3685 this->handleDirtyContext();
3686
3687 GrGLTextureInfo info;
3688 SkAssertResult(backendTexture.getGLTextureInfo(&info));
3689
3690 int numMipLevels = 1;
3691 if (backendTexture.hasMipmaps()) {
3692 numMipLevels =
3693 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
3694 }
3695
3696 GrGLFormat glFormat = GrGLFormatFromGLEnum(info.fFormat);
3697
3698 this->bindTextureToScratchUnit(info.fTarget, info.fID);
3699
3700 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
3701 // so that the uploads go to the right levels.
3702 if (numMipLevels && this->glCaps().mipmapLevelControlSupport()) {
3703 auto params = backendTexture.getGLTextureParams();
3704 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
3705 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
3706 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
3707 nonsamplerState.fBaseMipMapLevel = 0;
3708 }
3709 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
3710 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
3711 nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
3712 }
3713 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
3714 }
3715
3716 uint32_t levelMask = (1 << numMipLevels) - 1;
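// e.g. numMipLevels = 3 gives levelMask = 0b111, which (presumably one bit
// per mip level) asks uploadColorToTex to fill levels 0 through 2.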
3717 bool result = this->uploadColorToTex(glFormat,
3718 backendTexture.dimensions(),
3719 info.fTarget,
3720 color,
3721 levelMask);
3722
3723 // Unbind this texture from the scratch texture unit.
3724 this->bindTextureToScratchUnit(info.fTarget, 0);
3725 return result;
3726 }
3727
3728 void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) {
3729 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
3730
3731 GrGLTextureInfo info;
3732 if (tex.getGLTextureInfo(&info)) {
3733 GL_CALL(DeleteTextures(1, &info.fID));
3734 }
3735 }
3736
3737 bool GrGLGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
3738 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
3739
3740 sk_sp<GrGLProgram> tmp = fProgramCache->findOrCreateProgram(this->getContext(),
3741 desc, programInfo, &stat);
3742 if (!tmp) {
3743 return false;
3744 }
3745
3746 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
3747 }
3748
3749 #if GR_TEST_UTILS
3750
3751 bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
3752 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
3753
3754 GrGLTextureInfo info;
3755 if (!tex.getGLTextureInfo(&info)) {
3756 return false;
3757 }
3758
3759 GrGLboolean result;
3760 GL_CALL_RET(result, IsTexture(info.fID));
3761
3762 return (GR_GL_TRUE == result);
3763 }
3764
3765 GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
3766 GrColorType colorType,
3767 int sampleCnt,
3768 GrProtected isProtected) {
3769 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
3770 dimensions.height() > this->caps()->maxRenderTargetSize()) {
3771 return {};
3772 }
3773 if (isProtected == GrProtected::kYes) {
3774 return {};
3775 }
3776
3777 this->handleDirtyContext();
3778 auto format = this->glCaps().getFormatFromColorType(colorType);
3779 sampleCnt = this->glCaps().getRenderTargetSampleCount(sampleCnt, format);
3780 if (!sampleCnt) {
3781 return {};
3782 }
3783 // We make a texture instead of a render target if we're using a
3784 // "multisampled_render_to_texture" style extension or have a BGRA format that
3785 // is allowed for textures but not render buffer internal formats.
3786 bool useTexture = false;
3787 if (sampleCnt > 1 && !this->glCaps().usesMSAARenderBuffers()) {
3788 useTexture = true;
3789 } else if (format == GrGLFormat::kBGRA8 &&
3790 this->glCaps().getRenderbufferInternalFormat(GrGLFormat::kBGRA8) != GR_GL_BGRA8) {
3791 // We have a BGRA extension that doesn't support BGRA render buffers. We can use a texture
3792 // unless we've been asked for MSAA. Note we already checked above for render-to-
3793 // multisampled-texture style extensions.
3794 if (sampleCnt > 1) {
3795 return {};
3796 }
3797 useTexture = true;
3798 }
3799 int sFormatIdx = this->getCompatibleStencilIndex(format);
3800 if (sFormatIdx < 0) {
3801 return {};
3802 }
3803 GrGLuint colorID = 0;
3804 GrGLuint stencilID = 0;
3805 GrGLFramebufferInfo info;
3806 info.fFBOID = 0;
3807 info.fFormat = GrGLFormatToEnum(format);
3808
3809 auto deleteIDs = [&](bool saveFBO = false) {
3810 if (colorID) {
3811 if (useTexture) {
3812 GL_CALL(DeleteTextures(1, &colorID));
3813 } else {
3814 GL_CALL(DeleteRenderbuffers(1, &colorID));
3815 }
3816 }
3817 if (stencilID) {
3818 GL_CALL(DeleteRenderbuffers(1, &stencilID));
3819 }
3820 if (!saveFBO && info.fFBOID) {
3821 this->deleteFramebuffer(info.fFBOID);
3822 }
3823 };
3824
3825 if (useTexture) {
3826 GL_CALL(GenTextures(1, &colorID));
3827 } else {
3828 GL_CALL(GenRenderbuffers(1, &colorID));
3829 }
3830 GL_CALL(GenRenderbuffers(1, &stencilID));
3831 if (!stencilID || !colorID) {
3832 deleteIDs();
3833 return {};
3834 }
3835
3836 GL_CALL(GenFramebuffers(1, &info.fFBOID));
3837 if (!info.fFBOID) {
3838 deleteIDs();
3839 return {};
3840 }
3841
3842 this->invalidateBoundRenderTarget();
3843
3844 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
3845 if (useTexture) {
3846 GrGLTextureParameters::SamplerOverriddenState initialState;
3847 colorID = this->createTexture(dimensions, format, GR_GL_TEXTURE_2D, GrRenderable::kYes,
3848 &initialState, 1);
3849 if (!colorID) {
3850 deleteIDs();
3851 return {};
3852 }
3853 if (sampleCnt == 1) {
3854 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3855 GR_GL_TEXTURE_2D, colorID, 0));
3856 } else {
3857 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3858 GR_GL_TEXTURE_2D, colorID, 0, sampleCnt));
3859 }
3860 } else {
3861 GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format);
3862 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
3863 if (sampleCnt == 1) {
3864 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, dimensions.width(),
3865 dimensions.height()));
3866 } else {
3867 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt, renderBufferFormat,
3868 dimensions.width(), dimensions.height())) {
3869 deleteIDs();
3870 return {};
3871 }
3872 }
3873 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3874 GR_GL_RENDERBUFFER, colorID));
3875 }
3876 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
3877 auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx];
3878 if (sampleCnt == 1) {
3879 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, GrGLFormatToEnum(stencilBufferFormat),
3880 dimensions.width(), dimensions.height()));
3881 } else {
3882 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt,
3883 GrGLFormatToEnum(stencilBufferFormat),
3884 dimensions.width(), dimensions.height())) {
3885 deleteIDs();
3886 return {};
3887 }
3888 }
3889 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER,
3890 stencilID));
3891 if (GrGLFormatIsPackedDepthStencil(this->glCaps().stencilFormats()[sFormatIdx])) {
3892 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
3893 GR_GL_RENDERBUFFER, stencilID));
3894 }
3895
3896 // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
3897 // has this rule that if a renderbuffer/texture is deleted and a FBO other than the current FBO
3898 // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the
3899 // renderbuffers/texture.
3900 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
3901 deleteIDs(/* saveFBO = */ true);
3902
3903 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
3904 GrGLenum status;
3905 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
3906 if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
3907 this->deleteFramebuffer(info.fFBOID);
3908 return {};
3909 }
3910
3911 auto stencilBits = SkToInt(GrGLFormatStencilBits(this->glCaps().stencilFormats()[sFormatIdx]));
3912
3913 GrBackendRenderTarget beRT = GrBackendRenderTarget(dimensions.width(), dimensions.height(),
3914 sampleCnt, stencilBits, info);
3915 SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat()));
3916 return beRT;
3917 }
3918
3919 void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
3920 SkASSERT(GrBackendApi::kOpenGL == backendRT.backend());
3921 GrGLFramebufferInfo info;
3922 if (backendRT.getGLFramebufferInfo(&info)) {
3923 if (info.fFBOID) {
3924 this->deleteFramebuffer(info.fFBOID);
3925 }
3926 }
3927 }
3928 #endif
3929
3930 ///////////////////////////////////////////////////////////////////////////////
3931
3932 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
3933 const GrBuffer* ibuf) {
3934 SkASSERT(!ibuf || ibuf->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(ibuf)->isMapped());
3935 GrGLAttribArrayState* attribState;
3936
3937 if (gpu->glCaps().isCoreProfile()) {
3938 if (!fCoreProfileVertexArray) {
3939 GrGLuint arrayID;
3940 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
3941 int attrCount = gpu->glCaps().maxVertexAttributes();
3942 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
3943 }
3944 if (ibuf) {
3945 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
3946 } else {
3947 attribState = fCoreProfileVertexArray->bind(gpu);
3948 }
3949 } else {
3950 if (ibuf) {
3951 // bindBuffer implicitly binds VAO 0 when binding an index buffer.
3952 gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
3953 } else {
3954 this->setVertexArrayID(gpu, 0);
3955 }
3956 int attrCount = gpu->glCaps().maxVertexAttributes();
3957 if (fDefaultVertexArrayAttribState.count() != attrCount) {
3958 fDefaultVertexArrayAttribState.resize(attrCount);
3959 }
3960 attribState = &fDefaultVertexArrayAttribState;
3961 }
3962 return attribState;
3963 }
3964
3965 void GrGLGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
3966 GrGpuFinishedContext finishedContext) {
3967 fFinishCallbacks.add(finishedProc, finishedContext);
3968 }
3969
3970 void GrGLGpu::flush(FlushType flushType) {
3971 if (fNeedsGLFlush || flushType == FlushType::kForce) {
3972 GL_CALL(Flush());
3973 fNeedsGLFlush = false;
3974 }
3975 }
3976
3977 bool GrGLGpu::onSubmitToGpu(bool syncCpu) {
3978 if (syncCpu || (!fFinishCallbacks.empty() && !this->caps()->fenceSyncSupport())) {
3979 this->finishOutstandingGpuWork();
3980 fFinishCallbacks.callAll(true);
3981 } else {
3982 this->flush();
3983 // See if any previously inserted finish procs are good to go.
3984 fFinishCallbacks.check();
3985 }
3986 if (!this->glCaps().skipErrorChecks()) {
3987 this->clearErrorsAndCheckForOOM();
3988 }
3989 return true;
3990 }
3991
3992 void GrGLGpu::submit(GrOpsRenderPass* renderPass) {
3993 // The GrGLOpsRenderPass doesn't buffer ops so there is nothing to do here
3994 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
3995 fCachedOpsRenderPass->reset();
3996 }
3997
3998 GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() {
3999 if (!this->caps()->fenceSyncSupport()) {
4000 return 0;
4001 }
4002 GrGLsync sync;
4003 if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
4004 static_assert(sizeof(GrGLsync) >= sizeof(GrGLuint));
4005 GrGLuint fence = 0;
4006 GL_CALL(GenFences(1, &fence));
4007 GL_CALL(SetFence(fence, GR_GL_ALL_COMPLETED));
4008 sync = reinterpret_cast<GrGLsync>(static_cast<intptr_t>(fence));
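// GL_NV_fence handles are GLuint names rather than sync pointers, so the
// name is packed into the pointer-sized GrGLsync here (the static_assert
// above guarantees it fits); deleteSync() undoes this cast.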
4009 } else {
4010 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4011 }
4012 this->setNeedsFlush();
4013 static_assert(sizeof(GrFence) >= sizeof(GrGLsync));
4014 return (GrFence)sync;
4015 }
4016
4017 bool GrGLGpu::waitSync(GrGLsync sync, uint64_t timeout, bool flush) {
4018 if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
4019 GrGLuint nvFence = static_cast<GrGLuint>(reinterpret_cast<intptr_t>(sync));
4020 if (!timeout) {
4021 if (flush) {
4022 this->flush(FlushType::kForce);
4023 }
4024 GrGLboolean result;
4025 GL_CALL_RET(result, TestFence(nvFence));
4026 return result == GR_GL_TRUE;
4027 }
4028 // Ignore non-zero timeouts. GL_NV_fence has no timeout functionality.
4029 // If this really becomes necessary we could poll TestFence().
4030 // FinishFence always flushes so no need to check flush param.
4031 GL_CALL(FinishFence(nvFence));
4032 return true;
4033 } else {
4034 GrGLbitfield flags = flush ? GR_GL_SYNC_FLUSH_COMMANDS_BIT : 0;
4035 GrGLenum result;
4036 GL_CALL_RET(result, ClientWaitSync(sync, flags, timeout));
4037 return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
4038 }
4039 }
4040
4041 bool GrGLGpu::waitFence(GrFence fence) {
4042 if (!this->caps()->fenceSyncSupport()) {
4043 return true;
4044 }
4045 return this->waitSync(reinterpret_cast<GrGLsync>(fence), 0, false);
4046 }
4047
4048 void GrGLGpu::deleteFence(GrFence fence) const {
4049 if (this->caps()->fenceSyncSupport()) {
4050 this->deleteSync(reinterpret_cast<GrGLsync>(fence));
4051 }
4052 }
4053
4054 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) {
4055 SkASSERT(this->caps()->semaphoreSupport());
4056 return GrGLSemaphore::Make(this, isOwned);
4057 }
4058
4059 std::unique_ptr<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
4060 GrSemaphoreWrapType /* wrapType */,
4061 GrWrapOwnership ownership) {
4062 SkASSERT(this->caps()->semaphoreSupport());
4063 return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership);
4064 }
4065
4066 void GrGLGpu::insertSemaphore(GrSemaphore* semaphore) {
4067 SkASSERT(semaphore);
4068 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4069
4070 GrGLsync sync;
4071 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4072 glSem->setSync(sync);
4073 this->setNeedsFlush();
4074 }
4075
4076 void GrGLGpu::waitSemaphore(GrSemaphore* semaphore) {
4077 SkASSERT(semaphore);
4078 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4079
4080 GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
4081 }
4082
4083 void GrGLGpu::checkFinishProcs() {
4084 fFinishCallbacks.check();
4085 }
4086
4087 void GrGLGpu::finishOutstandingGpuWork() {
4088 GL_CALL(Finish());
4089 }
4090
4091 void GrGLGpu::clearErrorsAndCheckForOOM() {
4092 while (this->getErrorAndCheckForOOM() != GR_GL_NO_ERROR) {}
4093 }
4094
4095 GrGLenum GrGLGpu::getErrorAndCheckForOOM() {
4096 #if GR_GL_CHECK_ERROR
4097 if (this->glInterface()->checkAndResetOOMed()) {
4098 this->setOOMed();
4099 }
4100 #endif
4101 GrGLenum error = this->fGLContext->glInterface()->fFunctions.fGetError();
4102 if (error == GR_GL_OUT_OF_MEMORY) {
4103 this->setOOMed();
4104 }
4105 return error;
4106 }
4107
4108 void GrGLGpu::deleteSync(GrGLsync sync) const {
4109 if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
4110 GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync));
4111 GL_CALL(DeleteFences(1, &nvFence));
4112 } else {
4113 GL_CALL(DeleteSync(sync));
4114 }
4115 }
4116
4117 std::unique_ptr<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
4118 // Set up a semaphore to be signaled once the data is ready, and flush GL
4119 std::unique_ptr<GrSemaphore> semaphore = this->makeSemaphore(true);
4120 SkASSERT(semaphore);
4121 this->insertSemaphore(semaphore.get());
4122 // We must call flush here to make sure the GrGLSync object gets created and sent to the gpu.
4123 this->flush(FlushType::kForce);
4124
4125 return semaphore;
4126 }
4127
4128 int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) {
4129 switch (GrSLCombinedSamplerTypeForTextureType(texture->textureType())) {
4130 case kTexture2DSampler_GrSLType:
4131 return 0;
4132 case kTexture2DRectSampler_GrSLType:
4133 return 1;
4134 case kTextureExternalSampler_GrSLType:
4135 return 2;
4136 default:
4137 SK_ABORT("Unexpected sampler type");
4138 }
4139 }
4140
4141 #ifdef SK_ENABLE_DUMP_GPU
4142 #include "src/utils/SkJSONWriter.h"
4143 void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const {
4144 // We are called by the base class, which has already called beginObject(). We choose to nest
4145 // all of our caps information in a named sub-object.
4146 writer->beginObject("GL GPU");
4147
4148 const GrGLubyte* str;
4149 GL_CALL_RET(str, GetString(GR_GL_VERSION));
4150 writer->appendString("GL_VERSION", (const char*)(str));
4151 GL_CALL_RET(str, GetString(GR_GL_RENDERER));
4152 writer->appendString("GL_RENDERER", (const char*)(str));
4153 GL_CALL_RET(str, GetString(GR_GL_VENDOR));
4154 writer->appendString("GL_VENDOR", (const char*)(str));
4155 GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
4156 writer->appendString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));
4157
4158 writer->appendName("extensions");
4159 glInterface()->fExtensions.dumpJSON(writer);
4160
4161 writer->endObject();
4162 }
4163 #endif
4164