1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/gl/GrGLGpu.h"
9
10 #include "include/core/SkAlphaType.h"
11 #include "include/core/SkColor.h"
12 #include "include/core/SkColorSpace.h"
13 #include "include/core/SkData.h"
14 #include "include/core/SkRect.h"
15 #include "include/core/SkSize.h"
16 #include "include/core/SkString.h"
17 #include "include/core/SkTextureCompressionType.h"
18 #include "include/core/SkTypes.h"
19 #include "include/gpu/GpuTypes.h"
20 #include "include/gpu/ganesh/GrBackendSurface.h"
21 #include "include/gpu/ganesh/GrContextOptions.h"
22 #include "include/gpu/ganesh/GrDirectContext.h"
23 #include "include/gpu/ganesh/GrDriverBugWorkarounds.h"
24 #include "include/gpu/ganesh/GrTypes.h"
25 #include "include/gpu/ganesh/gl/GrGLConfig.h"
26 #include "include/private/base/SkFloatingPoint.h"
27 #include "include/private/base/SkMath.h"
28 #include "include/private/base/SkPoint_impl.h"
29 #include "include/private/base/SkTemplates.h"
30 #include "include/private/base/SkTo.h"
31 #include "src/base/SkScopeExit.h"
32 #include "src/core/SkCompressedDataUtils.h"
33 #include "src/core/SkLRUCache.h"
34 #include "src/core/SkMipmap.h"
35 #include "src/core/SkSLTypeShared.h"
36 #include "src/core/SkTraceEvent.h"
37 #include "src/gpu/RefCntedCallback.h"
38 #include "src/gpu/SkRenderEngineAbortf.h"
39 #include "src/gpu/Swizzle.h"
40 #include "src/gpu/ganesh/GrAttachment.h"
41 #include "src/gpu/ganesh/GrBackendSurfacePriv.h"
42 #include "src/gpu/ganesh/GrBackendUtils.h"
43 #include "src/gpu/ganesh/GrBuffer.h"
44 #include "src/gpu/ganesh/GrDataUtils.h"
45 #include "src/gpu/ganesh/GrDirectContextPriv.h"
46 #include "src/gpu/ganesh/GrGpuBuffer.h"
47 #include "src/gpu/ganesh/GrImageInfo.h"
48 #include "src/gpu/ganesh/GrPipeline.h"
49 #include "src/gpu/ganesh/GrProgramInfo.h"
50 #include "src/gpu/ganesh/GrRenderTarget.h"
51 #include "src/gpu/ganesh/GrSemaphore.h"
52 #include "src/gpu/ganesh/GrShaderCaps.h"
53 #include "src/gpu/ganesh/GrShaderVar.h"
54 #include "src/gpu/ganesh/GrStagingBufferManager.h"
55 #include "src/gpu/ganesh/GrSurface.h"
56 #include "src/gpu/ganesh/GrTexture.h"
57 #include "src/gpu/ganesh/GrUtil.h"
58 #include "src/gpu/ganesh/GrWindowRectangles.h"
59 #include "src/gpu/ganesh/gl/GrGLAttachment.h"
60 #include "src/gpu/ganesh/gl/GrGLBackendSurfacePriv.h"
61 #include "src/gpu/ganesh/gl/GrGLBuffer.h"
62 #include "src/gpu/ganesh/gl/GrGLOpsRenderPass.h"
63 #include "src/gpu/ganesh/gl/GrGLProgram.h"
64 #include "src/gpu/ganesh/gl/GrGLSemaphore.h"
65 #include "src/gpu/ganesh/gl/GrGLTextureRenderTarget.h"
66 #include "src/gpu/ganesh/gl/builders/GrGLShaderStringBuilder.h"
67 #include "src/sksl/SkSLProgramKind.h"
68 #include "src/sksl/SkSLProgramSettings.h"
69 #include "src/sksl/ir/SkSLProgram.h"
70
71 #include <algorithm>
72 #include <cmath>
73 #include <functional>
74 #include <memory>
75 #include <string>
76 #include <utility>
77
78 using namespace skia_private;
79
80 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
81 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
82
83 #define GL_ALLOC_CALL(call) \
84 [&] { \
85 if (this->glCaps().skipErrorChecks()) { \
86 GR_GL_CALL(this->glInterface(), call); \
87 return static_cast<GrGLenum>(GR_GL_NO_ERROR); \
88 } else { \
89 this->clearErrorsAndCheckForOOM(); \
90 GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \
91 return this->getErrorAndCheckForOOM(); \
92 } \
93 }()
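// Typical usage of GL_ALLOC_CALL, mirroring the allocation sites later in this file: the macro
// returns the GL error (or GR_GL_NO_ERROR when error checks are skipped), so callers capture it
// and bail out on failure. The argument names below are placeholders, not real variables:
//
//   GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, levels, internalFormat, w, h));
//   if (error != GR_GL_NO_ERROR) {
//       return false;
//   }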
94
95 //#define USE_NSIGHT
96
97 ///////////////////////////////////////////////////////////////////////////////
98
99 static const GrGLenum gXfermodeEquation2Blend[] = {
100 // Basic OpenGL blend equations.
101 GR_GL_FUNC_ADD,
102 GR_GL_FUNC_SUBTRACT,
103 GR_GL_FUNC_REVERSE_SUBTRACT,
104
105 // GL_KHR_blend_equation_advanced.
106 GR_GL_SCREEN,
107 GR_GL_OVERLAY,
108 GR_GL_DARKEN,
109 GR_GL_LIGHTEN,
110 GR_GL_COLORDODGE,
111 GR_GL_COLORBURN,
112 GR_GL_HARDLIGHT,
113 GR_GL_SOFTLIGHT,
114 GR_GL_DIFFERENCE,
115 GR_GL_EXCLUSION,
116 GR_GL_MULTIPLY,
117 GR_GL_HSL_HUE,
118 GR_GL_HSL_SATURATION,
119 GR_GL_HSL_COLOR,
120 GR_GL_HSL_LUMINOSITY,
121
122 // Illegal... needs to map to something.
123 GR_GL_FUNC_ADD,
124 };
125 static_assert(0 == (int)skgpu::BlendEquation::kAdd);
126 static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
127 static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
128 static_assert(3 == (int)skgpu::BlendEquation::kScreen);
129 static_assert(4 == (int)skgpu::BlendEquation::kOverlay);
130 static_assert(5 == (int)skgpu::BlendEquation::kDarken);
131 static_assert(6 == (int)skgpu::BlendEquation::kLighten);
132 static_assert(7 == (int)skgpu::BlendEquation::kColorDodge);
133 static_assert(8 == (int)skgpu::BlendEquation::kColorBurn);
134 static_assert(9 == (int)skgpu::BlendEquation::kHardLight);
135 static_assert(10 == (int)skgpu::BlendEquation::kSoftLight);
136 static_assert(11 == (int)skgpu::BlendEquation::kDifference);
137 static_assert(12 == (int)skgpu::BlendEquation::kExclusion);
138 static_assert(13 == (int)skgpu::BlendEquation::kMultiply);
139 static_assert(14 == (int)skgpu::BlendEquation::kHSLHue);
140 static_assert(15 == (int)skgpu::BlendEquation::kHSLSaturation);
141 static_assert(16 == (int)skgpu::BlendEquation::kHSLColor);
142 static_assert(17 == (int)skgpu::BlendEquation::kHSLLuminosity);
143 static_assert(std::size(gXfermodeEquation2Blend) == skgpu::kBlendEquationCnt);
144
145 static const GrGLenum gXfermodeCoeff2Blend[] = {
146 GR_GL_ZERO,
147 GR_GL_ONE,
148 GR_GL_SRC_COLOR,
149 GR_GL_ONE_MINUS_SRC_COLOR,
150 GR_GL_DST_COLOR,
151 GR_GL_ONE_MINUS_DST_COLOR,
152 GR_GL_SRC_ALPHA,
153 GR_GL_ONE_MINUS_SRC_ALPHA,
154 GR_GL_DST_ALPHA,
155 GR_GL_ONE_MINUS_DST_ALPHA,
156 GR_GL_CONSTANT_COLOR,
157 GR_GL_ONE_MINUS_CONSTANT_COLOR,
158
159 // extended blend coeffs
160 GR_GL_SRC1_COLOR,
161 GR_GL_ONE_MINUS_SRC1_COLOR,
162 GR_GL_SRC1_ALPHA,
163 GR_GL_ONE_MINUS_SRC1_ALPHA,
164
165 // Illegal... needs to map to something.
166 GR_GL_ZERO,
167 };
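// Indexed by the skgpu blend-coefficient enum; as with gXfermodeEquation2Blend above, the entry
// order here is assumed to match that enum's declaration order, with the trailing GR_GL_ZERO
// acting as the placeholder for illegal coefficients.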
168
169 //////////////////////////////////////////////////////////////////////////////
170
171 static int gl_target_to_binding_index(GrGLenum target) {
172 switch (target) {
173 case GR_GL_TEXTURE_2D:
174 return 0;
175 case GR_GL_TEXTURE_RECTANGLE:
176 return 1;
177 case GR_GL_TEXTURE_EXTERNAL:
178 return 2;
179 }
180 SK_ABORT("Unexpected GL texture target.");
181 }
182
183 GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
184 return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
185 }
186
187 bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
188 return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
189 }
190
191 void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) {
192 int targetIndex = gl_target_to_binding_index(target);
193 fTargetBindings[targetIndex].fBoundResourceID = resourceID;
194 fTargetBindings[targetIndex].fHasBeenModified = true;
195 }
196
197 void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
198 this->setBoundID(target, GrGpuResource::UniqueID());
199 }
200
201 void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) {
202 for (auto& targetBinding : fTargetBindings) {
203 targetBinding.fBoundResourceID.makeInvalid();
204 if (markUnmodified) {
205 targetBinding.fHasBeenModified = false;
206 }
207 }
208 }
209
210 //////////////////////////////////////////////////////////////////////////////
211
212 static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
213 switch (filter) {
214 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
215 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR;
216 }
217 SkUNREACHABLE;
218 }
219
220 static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter,
221 GrSamplerState::MipmapMode mm) {
222 switch (mm) {
223 case GrSamplerState::MipmapMode::kNone:
224 return filter_to_gl_mag_filter(filter);
225 case GrSamplerState::MipmapMode::kNearest:
226 switch (filter) {
227 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_NEAREST;
228 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_NEAREST;
229 }
230 SkUNREACHABLE;
231 case GrSamplerState::MipmapMode::kLinear:
232 switch (filter) {
233 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_LINEAR;
234 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_LINEAR;
235 }
236 SkUNREACHABLE;
237 }
238 SkUNREACHABLE;
239 }
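// For example, (Filter::kLinear, MipmapMode::kLinear) maps to GR_GL_LINEAR_MIPMAP_LINEAR
// (trilinear filtering), while MipmapMode::kNone ignores mips and falls back to the plain
// mag filter enum (GR_GL_NEAREST or GR_GL_LINEAR).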
240
241 static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
242 const GrCaps& caps) {
243 switch (wrapMode) {
244 case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE;
245 case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT;
246 case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
247 case GrSamplerState::WrapMode::kClampToBorder:
248 // May not be supported but should have been caught earlier
249 SkASSERT(caps.clampToBorderSupport());
250 return GR_GL_CLAMP_TO_BORDER;
251 }
252 SkUNREACHABLE;
253 }
254
255 ///////////////////////////////////////////////////////////////////////////////
256
257 static void cleanup_program(GrGLGpu* gpu,
258 GrGLuint* programID,
259 GrGLuint* vshader,
260 GrGLuint* fshader) {
261 const GrGLInterface* gli = gpu->glInterface();
262 if (programID) {
263 GR_GL_CALL(gli, DeleteProgram(*programID));
264 *programID = 0;
265 }
266 if (vshader) {
267 GR_GL_CALL(gli, DeleteShader(*vshader));
268 *vshader = 0;
269 }
270 if (fshader) {
271 GR_GL_CALL(gli, DeleteShader(*fshader));
272 *fshader = 0;
273 }
274 }
275
276 ///////////////////////////////////////////////////////////////////////////////
277
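// Caches GL sampler objects in a small LRU keyed by GrSamplerState::asKey(), and remembers which
// sampler object is bound to each texture unit so redundant BindSampler calls can be skipped.
// Only created when the context reports sampler-object support (see useSamplerObjects() in the
// constructor below).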
278 class GrGLGpu::SamplerObjectCache {
279 public:
280     SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
281 fNumTextureUnits = fGpu->glCaps().shaderCaps()->fMaxFragmentSamplers;
282 fTextureUnitStates = std::make_unique<UnitState[]>(fNumTextureUnits);
283 }
284
285     ~SamplerObjectCache() {
286 if (!fNumTextureUnits) {
287 // We've already been abandoned.
288 return;
289 }
290 }
291
292     void bindSampler(int unitIdx, GrSamplerState state) {
293 if (unitIdx >= fNumTextureUnits) {
294 return;
295 }
296 // In GL the max aniso value is specified in addition to min/mag filters and the driver
297 // is encouraged to consider the other filter settings when doing aniso.
298 uint32_t key = state.asKey(/*anisoIsOrthogonal=*/true);
299 const Sampler* sampler = fSamplers.find(key);
300 if (!sampler) {
301 GrGLuint s;
302 GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
303 if (!s) {
304 return;
305 }
306 sampler = fSamplers.insert(key, Sampler(s, fGpu->glInterface()));
307 GrGLenum minFilter = filter_to_gl_min_filter(state.filter(), state.mipmapMode());
308 GrGLenum magFilter = filter_to_gl_mag_filter(state.filter());
309 GrGLenum wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
310 GrGLenum wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
311 GR_GL_CALL(fGpu->glInterface(),
312 SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
313 GR_GL_CALL(fGpu->glInterface(),
314 SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
315 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
316 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
317 SkASSERT(fGpu->glCaps().anisoSupport() || !state.isAniso());
318 if (fGpu->glCaps().anisoSupport()) {
319 float maxAniso = std::min(static_cast<GrGLfloat>(state.maxAniso()),
320 fGpu->glCaps().maxTextureMaxAnisotropy());
321 GR_GL_CALL(fGpu->glInterface(), SamplerParameterf(s,
322 GR_GL_TEXTURE_MAX_ANISOTROPY,
323 maxAniso));
324 }
325 }
326 SkASSERT(sampler && sampler->id());
327 if (!fTextureUnitStates[unitIdx].fKnown ||
328 fTextureUnitStates[unitIdx].fSamplerIDIfKnown != sampler->id()) {
329 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, sampler->id()));
330 fTextureUnitStates[unitIdx].fSamplerIDIfKnown = sampler->id();
331 fTextureUnitStates[unitIdx].fKnown = true;
332 }
333 }
334
335     void unbindSampler(int unitIdx) {
336 if (!fTextureUnitStates[unitIdx].fKnown ||
337 fTextureUnitStates[unitIdx].fSamplerIDIfKnown != 0) {
338 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, 0));
339 fTextureUnitStates[unitIdx].fSamplerIDIfKnown = 0;
340 fTextureUnitStates[unitIdx].fKnown = true;
341 }
342 }
343
344     void invalidateBindings() {
345 std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
346 }
347
348     void abandon() {
349 fSamplers.foreach([](uint32_t* key, Sampler* sampler) { sampler->abandon(); });
350 fTextureUnitStates.reset();
351 fNumTextureUnits = 0;
352 }
353
354     void release() {
355 if (!fNumTextureUnits) {
356 // We've already been abandoned.
357 return;
358 }
359 fSamplers.reset();
360 // Deleting a bound sampler implicitly binds sampler 0. We just invalidate all of our
361 // knowledge.
362 std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
363 }
364
365 private:
366 class Sampler {
367 public:
368 Sampler() = default;
369 Sampler(const Sampler&) = delete;
370
371         Sampler(Sampler&& that) {
372 fID = that.fID;
373 fInterface = that.fInterface;
374 that.fID = 0;
375 }
376
377         Sampler(GrGLuint id, const GrGLInterface* interface) : fID(id), fInterface(interface) {}
378
379         ~Sampler() {
380 if (fID) {
381 GR_GL_CALL(fInterface, DeleteSamplers(1, &fID));
382 }
383 }
384
385         GrGLuint id() const { return fID; }
386
387         void abandon() { fID = 0; }
388
389 private:
390 GrGLuint fID = 0;
391 const GrGLInterface* fInterface = nullptr;
392 };
393
394 struct UnitState {
395 bool fKnown = false;
396 GrGLuint fSamplerIDIfKnown = 0;
397 };
398
399 static constexpr int kMaxSamplers = 32;
400
401 SkLRUCache<uint32_t, Sampler> fSamplers{kMaxSamplers};
402 std::unique_ptr<UnitState[]> fTextureUnitStates;
403 GrGLGpu* fGpu;
404 int fNumTextureUnits;
405 };
406
407 ///////////////////////////////////////////////////////////////////////////////
408
409 std::unique_ptr<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface,
410 const GrContextOptions& options,
411 GrDirectContext* direct) {
412 #if !defined(SK_DISABLE_LEGACY_GL_MAKE_NATIVE_INTERFACE)
413 if (!interface) {
414 interface = GrGLMakeNativeInterface();
415 if (!interface) {
416 return nullptr;
417 }
418 }
419 #else
420 if (!interface) {
421 return nullptr;
422 }
423 #endif
424 #ifdef USE_NSIGHT
425 const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
426 #endif
427 auto glContext = GrGLContext::Make(std::move(interface), options);
428 if (!glContext) {
429 return nullptr;
430 }
431 return std::unique_ptr<GrGpu>(new GrGLGpu(std::move(glContext), direct));
432 }
433
434 GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrDirectContext* dContext)
435 : GrGpu(dContext)
436 , fGLContext(std::move(ctx))
437 , fProgramCache(new ProgramCache(dContext->priv().options().fRuntimeProgramCacheSize))
438 , fHWProgramID(0)
439 , fTempSrcFBOID(0)
440 , fTempDstFBOID(0)
441 , fStencilClearFBOID(0)
442 , fFinishCallbacks(this) {
443 SkASSERT(fGLContext);
444     // Clear errors so we don't get confused about whether we caused an error.
445 this->clearErrorsAndCheckForOOM();
446 // Toss out any pre-existing OOM that was hanging around before we got started.
447 this->checkAndResetOOMed();
448
449 this->initCaps(sk_ref_sp(fGLContext->caps()));
450
451 fHWTextureUnitBindings.reset(this->numTextureUnits());
452
453 this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
454 this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
455 this->hwBufferState(GrGpuBufferType::kDrawIndirect)->fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
456 if (GrGLCaps::TransferBufferType::kChromium == this->glCaps().transferBufferType()) {
457 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
458 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
459 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
460 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
461 } else {
462 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
463 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
464 }
465 for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
466 fHWBufferState[i].invalidate();
467 }
468
469 if (this->glCaps().useSamplerObjects()) {
470 fSamplerObjectCache = std::make_unique<SamplerObjectCache>(this);
471 }
472 }
473
474 GrGLGpu::~GrGLGpu() {
475 // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
476 // to release the resources held by the objects themselves.
477 fCopyProgramArrayBuffer.reset();
478 fMipmapProgramArrayBuffer.reset();
479 if (fProgramCache) {
480 fProgramCache->reset();
481 }
482
483 fHWProgram.reset();
484 if (fHWProgramID) {
485 // detach the current program so there is no confusion on OpenGL's part
486 // that we want it to be deleted
487 GL_CALL(UseProgram(0));
488 }
489
490 if (fTempSrcFBOID) {
491 this->deleteFramebuffer(fTempSrcFBOID);
492 }
493 if (fTempDstFBOID) {
494 this->deleteFramebuffer(fTempDstFBOID);
495 }
496 if (fStencilClearFBOID) {
497 this->deleteFramebuffer(fStencilClearFBOID);
498 }
499
500 for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
501 if (0 != fCopyPrograms[i].fProgram) {
502 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
503 }
504 }
505
506 for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
507 if (0 != fMipmapPrograms[i].fProgram) {
508 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
509 }
510 }
511
512 fSamplerObjectCache.reset();
513
514 fFinishCallbacks.callAll(true);
515 }
516
517 void GrGLGpu::disconnect(DisconnectType type) {
518 INHERITED::disconnect(type);
519 if (DisconnectType::kCleanup == type) {
520 if (fHWProgramID) {
521 GL_CALL(UseProgram(0));
522 }
523 if (fTempSrcFBOID) {
524 this->deleteFramebuffer(fTempSrcFBOID);
525 }
526 if (fTempDstFBOID) {
527 this->deleteFramebuffer(fTempDstFBOID);
528 }
529 if (fStencilClearFBOID) {
530 this->deleteFramebuffer(fStencilClearFBOID);
531 }
532 for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
533 if (fCopyPrograms[i].fProgram) {
534 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
535 }
536 }
537 for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
538 if (fMipmapPrograms[i].fProgram) {
539 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
540 }
541 }
542
543 if (fSamplerObjectCache) {
544 fSamplerObjectCache->release();
545 }
546 } else {
547 if (fProgramCache) {
548 fProgramCache->abandon();
549 }
550 if (fSamplerObjectCache) {
551 fSamplerObjectCache->abandon();
552 }
553 }
554
555 fHWProgram.reset();
556 fProgramCache->reset();
557 fProgramCache.reset();
558
559 fHWProgramID = 0;
560 fTempSrcFBOID = 0;
561 fTempDstFBOID = 0;
562 fStencilClearFBOID = 0;
563 fCopyProgramArrayBuffer.reset();
564 for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
565 fCopyPrograms[i].fProgram = 0;
566 }
567 fMipmapProgramArrayBuffer.reset();
568 for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
569 fMipmapPrograms[i].fProgram = 0;
570 }
571
572 fFinishCallbacks.callAll(/* doDelete */ DisconnectType::kCleanup == type);
573 }
574
575 GrThreadSafePipelineBuilder* GrGLGpu::pipelineBuilder() {
576 return fProgramCache.get();
577 }
578
579 sk_sp<GrThreadSafePipelineBuilder> GrGLGpu::refPipelineBuilder() {
580 return fProgramCache;
581 }
582
583 ///////////////////////////////////////////////////////////////////////////////
584
585 void GrGLGpu::onResetContext(uint32_t resetBits) {
586 if (resetBits & kMisc_GrGLBackendState) {
587         // We don't use the depth buffer at all.
588 GL_CALL(Disable(GR_GL_DEPTH_TEST));
589 GL_CALL(DepthMask(GR_GL_FALSE));
590
591 // We don't use face culling.
592 GL_CALL(Disable(GR_GL_CULL_FACE));
593 // We do use separate stencil. Our algorithms don't care which face is front vs. back so
594 // just set this to the default for self-consistency.
595 GL_CALL(FrontFace(GR_GL_CCW));
596
597 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
598 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();
599
600 if (GR_IS_GR_GL(this->glStandard())) {
601 #ifndef USE_NSIGHT
602 // Desktop-only state that we never change
603 if (!this->glCaps().isCoreProfile()) {
604 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
605 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
606 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
607 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
608 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
609 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
610 }
611 // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
612 // core profile. This seems like a bug since the core spec removes any mention of
613 // GL_ARB_imaging.
614 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
615 GL_CALL(Disable(GR_GL_COLOR_TABLE));
616 }
617 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
618
619 fHWWireframeEnabled = kUnknown_TriState;
620 #endif
621 // Since ES doesn't support glPointSize at all we always use the VS to
622 // set the point size
623 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
624
625 }
626
627 if (GR_IS_GR_GL_ES(this->glStandard()) &&
628 this->glCaps().fbFetchRequiresEnablePerSample()) {
629             // The ARM extension requires specifically enabling MSAA fetching per sample.
630             // On some devices this may have a perf hit. Also, multiple render targets are disabled.
631 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
632 }
633 fHWWriteToColor = kUnknown_TriState;
634 // we only ever use lines in hairline mode
635 GL_CALL(LineWidth(1));
636 GL_CALL(Disable(GR_GL_DITHER));
637
638 fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
639 }
640
641 if (resetBits & kMSAAEnable_GrGLBackendState) {
642 if (this->glCaps().clientCanDisableMultisample()) {
643 // Restore GL_MULTISAMPLE to its initial state. It being enabled has no effect on draws
644 // to non-MSAA targets.
645 GL_CALL(Enable(GR_GL_MULTISAMPLE));
646 }
647 fHWConservativeRasterEnabled = kUnknown_TriState;
648 }
649
650 fHWActiveTextureUnitIdx = -1; // invalid
651 fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);
652
653 if (resetBits & kTextureBinding_GrGLBackendState) {
654 for (int s = 0; s < this->numTextureUnits(); ++s) {
655 fHWTextureUnitBindings[s].invalidateAllTargets(false);
656 }
657 if (fSamplerObjectCache) {
658 fSamplerObjectCache->invalidateBindings();
659 }
660 }
661
662 if (resetBits & kBlend_GrGLBackendState) {
663 fHWBlendState.invalidate();
664 }
665
666 if (resetBits & kView_GrGLBackendState) {
667 fHWScissorSettings.invalidate();
668 fHWWindowRectsState.invalidate();
669 fHWViewport.invalidate();
670 }
671
672 if (resetBits & kStencil_GrGLBackendState) {
673 fHWStencilSettings.invalidate();
674 fHWStencilTestEnabled = kUnknown_TriState;
675 }
676
677 // Vertex
678 if (resetBits & kVertex_GrGLBackendState) {
679 fHWVertexArrayState.invalidate();
680 this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
681 this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
682 this->hwBufferState(GrGpuBufferType::kDrawIndirect)->invalidate();
683 }
684
685 if (resetBits & kRenderTarget_GrGLBackendState) {
686 fHWBoundRenderTargetUniqueID.makeInvalid();
687 fHWSRGBFramebuffer = kUnknown_TriState;
688 fBoundDrawFramebuffer = 0;
689 }
690
691 // we assume these values
692 if (resetBits & kPixelStore_GrGLBackendState) {
693 if (this->caps()->writePixelsRowBytesSupport() ||
694 this->caps()->transferPixelsToRowBytesSupport()) {
695 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
696 }
697 if (this->glCaps().readPixelsRowBytesSupport()) {
698 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
699 }
700 if (this->glCaps().packFlipYSupport()) {
701 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
702 }
703 }
704
705 if (resetBits & kProgram_GrGLBackendState) {
706 fHWProgramID = 0;
707 fHWProgram.reset();
708 }
709 ++fResetTimestampForTextureParameters;
710 }
711
712 static bool check_backend_texture(const GrBackendTexture& backendTex,
713 const GrGLCaps& caps,
714 GrGLTexture::Desc* desc,
715 bool skipRectTexSupportCheck = false) {
716 GrGLTextureInfo info;
717 if (!GrBackendTextures::GetGLTextureInfo(backendTex, &info) || !info.fID || !info.fFormat) {
718 return false;
719 }
720
721 if (info.fProtected == skgpu::Protected::kYes && !caps.supportsProtectedContent()) {
722 return false;
723 }
724
725 desc->fSize = {backendTex.width(), backendTex.height()};
726 desc->fTarget = info.fTarget;
727 desc->fID = info.fID;
728 desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
729 desc->fIsProtected = skgpu::Protected(info.fProtected == skgpu::Protected::kYes ||
730 caps.strictProtectedness());
731
732 if (desc->fFormat == GrGLFormat::kUnknown) {
733 return false;
734 }
735 if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) {
736 if (!caps.shaderCaps()->fExternalTextureSupport) {
737 return false;
738 }
739 } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) {
740 if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) {
741 return false;
742 }
743 } else if (GR_GL_TEXTURE_2D != desc->fTarget) {
744 return false;
745 }
746
747
748 return true;
749 }
750
751 static sk_sp<GrGLTextureParameters> get_gl_texture_params(const GrBackendTexture& backendTex) {
752 const GrBackendTextureData* btd = GrBackendSurfacePriv::GetBackendData(backendTex);
753 auto glTextureData = static_cast<const GrGLBackendTextureData*>(btd);
754 SkASSERT(glTextureData);
755 return glTextureData->info().refParameters();
756 }
757
758 sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
759 GrWrapOwnership ownership,
760 GrWrapCacheable cacheable,
761 GrIOType ioType) {
762 GrGLTexture::Desc desc;
763 if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
764 return nullptr;
765 }
766
767 if (kBorrow_GrWrapOwnership == ownership) {
768 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
769 } else {
770 desc.fOwnership = GrBackendObjectOwnership::kOwned;
771 }
772
773 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
774 : GrMipmapStatus::kNotAllocated;
775
776 auto texture = GrGLTexture::MakeWrapped(this,
777 mipmapStatus,
778 desc,
779 get_gl_texture_params(backendTex),
780 cacheable,
781 ioType,
782 backendTex.getLabel());
783 if (this->glCaps().isFormatRenderable(backendTex.getBackendFormat(), 1)) {
784 // Pessimistically assume this external texture may have been bound to a FBO.
785 texture->baseLevelWasBoundToFBO();
786 }
787 return texture;
788 }
789
790 static bool check_compressed_backend_texture(const GrBackendTexture& backendTex,
791 const GrGLCaps& caps, GrGLTexture::Desc* desc,
792 bool skipRectTexSupportCheck = false) {
793 GrGLTextureInfo info;
794 if (!GrBackendTextures::GetGLTextureInfo(backendTex, &info) || !info.fID || !info.fFormat) {
795 return false;
796 }
797 if (info.fProtected == skgpu::Protected::kYes && !caps.supportsProtectedContent()) {
798 return false;
799 }
800
801 desc->fSize = {backendTex.width(), backendTex.height()};
802 desc->fTarget = info.fTarget;
803 desc->fID = info.fID;
804 desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
805 desc->fIsProtected = skgpu::Protected(info.fProtected == skgpu::Protected::kYes ||
806 caps.strictProtectedness());
807
808 if (desc->fFormat == GrGLFormat::kUnknown) {
809 return false;
810 }
811
812 if (GR_GL_TEXTURE_2D != desc->fTarget) {
813 return false;
814 }
815
816 return true;
817 }
818
819 sk_sp<GrTexture> GrGLGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
820 GrWrapOwnership ownership,
821 GrWrapCacheable cacheable) {
822 GrGLTexture::Desc desc;
823 if (!check_compressed_backend_texture(backendTex, this->glCaps(), &desc)) {
824 return nullptr;
825 }
826
827 if (kBorrow_GrWrapOwnership == ownership) {
828 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
829 } else {
830 desc.fOwnership = GrBackendObjectOwnership::kOwned;
831 }
832
833 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
834 : GrMipmapStatus::kNotAllocated;
835
836 return GrGLTexture::MakeWrapped(this,
837 mipmapStatus,
838 desc,
839 get_gl_texture_params(backendTex),
840 cacheable,
841 kRead_GrIOType,
842 backendTex.getLabel());
843 }
844
845 sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
846 int sampleCnt,
847 GrWrapOwnership ownership,
848 GrWrapCacheable cacheable) {
849 const GrGLCaps& caps = this->glCaps();
850
851 GrGLTexture::Desc desc;
852 if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
853 return nullptr;
854 }
855 SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt));
856 SkASSERT(caps.isFormatTexturable(desc.fFormat));
857
858     // We don't support rendering to an EXTERNAL texture.
859 if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) {
860 return nullptr;
861 }
862
863 if (kBorrow_GrWrapOwnership == ownership) {
864 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
865 } else {
866 desc.fOwnership = GrBackendObjectOwnership::kOwned;
867 }
868
869
870 sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat);
871 SkASSERT(sampleCnt);
872
873 GrGLRenderTarget::IDs rtIDs;
874 if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) {
875 return nullptr;
876 }
877
878 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kDirty
879 : GrMipmapStatus::kNotAllocated;
880
881 sk_sp<GrGLTextureRenderTarget> texRT(
882 GrGLTextureRenderTarget::MakeWrapped(this,
883 sampleCnt,
884 desc,
885 get_gl_texture_params(backendTex),
886 rtIDs,
887 cacheable,
888 mipmapStatus,
889 backendTex.getLabel()));
890 texRT->baseLevelWasBoundToFBO();
891 return texRT;
892 }
893
894 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
895 GrGLFramebufferInfo info;
896 if (!GrBackendRenderTargets::GetGLFramebufferInfo(backendRT, &info)) {
897 return nullptr;
898 }
899
900 if (backendRT.isProtected() && !this->glCaps().supportsProtectedContent()) {
901 return nullptr;
902 }
903
904 const auto format = GrBackendFormats::AsGLFormat(backendRT.getBackendFormat());
905 if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) {
906 return nullptr;
907 }
908
909 int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format);
910
911 GrGLRenderTarget::IDs rtIDs;
912 if (sampleCount <= 1) {
913 rtIDs.fSingleSampleFBOID = info.fFBOID;
914 rtIDs.fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
915 } else {
916 rtIDs.fSingleSampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
917 rtIDs.fMultisampleFBOID = info.fFBOID;
918 }
919 rtIDs.fMSColorRenderbufferID = 0;
920 rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
921 rtIDs.fTotalMemorySamplesPerPixel = sampleCount;
922
923 return GrGLRenderTarget::MakeWrapped(this,
924 backendRT.dimensions(),
925 format,
926 sampleCount,
927 rtIDs,
928 backendRT.stencilBits(),
929 skgpu::Protected(backendRT.isProtected()),
930 /*label=*/"GLGpu_WrapBackendRenderTarget");
931 }
932
933 static bool check_write_and_transfer_input(GrGLTexture* glTex) {
934 if (!glTex) {
935 return false;
936 }
937
938 // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
939 if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
940 return false;
941 }
942
943 return true;
944 }
945
946 bool GrGLGpu::onWritePixels(GrSurface* surface,
947 SkIRect rect,
948 GrColorType surfaceColorType,
949 GrColorType srcColorType,
950 const GrMipLevel texels[],
951 int mipLevelCount,
952 bool prepForTexSampling) {
953 auto glTex = static_cast<GrGLTexture*>(surface->asTexture());
954
955 if (!check_write_and_transfer_input(glTex)) {
956 return false;
957 }
958
959 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
960
961 // If we have mips make sure the base/max levels cover the full range so that the uploads go to
962 // the right levels. We've found some Radeons require this.
963 if (mipLevelCount && this->glCaps().mipmapLevelControlSupport()) {
964 auto params = glTex->parameters();
965 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
966 int maxLevel = glTex->maxMipmapLevel();
967 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
968 GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_BASE_LEVEL, 0));
969 nonsamplerState.fBaseMipMapLevel = 0;
970 }
971 if (params->nonsamplerState().fMaxMipmapLevel != maxLevel) {
972 GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_MAX_LEVEL, maxLevel));
973             nonsamplerState.fMaxMipmapLevel = maxLevel;
974 }
975 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
976 }
977
978 if (this->glCaps().flushBeforeWritePixels()) {
979 GL_CALL(Flush());
980 }
981
982 SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
983 return this->uploadColorTypeTexData(glTex->format(),
984 surfaceColorType,
985 glTex->dimensions(),
986 glTex->target(),
987 rect,
988 srcColorType,
989 texels,
990 mipLevelCount);
991 }
992
993 bool GrGLGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
994 size_t srcOffset,
995 sk_sp<GrGpuBuffer> dst,
996 size_t dstOffset,
997 size_t size) {
998 SkASSERT(!src->isMapped());
999 SkASSERT(!dst->isMapped());
1000
1001 auto glSrc = static_cast<const GrGLBuffer*>(src.get());
1002 auto glDst = static_cast<const GrGLBuffer*>(dst.get());
1003
1004 // If we refactored bindBuffer() to use something other than GrGpuBufferType to indicate the
1005 // binding target then we could use the COPY_READ and COPY_WRITE targets here. But
1006 // CopyBufferSubData is documented to work with all the targets so it's not clear it's worth it.
1007 this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glSrc);
1008 this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glDst);
1009
1010 GL_CALL(CopyBufferSubData(GR_GL_PIXEL_UNPACK_BUFFER,
1011 GR_GL_PIXEL_PACK_BUFFER,
1012 srcOffset,
1013 dstOffset,
1014 size));
1015 return true;
1016 }
1017
1018 bool GrGLGpu::onTransferPixelsTo(GrTexture* texture,
1019 SkIRect rect,
1020 GrColorType textureColorType,
1021 GrColorType bufferColorType,
1022 sk_sp<GrGpuBuffer> transferBuffer,
1023 size_t offset,
1024 size_t rowBytes) {
1025 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
1026
1027 // Can't transfer compressed data
1028 SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
1029
1030 if (!check_write_and_transfer_input(glTex)) {
1031 return false;
1032 }
1033
1034 static_assert(sizeof(int) == sizeof(int32_t), "");
1035
1036 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
1037
1038 SkASSERT(!transferBuffer->isMapped());
1039 SkASSERT(!transferBuffer->isCpuBuffer());
1040 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer.get());
1041 this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
1042
1043 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
1044
1045 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
1046 const size_t trimRowBytes = rect.width() * bpp;
1047 const void* pixels = (void*)offset;
1048
1049 SkASSERT(glBuffer->size() >= offset + rowBytes*(rect.height() - 1) + trimRowBytes);
1050
1051 bool restoreGLRowLength = false;
1052 if (trimRowBytes != rowBytes) {
1053 // we should have checked for this support already
1054 SkASSERT(this->glCaps().transferPixelsToRowBytesSupport());
1055 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
1056 restoreGLRowLength = true;
1057 }
1058
1059 GrGLFormat textureFormat = glTex->format();
1060 // External format and type come from the upload data.
1061 GrGLenum externalFormat = 0;
1062 GrGLenum externalType = 0;
1063 this->glCaps().getTexSubImageExternalFormatAndType(
1064 textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType);
1065 if (!externalFormat || !externalType) {
1066 return false;
1067 }
1068
1069 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
1070 GL_CALL(TexSubImage2D(glTex->target(),
1071 0,
1072 rect.left(),
1073 rect.top(),
1074 rect.width(),
1075 rect.height(),
1076 externalFormat,
1077 externalType,
1078 pixels));
1079
1080 if (restoreGLRowLength) {
1081 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1082 }
1083
1084 return true;
1085 }
1086
1087 bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface,
1088 SkIRect rect,
1089 GrColorType surfaceColorType,
1090 GrColorType dstColorType,
1091 sk_sp<GrGpuBuffer> transferBuffer,
1092 size_t offset) {
1093 auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer.get());
1094 SkASSERT(glBuffer->size() >= offset + (rect.width() *
1095 rect.height()*
1096 GrColorTypeBytesPerPixel(dstColorType)));
1097
1098 this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer);
1099
1100 auto offsetAsPtr = reinterpret_cast<void*>(offset);
1101 return this->readOrTransferPixelsFrom(surface,
1102 rect,
1103 surfaceColorType,
1104 dstColorType,
1105 offsetAsPtr,
1106 rect.width());
1107 }
1108
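// While a pixel pack/unpack buffer (PBO) is bound, the pointer argument to calls such as
// TexSubImage2D or ReadPixels is interpreted as an offset into that buffer rather than as a
// client-memory pointer. The CPU-pointer upload paths below therefore unbind any transfer
// buffer first via unbindXferBuffer().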
1109 void GrGLGpu::unbindXferBuffer(GrGpuBufferType type) {
1110 if (this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kARB_PBO &&
1111 this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kNV_PBO) {
1112 return;
1113 }
1114 SkASSERT(type == GrGpuBufferType::kXferCpuToGpu || type == GrGpuBufferType::kXferGpuToCpu);
1115 auto* xferBufferState = this->hwBufferState(type);
1116 if (!xferBufferState->fBufferZeroKnownBound) {
1117 GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
1118 xferBufferState->fBoundBufferUniqueID.makeInvalid();
1119 xferBufferState->fBufferZeroKnownBound = true;
1120 }
1121 }
1122
1123 bool GrGLGpu::uploadColorTypeTexData(GrGLFormat textureFormat,
1124 GrColorType textureColorType,
1125 SkISize texDims,
1126 GrGLenum target,
1127 SkIRect dstRect,
1128 GrColorType srcColorType,
1129 const GrMipLevel texels[],
1130 int mipLevelCount) {
1131 // If we're uploading compressed data then we should be using uploadCompressedTexData
1132 SkASSERT(!GrGLFormatIsCompressed(textureFormat));
1133
1134 SkASSERT(this->glCaps().isFormatTexturable(textureFormat));
1135
1136 size_t bpp = GrColorTypeBytesPerPixel(srcColorType);
1137
1138 // External format and type come from the upload data.
1139 GrGLenum externalFormat;
1140 GrGLenum externalType;
1141 this->glCaps().getTexSubImageExternalFormatAndType(
1142 textureFormat, textureColorType, srcColorType, &externalFormat, &externalType);
1143 if (!externalFormat || !externalType) {
1144 return false;
1145 }
1146 this->uploadTexData(texDims, target, dstRect, externalFormat, externalType, bpp, texels,
1147 mipLevelCount);
1148 return true;
1149 }
1150
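// Fills the selected mip levels of a texture with a constant color from CPU-side staging memory.
// Bit i of levelMask selects mip level i; a single tightly packed image is allocated at the
// largest requested level and reused (read at a smaller size) for the lower levels, since the
// contents are a solid color.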
1151 bool GrGLGpu::uploadColorToTex(GrGLFormat textureFormat,
1152 SkISize texDims,
1153 GrGLenum target,
1154 std::array<float, 4> color,
1155 uint32_t levelMask) {
1156 GrColorType colorType;
1157 GrGLenum externalFormat, externalType;
1158 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(textureFormat, &externalFormat,
1159 &externalType, &colorType);
1160 if (colorType == GrColorType::kUnknown) {
1161 return false;
1162 }
1163
1164 std::unique_ptr<char[]> pixelStorage;
1165 size_t bpp = 0;
1166 int numLevels = SkMipmap::ComputeLevelCount(texDims) + 1;
1167 STArray<16, GrMipLevel> levels;
1168 levels.resize(numLevels);
1169 SkISize levelDims = texDims;
1170 for (int i = 0; i < numLevels; ++i, levelDims = {std::max(levelDims.width() >> 1, 1),
1171 std::max(levelDims.height() >> 1, 1)}) {
1172 if (levelMask & (1 << i)) {
1173 if (!pixelStorage) {
1174 // Make one tight image at the first size and reuse it for smaller levels.
1175 GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, levelDims);
1176 size_t rb = ii.minRowBytes();
1177 pixelStorage.reset(new char[rb * levelDims.height()]);
1178 if (!GrClearImage(ii, pixelStorage.get(), ii.minRowBytes(), color)) {
1179 return false;
1180 }
1181 bpp = ii.bpp();
1182 }
1183 levels[i] = {pixelStorage.get(), levelDims.width()*bpp, nullptr};
1184 }
1185 }
1186 this->uploadTexData(texDims, target, SkIRect::MakeSize(texDims), externalFormat, externalType,
1187 bpp, levels.begin(), levels.size());
1188 return true;
1189 }
1190
1191 void GrGLGpu::uploadTexData(SkISize texDims,
1192 GrGLenum target,
1193 SkIRect dstRect,
1194 GrGLenum externalFormat,
1195 GrGLenum externalType,
1196 size_t bpp,
1197 const GrMipLevel texels[],
1198 int mipLevelCount) {
1199 SkASSERT(!texDims.isEmpty());
1200 SkASSERT(!dstRect.isEmpty());
1201 SkASSERT(SkIRect::MakeSize(texDims).contains(dstRect));
1202 SkASSERT(mipLevelCount > 0 && mipLevelCount <= SkMipmap::ComputeLevelCount(texDims) + 1);
1203 SkASSERT(mipLevelCount == 1 || dstRect == SkIRect::MakeSize(texDims));
1204
1205 const GrGLCaps& caps = this->glCaps();
1206
1207 bool restoreGLRowLength = false;
1208
1209 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1210 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
1211
1212 SkISize dims = dstRect.size();
1213 for (int level = 0; level < mipLevelCount; ++level, dims = {std::max(dims.width() >> 1, 1),
1214 std::max(dims.height() >> 1, 1)}) {
1215 if (!texels[level].fPixels) {
1216 continue;
1217 }
1218 const size_t trimRowBytes = dims.width() * bpp;
1219 const size_t rowBytes = texels[level].fRowBytes;
1220
1221 if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) {
1222 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
1223 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
1224 restoreGLRowLength = true;
1225 } else {
1226 SkASSERT(rowBytes == trimRowBytes);
1227 }
1228
1229 GL_CALL(TexSubImage2D(target, level, dstRect.x(), dstRect.y(), dims.width(), dims.height(),
1230 externalFormat, externalType, texels[level].fPixels));
1231 }
1232 if (restoreGLRowLength) {
1233 SkASSERT(caps.writePixelsRowBytesSupport());
1234 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1235 }
1236 }
1237
1238 bool GrGLGpu::uploadCompressedTexData(SkTextureCompressionType compressionType,
1239 GrGLFormat format,
1240 SkISize dimensions,
1241 skgpu::Mipmapped mipmapped,
1242 GrGLenum target,
1243 const void* data,
1244 size_t dataSize) {
1245 SkASSERT(format != GrGLFormat::kUnknown);
1246 const GrGLCaps& caps = this->glCaps();
1247
1248 // We only need the internal format for compressed 2D textures.
1249 GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format);
1250 if (!internalFormat) {
1251 return false;
1252 }
1253
1254 SkASSERT(compressionType != SkTextureCompressionType::kNone);
1255
1256 bool useTexStorage = caps.formatSupportsTexStorage(format);
1257
1258 int numMipLevels = 1;
1259 if (mipmapped == skgpu::Mipmapped::kYes) {
1260 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1261 }
1262
1263 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1264
1265 // TODO: Make sure that the width and height that we pass to OpenGL
1266 // is a multiple of the block size.
1267
1268 if (useTexStorage) {
1269 // We never resize or change formats of textures.
1270 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, numMipLevels, internalFormat,
1271 dimensions.width(), dimensions.height()));
1272 if (error != GR_GL_NO_ERROR) {
1273 return false;
1274 }
1275
1276 size_t offset = 0;
1277 for (int level = 0; level < numMipLevels; ++level) {
1278
1279 size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1280 nullptr, false);
1281
1282 error = GL_ALLOC_CALL(CompressedTexSubImage2D(target,
1283 level,
1284 0, // left
1285 0, // top
1286 dimensions.width(),
1287 dimensions.height(),
1288 internalFormat,
1289 SkToInt(levelDataSize),
1290 &((const char*)data)[offset]));
1291
1292 if (error != GR_GL_NO_ERROR) {
1293 return false;
1294 }
1295
1296 offset += levelDataSize;
1297 dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
1298 }
1299 } else {
1300 size_t offset = 0;
1301
1302 for (int level = 0; level < numMipLevels; ++level) {
1303 size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1304 nullptr, false);
1305
1306 const char* rawLevelData = &((const char*)data)[offset];
1307 GrGLenum error = GL_ALLOC_CALL(CompressedTexImage2D(target,
1308 level,
1309 internalFormat,
1310 dimensions.width(),
1311 dimensions.height(),
1312 0, // border
1313 SkToInt(levelDataSize),
1314 rawLevelData));
1315
1316 if (error != GR_GL_NO_ERROR) {
1317 return false;
1318 }
1319
1320 offset += levelDataSize;
1321 dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
1322 }
1323 }
1324 return true;
1325 }
1326
1327 bool GrGLGpu::renderbufferStorageMSAA(const GrGLContext& ctx, int sampleCount, GrGLenum format,
1328 int width, int height) {
1329 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
1330 GrGLenum error;
1331 switch (ctx.caps()->msFBOType()) {
1332 case GrGLCaps::kStandard_MSFBOType:
1333 error = GL_ALLOC_CALL(RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, sampleCount,
1334 format, width, height));
1335 break;
1336 case GrGLCaps::kES_Apple_MSFBOType:
1337 error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2APPLE(
1338 GR_GL_RENDERBUFFER, sampleCount, format, width, height));
1339 break;
1340 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
1341 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
1342 error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2EXT(
1343 GR_GL_RENDERBUFFER, sampleCount, format, width, height));
1344 break;
1345 case GrGLCaps::kNone_MSFBOType:
1346 SkUNREACHABLE;
1347 }
1348 return error == GR_GL_NO_ERROR;
1349 }
1350
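// Creates the framebuffer object(s), and for explicit MSAA the multisample color renderbuffer,
// needed to render to the texture described by desc. IDs are returned through rtIDs; on any
// failure the partially created GL objects are released by the scope-exit cleanup below.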
1351 bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc,
1352 int sampleCount,
1353 GrGLRenderTarget::IDs* rtIDs) {
1354 rtIDs->fMSColorRenderbufferID = 0;
1355 rtIDs->fMultisampleFBOID = 0;
1356 rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
1357 rtIDs->fSingleSampleFBOID = 0;
1358 rtIDs->fTotalMemorySamplesPerPixel = 0;
1359
1360 SkScopeExit cleanupOnFail([&] {
1361 if (rtIDs->fMSColorRenderbufferID) {
1362 GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1363 }
1364 if (rtIDs->fMultisampleFBOID != rtIDs->fSingleSampleFBOID) {
1365 this->deleteFramebuffer(rtIDs->fMultisampleFBOID);
1366 }
1367 if (rtIDs->fSingleSampleFBOID) {
1368 this->deleteFramebuffer(rtIDs->fSingleSampleFBOID);
1369 }
1370 });
1371
1372 GrGLenum colorRenderbufferFormat = 0; // suppress warning
1373
1374 if (desc.fFormat == GrGLFormat::kUnknown) {
1375 return false;
1376 }
1377
1378 if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
1379 return false;
1380 }
1381
1382 GL_CALL(GenFramebuffers(1, &rtIDs->fSingleSampleFBOID));
1383 if (!rtIDs->fSingleSampleFBOID) {
1384 RENDERENGINE_ABORTF("%s failed to GenFramebuffers!", __func__);
1385 return false;
1386 }
1387
1388     // If we are using multisampling we will create two FBOs. We render to one and then resolve to
1389     // the texture bound to the other. The exception is the IMG multisample extension. With this
1390     // extension the texture is multisampled when rendered to and then auto-resolves when it is
1391     // rendered from.
1392 if (sampleCount <= 1) {
1393 rtIDs->fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
1394 } else if (this->glCaps().usesImplicitMSAAResolve()) {
1395         // The GrGLRenderTarget will configure the FBO as multisample or not based on need.
1396 rtIDs->fMultisampleFBOID = rtIDs->fSingleSampleFBOID;
1397 } else {
1398 GL_CALL(GenFramebuffers(1, &rtIDs->fMultisampleFBOID));
1399 if (!rtIDs->fMultisampleFBOID) {
1400 return false;
1401 }
1402 GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1403 if (!rtIDs->fMSColorRenderbufferID) {
1404 return false;
1405 }
1406 colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat);
1407 }
1408
1409 #if defined(__has_feature)
1410 #define IS_TSAN __has_feature(thread_sanitizer)
1411 #else
1412 #define IS_TSAN 0
1413 #endif
1414
1415 // below here we may bind the FBO
1416 fHWBoundRenderTargetUniqueID.makeInvalid();
1417 if (rtIDs->fMSColorRenderbufferID) {
1418 SkASSERT(sampleCount > 1);
1419 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID));
1420 if (!this->renderbufferStorageMSAA(*fGLContext, sampleCount, colorRenderbufferFormat,
1421 desc.fSize.width(), desc.fSize.height())) {
1422 return false;
1423 }
1424 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fMultisampleFBOID);
1425 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1426 GR_GL_COLOR_ATTACHMENT0,
1427 GR_GL_RENDERBUFFER,
1428 rtIDs->fMSColorRenderbufferID));
1429 // See skbug.com/12644
1430 #if !IS_TSAN
1431 if (!this->glCaps().skipErrorChecks()) {
1432 GrGLenum status;
1433 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1434 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1435 return false;
1436 }
1437 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1438 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1439 GR_GL_COLOR_ATTACHMENT0,
1440 GR_GL_RENDERBUFFER,
1441 0));
1442 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1443 GR_GL_COLOR_ATTACHMENT0,
1444 GR_GL_RENDERBUFFER,
1445 rtIDs->fMSColorRenderbufferID));
1446 }
1447 }
1448 #endif
1449 rtIDs->fTotalMemorySamplesPerPixel += sampleCount;
1450 }
1451 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fSingleSampleFBOID);
1452 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1453 GR_GL_COLOR_ATTACHMENT0,
1454 desc.fTarget,
1455 desc.fID,
1456 0));
1457 // See skbug.com/12644
1458 #if !IS_TSAN
1459 if (!this->glCaps().skipErrorChecks()) {
1460 GrGLenum status;
1461 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1462 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1463 return false;
1464 }
1465 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1466 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1467 GR_GL_COLOR_ATTACHMENT0,
1468 desc.fTarget,
1469 0,
1470 0));
1471 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1472 GR_GL_COLOR_ATTACHMENT0,
1473 desc.fTarget,
1474 desc.fID,
1475 0));
1476 }
1477 }
1478 #endif
1479
1480 #undef IS_TSAN
1481 ++rtIDs->fTotalMemorySamplesPerPixel;
1482
1483 // We did it!
1484 cleanupOnFail.clear();
1485 return true;
1486 }
1487
1488 // good to set a break-point here to know when createTexture fails
1489 static sk_sp<GrTexture> return_null_texture() {
1490 // SkDEBUGFAIL("null texture");
1491 return nullptr;
1492 }
1493
1494 static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params(
1495 const GrGLInterface* interface,
1496 const GrGLCaps& caps,
1497 GrGLenum target) {
1498 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1499 // drivers have a bug where an FBO won't be complete if it includes a
1500 // texture that is not mipmap complete (considering the filter in use).
1501 GrGLTextureParameters::SamplerOverriddenState state;
1502 state.fMinFilter = GR_GL_NEAREST;
1503 state.fMagFilter = GR_GL_NEAREST;
1504 state.fWrapS = GR_GL_CLAMP_TO_EDGE;
1505 state.fWrapT = GR_GL_CLAMP_TO_EDGE;
1506 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter));
1507 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter));
1508 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS));
1509 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT));
1510 return state;
1511 }
1512
1513 sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions,
1514 const GrBackendFormat& format,
1515 GrRenderable renderable,
1516 int renderTargetSampleCnt,
1517 skgpu::Budgeted budgeted,
1518 GrProtected isProtected,
1519 int mipLevelCount,
1520 uint32_t levelClearMask,
1521 std::string_view label) {
1522 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1523 return nullptr;
1524 }
1525 SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1);
1526
1527 SkASSERT(mipLevelCount > 0);
1528 GrMipmapStatus mipmapStatus =
1529 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1530 GrGLTextureParameters::SamplerOverriddenState initialState;
1531 GrGLTexture::Desc texDesc;
1532 texDesc.fSize = dimensions;
1533 switch (format.textureType()) {
1534 case GrTextureType::kExternal:
1535 case GrTextureType::kNone:
1536 return nullptr;
1537 case GrTextureType::k2D:
1538 texDesc.fTarget = GR_GL_TEXTURE_2D;
1539 break;
1540 case GrTextureType::kRectangle:
1541 if (mipLevelCount > 1 || !this->glCaps().rectangleTextureSupport()) {
1542 return nullptr;
1543 }
1544 texDesc.fTarget = GR_GL_TEXTURE_RECTANGLE;
1545 break;
1546 }
1547 texDesc.fFormat = GrBackendFormats::AsGLFormat(format);
1548 texDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1549 SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown);
1550 SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat));
1551 texDesc.fIsProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
1552 this->glCaps().strictProtectedness());
1553
1554 texDesc.fID = this->createTexture(dimensions, texDesc.fFormat, texDesc.fTarget, renderable,
1555 &initialState, mipLevelCount, texDesc.fIsProtected, label);
1556 if (!texDesc.fID) {
1557 return return_null_texture();
1558 }
1559
1560 sk_sp<GrGLTexture> tex;
1561 if (renderable == GrRenderable::kYes) {
1562 // unbind the texture from the texture unit before binding it to the frame buffer
1563 GL_CALL(BindTexture(texDesc.fTarget, 0));
1564 GrGLRenderTarget::IDs rtIDDesc;
1565
1566 if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) {
1567 GL_CALL(DeleteTextures(1, &texDesc.fID));
1568 return return_null_texture();
1569 }
1570 tex = sk_make_sp<GrGLTextureRenderTarget>(this,
1571 budgeted,
1572 renderTargetSampleCnt,
1573 texDesc,
1574 rtIDDesc,
1575 mipmapStatus,
1576 label);
1577 tex->baseLevelWasBoundToFBO();
1578 } else {
1579 tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipmapStatus, label);
1580 }
1581 // The non-sampler params are still at their default values.
1582 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1583 fResetTimestampForTextureParameters);
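    // Clear any levels requested in levelClearMask. Prefer glClearTexImage when available;
    // otherwise fall back to binding each level to a temporary FBO and clearing it, and
    // finally to uploading zeros from the CPU when the format cannot be a color attachment.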
1584 if (levelClearMask) {
1585 if (this->glCaps().clearTextureSupport()) {
1586 GrGLenum externalFormat, externalType;
1587 GrColorType colorType;
1588 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(
1589 texDesc.fFormat, &externalFormat, &externalType, &colorType);
1590 for (int i = 0; i < mipLevelCount; ++i) {
1591 if (levelClearMask & (1U << i)) {
1592 GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType,
1593 nullptr));
1594 }
1595 }
1596 } else if (this->glCaps().canFormatBeFBOColorAttachment(
1597 GrBackendFormats::AsGLFormat(format)) &&
1598 !this->glCaps().performColorClearsAsDraws()) {
1599 this->flushScissorTest(GrScissorTest::kDisabled);
1600 this->disableWindowRectangles();
1601 this->flushColorWrite(true);
1602 this->flushClearColor({0, 0, 0, 0});
1603 for (int i = 0; i < mipLevelCount; ++i) {
1604 if (levelClearMask & (1U << i)) {
1605 this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER,
1606 kDst_TempFBOTarget);
1607 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1608 this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER);
1609 }
1610 }
1611 fHWBoundRenderTargetUniqueID.makeInvalid();
1612 } else {
1613 this->bindTextureToScratchUnit(texDesc.fTarget, tex->textureID());
1614 std::array<float, 4> zeros = {};
1615 this->uploadColorToTex(texDesc.fFormat,
1616 texDesc.fSize,
1617 texDesc.fTarget,
1618 zeros,
1619 levelClearMask);
1620 }
1621 }
1622 return tex;
1623 }
1624
1625 sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions,
1626 const GrBackendFormat& format,
1627 skgpu::Budgeted budgeted,
1628 skgpu::Mipmapped mipmapped,
1629 GrProtected isProtected,
1630 const void* data,
1631 size_t dataSize) {
1632 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1633 return nullptr;
1634 }
1635 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1636
1637 GrGLTextureParameters::SamplerOverriddenState initialState;
1638 GrGLTexture::Desc desc;
1639 desc.fSize = dimensions;
1640 desc.fTarget = GR_GL_TEXTURE_2D;
1641 desc.fOwnership = GrBackendObjectOwnership::kOwned;
1642 desc.fFormat = GrBackendFormats::AsGLFormat(format);
1643 desc.fIsProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
1644 this->glCaps().strictProtectedness());
1645 desc.fID = this->createCompressedTexture2D(desc.fSize, compression, desc.fFormat,
1646 mipmapped, desc.fIsProtected, &initialState);
1647 if (!desc.fID) {
1648 return nullptr;
1649 }
1650
1651 if (data) {
1652 if (!this->uploadCompressedTexData(compression, desc.fFormat, dimensions, mipmapped,
1653 GR_GL_TEXTURE_2D, data, dataSize)) {
1654 GL_CALL(DeleteTextures(1, &desc.fID));
1655 return nullptr;
1656 }
1657 }
1658
1659 // Unbind this texture from the scratch texture unit.
1660 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1661
1662 GrMipmapStatus mipmapStatus = mipmapped == skgpu::Mipmapped::kYes
1663 ? GrMipmapStatus::kValid
1664 : GrMipmapStatus::kNotAllocated;
1665
1666 auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, mipmapStatus,
1667 /*label=*/"GLGpuCreateCompressedTexture");
1668 // The non-sampler params are still at their default values.
1669 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1670 fResetTimestampForTextureParameters);
1671 return tex;
1672 }
1673
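// Overload taking an OH_NativeBuffer (an OpenHarmony native buffer); not implemented in
// this GL backend.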
1674 sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions,
1675 const GrBackendFormat& format,
1676 skgpu::Budgeted budgeted,
1677 skgpu::Mipmapped mipMapped,
1678 GrProtected isProtected,
1679 OH_NativeBuffer* nativeBuffer,
1680 size_t bufferSize) {
1681 SkASSERT(!"unimplemented");
1682 return nullptr;
1683 }
1684
1685 GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture(SkISize dimensions,
1686 const GrBackendFormat& format,
1687 skgpu::Mipmapped mipmapped,
1688 GrProtected isProtected) {
1689 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1690 return {};
1691 }
1692
1693 this->handleDirtyContext();
1694
1695 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
1696 if (glFormat == GrGLFormat::kUnknown) {
1697 return {};
1698 }
1699
1700 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1701
1702 GrGLTextureInfo info;
1703 GrGLTextureParameters::SamplerOverriddenState initialState;
1704
1705 info.fTarget = GR_GL_TEXTURE_2D;
1706 info.fFormat = GrGLFormatToEnum(glFormat);
1707 info.fProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
1708 this->glCaps().strictProtectedness());
1709 info.fID = this->createCompressedTexture2D(dimensions, compression, glFormat,
1710 mipmapped, info.fProtected, &initialState);
1711 if (!info.fID) {
1712 return {};
1713 }
1714
1715 // Unbind this texture from the scratch texture unit.
1716 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1717
1718 auto parameters = sk_make_sp<GrGLTextureParameters>();
1719 // The non-sampler params are still at their default values.
1720 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1721 fResetTimestampForTextureParameters);
1722
1723 return GrBackendTextures::MakeGL(
1724 dimensions.width(), dimensions.height(), mipmapped, info, std::move(parameters));
1725 }
1726
1727 bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1728 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1729 const void* data,
1730 size_t length) {
1731 GrGLTextureInfo info;
1732 SkAssertResult(GrBackendTextures::GetGLTextureInfo(backendTexture, &info));
1733
1734 GrBackendFormat format = backendTexture.getBackendFormat();
1735 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
1736 if (glFormat == GrGLFormat::kUnknown) {
1737 return false;
1738 }
1739 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1740
1741 skgpu::Mipmapped mipmapped =
1742 backendTexture.hasMipmaps() ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
1743
1744 this->bindTextureToScratchUnit(info.fTarget, info.fID);
1745
1746 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
1747 // so that the uploads go to the right levels.
1748 if (backendTexture.hasMipmaps() && this->glCaps().mipmapLevelControlSupport()) {
1749 auto params = get_gl_texture_params(backendTexture);
1750 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
1751 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
1752 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
1753 nonsamplerState.fBaseMipMapLevel = 0;
1754 }
1755 int numMipLevels =
1756 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
1757 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
1758 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
1759             nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
1760 }
1761 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
1762 }
1763
1764 bool result = this->uploadCompressedTexData(compression,
1765 glFormat,
1766 backendTexture.dimensions(),
1767 mipmapped,
1768 GR_GL_TEXTURE_2D,
1769 data,
1770 length);
1771
1772 // Unbind this texture from the scratch texture unit.
1773 this->bindTextureToScratchUnit(info.fTarget, 0);
1774
1775 return result;
1776 }
1777
1778 int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) {
1779 if (this->glCaps().avoidStencilBuffers()) {
1780 return -1;
1781 }
1782
1783 static const int kSize = 16;
1784 SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format));
1785
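    // The first time a color format is queried, probe the driver: attach each candidate
    // stencil format to a small throwaway FBO until one yields a complete framebuffer,
    // then cache the result in the caps.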
1786 if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) {
1787 // Default to unsupported, set this if we find a stencil format that works.
1788 int firstWorkingStencilFormatIndex = -1;
1789
1790 // In the following we're not actually creating the StencilBuffer that will be used but,
1791 // rather, are just determining the correct format to use. We assume that the
1792 // acceptable format will not change between Protected and unProtected stencil buffers and
1793 // that using Protected::kNo here will not cause any issues with strictProtectedness mode
1794 // (since no work is actually submitted to a queue).
1795 const GrProtected kNotProtected = skgpu::Protected::kNo;
1796
1797 GrGLuint colorID = this->createTexture({kSize, kSize}, format, GR_GL_TEXTURE_2D,
1798 GrRenderable::kYes,
1799 nullptr,
1800 1,
1801 kNotProtected,
1802 /*label=*/"Skia");
1803 if (!colorID) {
1804 return -1;
1805 }
1806 // unbind the texture from the texture unit before binding it to the frame buffer
1807 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1808
1809 // Create Framebuffer
1810 GrGLuint fb = 0;
1811 GL_CALL(GenFramebuffers(1, &fb));
1812 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb);
1813 fHWBoundRenderTargetUniqueID.makeInvalid();
1814 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1815 GR_GL_COLOR_ATTACHMENT0,
1816 GR_GL_TEXTURE_2D,
1817 colorID,
1818 0));
1819 GrGLuint sbRBID = 0;
1820 GL_CALL(GenRenderbuffers(1, &sbRBID));
1821
1822         // Iterate over the supported stencil formats until we find a compatible one.
1823 int stencilFmtCnt = this->glCaps().stencilFormats().size();
1824 if (sbRBID) {
1825 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1826 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1827 GrGLFormat sFmt = this->glCaps().stencilFormats()[i];
1828 GrGLenum error = GL_ALLOC_CALL(RenderbufferStorage(
1829 GR_GL_RENDERBUFFER, GrGLFormatToEnum(sFmt), kSize, kSize));
1830 if (error == GR_GL_NO_ERROR) {
1831 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1832 GR_GL_STENCIL_ATTACHMENT,
1833 GR_GL_RENDERBUFFER, sbRBID));
1834 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1835 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1836 GR_GL_DEPTH_ATTACHMENT,
1837 GR_GL_RENDERBUFFER, sbRBID));
1838 } else {
1839 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1840 GR_GL_DEPTH_ATTACHMENT,
1841 GR_GL_RENDERBUFFER, 0));
1842 }
1843 GrGLenum status;
1844 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1845 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1846 firstWorkingStencilFormatIndex = i;
1847 break;
1848 }
1849 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1850 GR_GL_STENCIL_ATTACHMENT,
1851 GR_GL_RENDERBUFFER, 0));
1852 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1853 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1854 GR_GL_DEPTH_ATTACHMENT,
1855 GR_GL_RENDERBUFFER, 0));
1856 }
1857 }
1858 }
1859 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1860 }
1861 GL_CALL(DeleteTextures(1, &colorID));
1862 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
1863 this->deleteFramebuffer(fb);
1864 fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex);
1865 }
1866 return this->glCaps().getStencilFormatIndexForFormat(format);
1867 }
1868
1869 static void set_khr_debug_label(GrGLGpu* gpu, const GrGLuint id, std::string_view label) {
1870 const std::string khr_debug_label = label.empty() ? "Skia" : std::string(label);
1871 if (gpu->glCaps().debugSupport()) {
1872 GR_GL_CALL(gpu->glInterface(), ObjectLabel(GR_GL_TEXTURE, id, -1, khr_debug_label.c_str()));
1873 }
1874 }
1875
1876 GrGLuint GrGLGpu::createCompressedTexture2D(
1877 SkISize dimensions,
1878 SkTextureCompressionType compression,
1879 GrGLFormat format,
1880 skgpu::Mipmapped mipmapped,
1881 GrProtected isProtected,
1882 GrGLTextureParameters::SamplerOverriddenState* initialState) {
1883 if (format == GrGLFormat::kUnknown) {
1884 return 0;
1885 }
1886 GrGLuint id = 0;
1887 GL_CALL(GenTextures(1, &id));
1888 if (!id) {
1889 return 0;
1890 }
1891
1892 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id);
1893
1894 set_khr_debug_label(this, id, /*label=*/"Skia");
1895
1896 *initialState = set_initial_texture_params(this->glInterface(),
1897 this->glCaps(),
1898 GR_GL_TEXTURE_2D);
1899
1900 SkASSERT(isProtected == skgpu::Protected::kNo || this->glCaps().supportsProtectedContent());
1901 SkASSERT(!this->glCaps().strictProtectedness() || isProtected == skgpu::Protected::kYes);
1902
1903 if (GrProtected::kYes == isProtected) {
1904 if (this->glCaps().supportsProtectedContent()) {
1905 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_PROTECTED_EXT, GR_GL_TRUE));
1906 } else {
1907 GL_CALL(DeleteTextures(1, &id));
1908 return 0;
1909 }
1910 }
1911
1912 return id;
1913 }
1914
1915 GrGLuint GrGLGpu::createTexture(SkISize dimensions,
1916 GrGLFormat format,
1917 GrGLenum target,
1918 GrRenderable renderable,
1919 GrGLTextureParameters::SamplerOverriddenState* initialState,
1920 int mipLevelCount,
1921 GrProtected isProtected,
1922 std::string_view label) {
1923 SkASSERT(format != GrGLFormat::kUnknown);
1924 SkASSERT(!GrGLFormatIsCompressed(format));
1925
1926 GrGLuint id = 0;
1927 GL_CALL(GenTextures(1, &id));
1928
1929 if (!id) {
1930 return 0;
1931 }
1932
1933 this->bindTextureToScratchUnit(target, id);
1934
1935 set_khr_debug_label(this, id, label);
1936
1937 if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) {
1938 // provides a hint about how this texture will be used
1939 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT));
1940 }
1941
1942 if (initialState) {
1943 *initialState = set_initial_texture_params(this->glInterface(), this->glCaps(), target);
1944 } else {
1945 set_initial_texture_params(this->glInterface(), this->glCaps(), target);
1946 }
1947
1948 SkASSERT(isProtected == skgpu::Protected::kNo || this->glCaps().supportsProtectedContent());
1949 SkASSERT(!this->glCaps().strictProtectedness() || isProtected == skgpu::Protected::kYes);
1950
1951 if (GrProtected::kYes == isProtected) {
1952 if (this->glCaps().supportsProtectedContent()) {
1953 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_PROTECTED_EXT, GR_GL_TRUE));
1954 } else {
1955 GL_CALL(DeleteTextures(1, &id));
1956 return 0;
1957 }
1958 }
1959
1960 GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format);
1961
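    // Allocate the texture storage. Prefer immutable storage (glTexStorage2D) when the
    // format supports it; otherwise allocate each mip level individually with glTexImage2D.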
1962 bool success = false;
1963 if (internalFormat) {
1964 if (this->glCaps().formatSupportsTexStorage(format)) {
1965 auto levelCount = std::max(mipLevelCount, 1);
1966 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, levelCount, internalFormat,
1967 dimensions.width(), dimensions.height()));
1968 success = (error == GR_GL_NO_ERROR);
1969 } else {
1970 GrGLenum externalFormat, externalType;
1971 this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType);
1972 GrGLenum error = GR_GL_NO_ERROR;
1973 if (externalFormat && externalType) {
1974 // If we don't unbind here then nullptr is treated as a zero offset into the bound
1975 // transfer buffer rather than an indication that there is no data to copy.
1976 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1977 for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) {
1978 const int twoToTheMipLevel = 1 << level;
1979 const int currentWidth = std::max(1, dimensions.width() / twoToTheMipLevel);
1980 const int currentHeight = std::max(1, dimensions.height() / twoToTheMipLevel);
1981 error = GL_ALLOC_CALL(TexImage2D(target, level, internalFormat, currentWidth,
1982 currentHeight, 0, externalFormat, externalType,
1983 nullptr));
1984 }
1985 success = (error == GR_GL_NO_ERROR);
1986 }
1987 }
1988 }
1989 if (success) {
1990 return id;
1991 }
1992 GL_CALL(DeleteTextures(1, &id));
1993 return 0;
1994 }
1995
1996 sk_sp<GrAttachment> GrGLGpu::makeStencilAttachment(const GrBackendFormat& colorFormat,
1997 SkISize dimensions, int numStencilSamples) {
1998 int sIdx = this->getCompatibleStencilIndex(GrBackendFormats::AsGLFormat(colorFormat));
1999 if (sIdx < 0) {
2000 return nullptr;
2001 }
2002 GrGLFormat sFmt = this->glCaps().stencilFormats()[sIdx];
2003
2004 auto stencil = GrGLAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
2005 if (stencil) {
2006 fStats.incStencilAttachmentCreates();
2007 }
2008 return stencil;
2009 }
2010
2011 sk_sp<GrAttachment> GrGLGpu::makeMSAAAttachment(SkISize dimensions, const GrBackendFormat& format,
2012 int numSamples, GrProtected isProtected,
2013 GrMemoryless isMemoryless) {
2014 SkASSERT(isMemoryless == GrMemoryless::kNo);
2015 return GrGLAttachment::MakeMSAA(
2016 this, dimensions, numSamples, GrBackendFormats::AsGLFormat(format));
2017 }
2018
2019 ////////////////////////////////////////////////////////////////////////////////
2020
2021 sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size,
2022 GrGpuBufferType intendedType,
2023 GrAccessPattern accessPattern) {
2024 return GrGLBuffer::Make(this, size, intendedType, accessPattern);
2025 }
2026
2027 void GrGLGpu::flushScissorTest(GrScissorTest scissorTest) {
2028 if (GrScissorTest::kEnabled == scissorTest) {
2029 if (kYes_TriState != fHWScissorSettings.fEnabled) {
2030 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2031 fHWScissorSettings.fEnabled = kYes_TriState;
2032 }
2033 } else {
2034 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2035 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2036 fHWScissorSettings.fEnabled = kNo_TriState;
2037 }
2038 }
2039 }
2040
2041 void GrGLGpu::flushScissorRect(const SkIRect& scissor, int rtHeight, GrSurfaceOrigin rtOrigin) {
2042 SkASSERT(fHWScissorSettings.fEnabled == TriState::kYes_TriState);
2043 auto nativeScissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissor);
2044 if (fHWScissorSettings.fRect != nativeScissor) {
2045 GL_CALL(Scissor(nativeScissor.fX, nativeScissor.fY, nativeScissor.fWidth,
2046 nativeScissor.fHeight));
2047 fHWScissorSettings.fRect = nativeScissor;
2048 }
2049 }
2050
2051 void GrGLGpu::flushViewport(const SkIRect& viewport, int rtHeight, GrSurfaceOrigin rtOrigin) {
2052 auto nativeViewport = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, viewport);
2053 if (fHWViewport != nativeViewport) {
2054 GL_CALL(Viewport(nativeViewport.fX, nativeViewport.fY,
2055 nativeViewport.fWidth, nativeViewport.fHeight));
2056 fHWViewport = nativeViewport;
2057 }
2058 }
2059
2060 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
2061 const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
2062 #ifndef USE_NSIGHT
2063 typedef GrWindowRectsState::Mode Mode;
2064 // Window rects can't be used on-screen.
2065 SkASSERT(!windowState.enabled() || !rt->glRTFBOIDis0());
2066 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
2067
2068 if (!this->caps()->maxWindowRectangles() ||
2069 fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) {
2070 return;
2071 }
2072
2073 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
2074 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
2075 int numWindows = std::min(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
2076 SkASSERT(windowState.numWindows() == numWindows);
2077
2078 GrNativeRect glwindows[GrWindowRectangles::kMaxWindows];
2079 const SkIRect* skwindows = windowState.windows().data();
2080 for (int i = 0; i < numWindows; ++i) {
2081 glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]);
2082 }
2083
2084 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
2085 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
2086
2087 fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState);
2088 #endif
2089 }
2090
2091 void GrGLGpu::disableWindowRectangles() {
2092 #ifndef USE_NSIGHT
2093 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
2094 return;
2095 }
2096 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
2097 fHWWindowRectsState.setDisabled();
2098 #endif
2099 }
2100
2101 bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget, bool useMultisampleFBO,
2102 const GrProgramInfo& programInfo) {
2103 this->handleDirtyContext();
2104
2105 sk_sp<GrGLProgram> program = fProgramCache->findOrCreateProgram(this->getContext(),
2106 programInfo);
2107 if (!program) {
2108 GrCapsDebugf(this->caps(), "Failed to create program!\n");
2109 return false;
2110 }
2111
2112 this->flushProgram(std::move(program));
2113
2114 // Swizzle the blend to match what the shader will output.
2115 this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(),
2116 programInfo.pipeline().writeSwizzle());
2117
2118 fHWProgram->updateUniforms(renderTarget, programInfo);
2119
2120 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
2121 GrStencilSettings stencil;
2122 if (programInfo.isStencilEnabled()) {
2123 SkASSERT(glRT->getStencilAttachment(useMultisampleFBO));
2124 stencil.reset(*programInfo.userStencilSettings(),
2125 programInfo.pipeline().hasStencilClip(),
2126 glRT->numStencilBits(useMultisampleFBO));
2127 }
2128 this->flushStencil(stencil, programInfo.origin());
2129 this->flushScissorTest(GrScissorTest(programInfo.pipeline().isScissorTestEnabled()));
2130 this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(),
2131 glRT, programInfo.origin());
2132 this->flushConservativeRasterState(programInfo.pipeline().usesConservativeRaster());
2133 this->flushWireframeState(programInfo.pipeline().isWireframe());
2134
2135 // This must come after textures are flushed because a texture may need
2136 // to be msaa-resolved (which will modify bound FBO state).
2137 this->flushRenderTarget(glRT, useMultisampleFBO);
2138
2139 return true;
2140 }
2141
2142 void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
2143 if (!program) {
2144 fHWProgram.reset();
2145 fHWProgramID = 0;
2146 return;
2147 }
2148 SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
2149 if (program == fHWProgram) {
2150 return;
2151 }
2152 auto id = program->programID();
2153 SkASSERT(id);
2154 GL_CALL(UseProgram(id));
2155 fHWProgram = std::move(program);
2156 fHWProgramID = id;
2157 }
2158
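// Binds a bare program ID that is not wrapped in a GrGLProgram; clears the cached program
// pointer so the tracked GL program state stays consistent.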
2159 void GrGLGpu::flushProgram(GrGLuint id) {
2160 SkASSERT(id);
2161 if (fHWProgramID == id) {
2162 SkASSERT(!fHWProgram);
2163 return;
2164 }
2165 fHWProgram.reset();
2166 GL_CALL(UseProgram(id));
2167 fHWProgramID = id;
2168 }
2169
2170 void GrGLGpu::didDrawTo(GrRenderTarget* rt) {
2171 SkASSERT(fHWWriteToColor != kUnknown_TriState);
2172 if (fHWWriteToColor == kYes_TriState) {
2173 // The bounds are only used to check for empty and we don't know the bounds. The origin
2174 // is irrelevant if there are no bounds.
2175 this->didWriteToSurface(rt, kTopLeft_GrSurfaceOrigin, /*bounds=*/nullptr);
2176 }
2177 }
2178
2179 GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
2180 this->handleDirtyContext();
2181
2182 // Index buffer state is tied to the vertex array.
2183 if (GrGpuBufferType::kIndex == type) {
2184 this->bindVertexArray(0);
2185 }
2186
2187 auto* bufferState = this->hwBufferState(type);
2188 if (buffer->isCpuBuffer()) {
2189 if (!bufferState->fBufferZeroKnownBound) {
2190 GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
2191 bufferState->fBufferZeroKnownBound = true;
2192 bufferState->fBoundBufferUniqueID.makeInvalid();
2193 }
2194 } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
2195 bufferState->fBoundBufferUniqueID) {
2196 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
2197 GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
2198 bufferState->fBufferZeroKnownBound = false;
2199 bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
2200 }
2201
2202 return bufferState->fGLTarget;
2203 }
2204
2205 void GrGLGpu::clear(const GrScissorState& scissor,
2206 std::array<float, 4> color,
2207 GrRenderTarget* target,
2208 bool useMultisampleFBO,
2209 GrSurfaceOrigin origin) {
2210 // parent class should never let us get here with no RT
2211 SkASSERT(target);
2212 SkASSERT(!this->caps()->performColorClearsAsDraws());
2213 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2214
2215 this->handleDirtyContext();
2216
2217 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2218
2219 this->flushRenderTarget(glRT, useMultisampleFBO);
2220 this->flushScissor(scissor, glRT->height(), origin);
2221 this->disableWindowRectangles();
2222 this->flushColorWrite(true);
2223 this->flushClearColor(color);
2224 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
2225 this->didWriteToSurface(glRT, origin, scissor.enabled() ? &scissor.rect() : nullptr);
2226 }
2227
2228 static bool use_tiled_rendering(const GrGLCaps& glCaps,
2229 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2230 // Only use the tiled rendering extension if we can explicitly clear and discard the stencil.
2231 // Otherwise it's faster to just not use it.
2232 return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp &&
2233 GrStoreOp::kDiscard == stencilLoadStore.fStoreOp;
2234 }
2235
2236 void GrGLGpu::beginCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2237 const SkIRect& bounds, GrSurfaceOrigin origin,
2238 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2239 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2240 SkASSERT(!fIsExecutingCommandBuffer_DebugOnly);
2241
2242 this->handleDirtyContext();
2243
2244 this->flushRenderTarget(rt, useMultisampleFBO);
2245 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true);
2246
2247 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2248 auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, rt->height(), bounds);
2249 GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp)
2250 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2251 SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering().
2252 GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth,
2253 nativeBounds.fHeight, preserveMask));
2254 }
2255
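    // Fold any requested color and stencil load-op clears into a single glClear call.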
2256 GrGLbitfield clearMask = 0;
2257 if (GrLoadOp::kClear == colorLoadStore.fLoadOp) {
2258 SkASSERT(!this->caps()->performColorClearsAsDraws());
2259 this->flushClearColor(colorLoadStore.fClearColor);
2260 this->flushColorWrite(true);
2261 clearMask |= GR_GL_COLOR_BUFFER_BIT;
2262 }
2263 if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) {
2264 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2265 GL_CALL(StencilMask(0xffffffff));
2266 GL_CALL(ClearStencil(0));
2267 clearMask |= GR_GL_STENCIL_BUFFER_BIT;
2268 }
2269 if (clearMask) {
2270 this->flushScissorTest(GrScissorTest::kDisabled);
2271 this->disableWindowRectangles();
2272 GL_CALL(Clear(clearMask));
2273 if (clearMask & GR_GL_COLOR_BUFFER_BIT) {
2274 this->didWriteToSurface(rt, origin, nullptr);
2275 }
2276 }
2277 }
2278
2279 void GrGLGpu::endCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2280 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2281 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2282 SkASSERT(fIsExecutingCommandBuffer_DebugOnly);
2283
2284 this->handleDirtyContext();
2285
2286 if (rt->uniqueID() != fHWBoundRenderTargetUniqueID ||
2287 useMultisampleFBO != fHWBoundFramebufferIsMSAA) {
2288 // The framebuffer binding changed in the middle of a command buffer. We should have already
2289 // printed a warning during onFBOChanged.
2290 return;
2291 }
2292
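    // If attachments are being discarded, tell the driver so tiled GPUs can skip writing
    // them back to memory.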
2293 if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) {
2294 STArray<2, GrGLenum> discardAttachments;
2295 if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) {
2296 discardAttachments.push_back(
2297 rt->isFBO0(useMultisampleFBO) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0);
2298 }
2299 if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) {
2300 discardAttachments.push_back(
2301 rt->isFBO0(useMultisampleFBO) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT);
2302 }
2303
2304 if (!discardAttachments.empty()) {
2305 if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) {
2306 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.size(),
2307 discardAttachments.begin()));
2308 } else {
2309 SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType());
2310 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.size(),
2311 discardAttachments.begin()));
2312 }
2313 }
2314 }
2315
2316 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2317 GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp)
2318 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2319 // Handled by use_tiled_rendering().
2320 SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp);
2321 GL_CALL(EndTiling(preserveMask));
2322 }
2323
2324 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false);
2325 }
2326
2327 void GrGLGpu::clearStencilClip(const GrScissorState& scissor, bool insideStencilMask,
2328 GrRenderTarget* target, bool useMultisampleFBO,
2329 GrSurfaceOrigin origin) {
2330 SkASSERT(target);
2331 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2332 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2333 this->handleDirtyContext();
2334
2335 GrAttachment* sb = target->getStencilAttachment(useMultisampleFBO);
2336 if (!sb) {
2337 // We should only get here if we marked a proxy as requiring a SB. However,
2338 // the SB creation could later fail. Likely clipping is going to go awry now.
2339 return;
2340 }
2341
2342 GrGLint stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
2343 #if 0
2344 SkASSERT(stencilBitCount > 0);
2345 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
2346 #else
2347 // we could just clear the clip bit but when we go through
2348 // ANGLE a partial stencil mask will cause clears to be
2349 // turned into draws. Our contract on OpsTask says that
2350 // changing the clip between stencil passes may or may not
2351 // zero the client's clip bits. So we just clear the whole thing.
2352 static const GrGLint clipStencilMask = ~0;
2353 #endif
2354 GrGLint value;
2355 if (insideStencilMask) {
2356 value = (1 << (stencilBitCount - 1));
2357 } else {
2358 value = 0;
2359 }
2360 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2361 this->flushRenderTarget(glRT, useMultisampleFBO);
2362
2363 this->flushScissor(scissor, glRT->height(), origin);
2364 this->disableWindowRectangles();
2365
2366 GL_CALL(StencilMask((uint32_t) clipStencilMask));
2367 GL_CALL(ClearStencil(value));
2368 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2369 fHWStencilSettings.invalidate();
2370 }
2371
2372 bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface,
2373 SkIRect rect,
2374 GrColorType surfaceColorType,
2375 GrColorType dstColorType,
2376 void* offsetOrPtr,
2377 int rowWidthInPixels) {
2378 SkASSERT(surface);
2379
2380 auto format = GrBackendFormats::AsGLFormat(surface->backendFormat());
2381 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2382 if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) {
2383 return false;
2384 }
2385 GrGLenum externalFormat = 0;
2386 GrGLenum externalType = 0;
2387 this->glCaps().getReadPixelsFormat(
2388 format, surfaceColorType, dstColorType, &externalFormat, &externalType);
2389 if (!externalFormat || !externalType) {
2390 return false;
2391 }
2392
2393 if (renderTarget) {
2394 // Always bind the single sample FBO since we can't read pixels from an MSAA framebuffer.
2395 constexpr bool useMultisampleFBO = false;
2396 if (renderTarget->numSamples() > 1 && renderTarget->isFBO0(useMultisampleFBO)) {
2397 return false;
2398 }
2399 this->flushRenderTarget(renderTarget, useMultisampleFBO);
2400 } else {
2401 // Use a temporary FBO.
2402 this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
2403 fHWBoundRenderTargetUniqueID.makeInvalid();
2404 }
2405
2406 // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
2407 if (rowWidthInPixels != rect.width()) {
2408 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2409 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels));
2410 }
2411 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1));
2412
2413 GL_CALL(ReadPixels(rect.left(),
2414 rect.top(),
2415 rect.width(),
2416 rect.height(),
2417 externalFormat,
2418 externalType,
2419 offsetOrPtr));
2420
2421 if (rowWidthInPixels != rect.width()) {
2422 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2423 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2424 }
2425
2426 if (!renderTarget) {
2427 this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER);
2428 }
2429 return true;
2430 }
2431
2432 bool GrGLGpu::onReadPixels(GrSurface* surface,
2433 SkIRect rect,
2434 GrColorType surfaceColorType,
2435 GrColorType dstColorType,
2436 void* buffer,
2437 size_t rowBytes) {
2438 SkASSERT(surface);
2439
2440 size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType);
2441
2442 // GL_PACK_ROW_LENGTH is in terms of pixels not bytes.
2443 int rowPixelWidth;
2444
2445 if (rowBytes == SkToSizeT(rect.width()*bytesPerPixel)) {
2446 rowPixelWidth = rect.width();
2447 } else {
2448 SkASSERT(!(rowBytes % bytesPerPixel));
2449 rowPixelWidth = rowBytes / bytesPerPixel;
2450 }
2451 this->unbindXferBuffer(GrGpuBufferType::kXferGpuToCpu);
2452 return this->readOrTransferPixelsFrom(surface,
2453 rect,
2454 surfaceColorType,
2455 dstColorType,
2456 buffer,
2457 rowPixelWidth);
2458 }
2459
2460 GrOpsRenderPass* GrGLGpu::onGetOpsRenderPass(
2461 GrRenderTarget* rt,
2462 bool useMultisampleFBO,
2463 GrAttachment*,
2464 GrSurfaceOrigin origin,
2465 const SkIRect& bounds,
2466 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
2467 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
2468 const TArray<GrSurfaceProxy*, true>& sampledProxies,
2469 GrXferBarrierFlags renderPassXferBarriers) {
2470 if (!fCachedOpsRenderPass) {
2471 fCachedOpsRenderPass = std::make_unique<GrGLOpsRenderPass>(this);
2472 }
2473 if (useMultisampleFBO && rt->numSamples() == 1) {
2474 // We will be using dynamic msaa. Ensure there is an attachment.
2475 auto glRT = static_cast<GrGLRenderTarget*>(rt);
2476 if (!glRT->ensureDynamicMSAAAttachment()) {
2477 SkDebugf("WARNING: Failed to make dmsaa attachment. Render pass will be dropped.");
2478 return nullptr;
2479 }
2480 }
2481 fCachedOpsRenderPass->set(rt, useMultisampleFBO, bounds, origin, colorInfo, stencilInfo);
2482 return fCachedOpsRenderPass.get();
2483 }
2484
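// Binds the target's FBO (MSAA or single-sample) if it differs from the cached binding, then
// refreshes dependent state: viewport, an FBO0 scissor workaround, sRGB write control, and
// the implementation-defined read format query.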
2485 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, bool useMultisampleFBO) {
2486 SkASSERT(target);
2487 GrGpuResource::UniqueID rtID = target->uniqueID();
2488 if (fHWBoundRenderTargetUniqueID != rtID ||
2489 fHWBoundFramebufferIsMSAA != useMultisampleFBO ||
2490 target->mustRebind(useMultisampleFBO)) {
2491 target->bind(useMultisampleFBO);
2492 #ifdef SK_DEBUG
2493 // don't do this check in Chromium -- this is causing
2494 // lots of repeated command buffer flushes when the compositor is
2495 // rendering with Ganesh, which is really slow; even too slow for
2496 // Debug mode.
2497 // Also don't do this when we know glCheckFramebufferStatus() may have side effects.
2498 if (!this->glCaps().skipErrorChecks() &&
2499 !this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
2500 GrGLenum status;
2501 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
2502 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
2503 SkDebugf("GrGLGpu::flushRenderTargetNoColorWrites glCheckFramebufferStatus %x\n",
2504 status);
2505 }
2506 }
2507 #endif
2508 fHWBoundRenderTargetUniqueID = rtID;
2509 fHWBoundFramebufferIsMSAA = useMultisampleFBO;
2510 this->flushViewport(SkIRect::MakeSize(target->dimensions()),
2511 target->height(),
2512 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
2513 }
2514 if (this->caps()->workarounds().force_update_scissor_state_when_binding_fbo0) {
2515 // The driver forgets the correct scissor state when using FBO 0.
2516 if (!fHWScissorSettings.fRect.isInvalid()) {
2517 const GrNativeRect& r = fHWScissorSettings.fRect;
2518 GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight));
2519 }
2520 if (fHWScissorSettings.fEnabled == kYes_TriState) {
2521 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2522 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2523 } else if (fHWScissorSettings.fEnabled == kNo_TriState) {
2524 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2525 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2526 }
2527 }
2528
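    // Keep GL_FRAMEBUFFER_SRGB in sync with whether the target's format is sRGB.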
2529 if (this->glCaps().srgbWriteControl()) {
2530 this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat()));
2531 }
2532
2533 if (this->glCaps().shouldQueryImplementationReadSupport(target->format())) {
2534 GrGLint format;
2535 GrGLint type;
2536 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format);
2537 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type);
2538 this->glCaps().didQueryImplementationReadSupport(target->format(), format, type);
2539 }
2540 }
2541
2542 void GrGLGpu::flushFramebufferSRGB(bool enable) {
2543 if (enable && kYes_TriState != fHWSRGBFramebuffer) {
2544 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
2545 fHWSRGBFramebuffer = kYes_TriState;
2546 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
2547 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
2548 fHWSRGBFramebuffer = kNo_TriState;
2549 }
2550 }
2551
2552 GrGLenum GrGLGpu::prepareToDraw(GrPrimitiveType primitiveType) {
2553 fStats.incNumDraws();
2554
2555 if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
2556 GrIsPrimTypeLines(primitiveType) && !GrIsPrimTypeLines(fLastPrimitiveType)) {
2557 GL_CALL(Enable(GR_GL_CULL_FACE));
2558 GL_CALL(Disable(GR_GL_CULL_FACE));
2559 }
2560 fLastPrimitiveType = primitiveType;
2561
2562 switch (primitiveType) {
2563 case GrPrimitiveType::kTriangles:
2564 return GR_GL_TRIANGLES;
2565 case GrPrimitiveType::kTriangleStrip:
2566 return GR_GL_TRIANGLE_STRIP;
2567 case GrPrimitiveType::kPoints:
2568 return GR_GL_POINTS;
2569 case GrPrimitiveType::kLines:
2570 return GR_GL_LINES;
2571 case GrPrimitiveType::kLineStrip:
2572 return GR_GL_LINE_STRIP;
2573 }
2574 SK_ABORT("invalid GrPrimitiveType");
2575 }
2576
2577 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
2578 auto glRT = static_cast<GrGLRenderTarget*>(target);
2579 if (this->glCaps().framebufferResolvesMustBeFullSize()) {
2580 this->resolveRenderFBOs(glRT, SkIRect::MakeSize(glRT->dimensions()),
2581 ResolveDirection::kMSAAToSingle);
2582 } else {
2583 this->resolveRenderFBOs(glRT, resolveRect, ResolveDirection::kMSAAToSingle);
2584 }
2585 }
2586
2587 void GrGLGpu::resolveRenderFBOs(GrGLRenderTarget* rt, const SkIRect& resolveRect,
2588 ResolveDirection resolveDirection,
2589 bool invalidateReadBufferAfterBlit) {
2590 this->handleDirtyContext();
2591 rt->bindForResolve(resolveDirection);
2592
2593 const GrGLCaps& caps = this->glCaps();
2594
2595 // make sure we go through flushRenderTarget() since we've modified
2596 // the bound DRAW FBO ID.
2597 fHWBoundRenderTargetUniqueID.makeInvalid();
2598 if (GrGLCaps::kES_Apple_MSFBOType == caps.msFBOType()) {
2599 // The Apple extension doesn't support blitting from single to multisample.
2600 SkASSERT(resolveDirection != ResolveDirection::kSingleToMSAA);
2601 SkASSERT(resolveRect == SkIRect::MakeSize(rt->dimensions()));
2602 // Apple's extension uses the scissor as the blit bounds.
2603 // Passing in kTopLeft_GrSurfaceOrigin will make sure no transformation of the rect
2604 // happens inside flushScissor since resolveRect is already in native device coordinates.
2605 GrScissorState scissor(rt->dimensions());
2606 SkAssertResult(scissor.set(resolveRect));
2607 this->flushScissor(scissor, rt->height(), kTopLeft_GrSurfaceOrigin);
2608 this->disableWindowRectangles();
2609 GL_CALL(ResolveMultisampleFramebuffer());
2610 } else {
2611 SkASSERT(!caps.framebufferResolvesMustBeFullSize() ||
2612 resolveRect == SkIRect::MakeSize(rt->dimensions()));
2613 int l = resolveRect.x();
2614 int b = resolveRect.y();
2615 int r = resolveRect.x() + resolveRect.width();
2616 int t = resolveRect.y() + resolveRect.height();
2617
2618 // BlitFrameBuffer respects the scissor, so disable it.
2619 this->flushScissorTest(GrScissorTest::kDisabled);
2620 this->disableWindowRectangles();
2621 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2622 }
2623
2624 if (caps.invalidateFBType() != GrGLCaps::kNone_InvalidateFBType &&
2625 invalidateReadBufferAfterBlit) {
2626 // Invalidate the read FBO attachment after the blit, in hopes that this allows the driver
2627 // to perform tiling optimizations.
2628 bool readBufferIsMSAA = resolveDirection == ResolveDirection::kMSAAToSingle;
2629 GrGLenum colorDiscardAttachment = rt->isFBO0(readBufferIsMSAA) ? GR_GL_COLOR
2630 : GR_GL_COLOR_ATTACHMENT0;
2631 if (caps.invalidateFBType() == GrGLCaps::kInvalidate_InvalidateFBType) {
2632 GL_CALL(InvalidateFramebuffer(GR_GL_READ_FRAMEBUFFER, 1, &colorDiscardAttachment));
2633 } else {
2634 SkASSERT(caps.invalidateFBType() == GrGLCaps::kDiscard_InvalidateFBType);
2635 // glDiscardFramebuffer only accepts GL_FRAMEBUFFER.
2636 rt->bind(readBufferIsMSAA);
2637 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, 1, &colorDiscardAttachment));
2638 }
2639 }
2640 }
2641
2642 namespace {
2643
2644
2645 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2646 static const GrGLenum gTable[kGrStencilOpCount] = {
2647 GR_GL_KEEP, // kKeep
2648 GR_GL_ZERO, // kZero
2649 GR_GL_REPLACE, // kReplace
2650 GR_GL_INVERT, // kInvert
2651 GR_GL_INCR_WRAP, // kIncWrap
2652 GR_GL_DECR_WRAP, // kDecWrap
2653 GR_GL_INCR, // kIncClamp
2654 GR_GL_DECR, // kDecClamp
2655 };
2656 static_assert(0 == (int)GrStencilOp::kKeep);
2657 static_assert(1 == (int)GrStencilOp::kZero);
2658 static_assert(2 == (int)GrStencilOp::kReplace);
2659 static_assert(3 == (int)GrStencilOp::kInvert);
2660 static_assert(4 == (int)GrStencilOp::kIncWrap);
2661 static_assert(5 == (int)GrStencilOp::kDecWrap);
2662 static_assert(6 == (int)GrStencilOp::kIncClamp);
2663 static_assert(7 == (int)GrStencilOp::kDecClamp);
2664 SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
2665 return gTable[(int)op];
2666 }
2667
2668 void set_gl_stencil(const GrGLInterface* gl,
2669 const GrStencilSettings::Face& face,
2670 GrGLenum glFace) {
2671 GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
2672 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
2673 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
2674
2675 GrGLint ref = face.fRef;
2676 GrGLint mask = face.fTestMask;
2677 GrGLint writeMask = face.fWriteMask;
2678
2679 if (GR_GL_FRONT_AND_BACK == glFace) {
2680 // we call the combined func just in case separate stencil is not
2681 // supported.
2682 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2683 GR_GL_CALL(gl, StencilMask(writeMask));
2684 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
2685 } else {
2686 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2687 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2688 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
2689 }
2690 }
2691 } // namespace
2692
2693 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) {
2694 if (stencilSettings.isDisabled()) {
2695 this->disableStencil();
2696 } else if (fHWStencilSettings != stencilSettings ||
2697 (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) {
2698 if (kYes_TriState != fHWStencilTestEnabled) {
2699 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2700
2701 fHWStencilTestEnabled = kYes_TriState;
2702 }
2703 if (!stencilSettings.isTwoSided()) {
2704 set_gl_stencil(this->glInterface(), stencilSettings.singleSidedFace(),
2705 GR_GL_FRONT_AND_BACK);
2706 } else {
2707 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCWFace(origin),
2708 GR_GL_FRONT);
2709 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCCWFace(origin),
2710 GR_GL_BACK);
2711 }
2712 fHWStencilSettings = stencilSettings;
2713 fHWStencilOrigin = origin;
2714 }
2715 }
2716
2717 void GrGLGpu::disableStencil() {
2718 if (kNo_TriState != fHWStencilTestEnabled) {
2719 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2720
2721 fHWStencilTestEnabled = kNo_TriState;
2722 fHWStencilSettings.invalidate();
2723 }
2724 }
2725
2726 void GrGLGpu::flushConservativeRasterState(bool enabled) {
2727 if (this->caps()->conservativeRasterSupport()) {
2728 if (enabled) {
2729 if (kYes_TriState != fHWConservativeRasterEnabled) {
2730 GL_CALL(Enable(GR_GL_CONSERVATIVE_RASTERIZATION));
2731 fHWConservativeRasterEnabled = kYes_TriState;
2732 }
2733 } else {
2734 if (kNo_TriState != fHWConservativeRasterEnabled) {
2735 GL_CALL(Disable(GR_GL_CONSERVATIVE_RASTERIZATION));
2736 fHWConservativeRasterEnabled = kNo_TriState;
2737 }
2738 }
2739 }
2740 }
2741
2742 void GrGLGpu::flushWireframeState(bool enabled) {
2743 if (this->caps()->wireframeSupport()) {
2744 if (this->caps()->wireframeMode() || enabled) {
2745 if (kYes_TriState != fHWWireframeEnabled) {
2746 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
2747 fHWWireframeEnabled = kYes_TriState;
2748 }
2749 } else {
2750 if (kNo_TriState != fHWWireframeEnabled) {
2751 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
2752 fHWWireframeEnabled = kNo_TriState;
2753 }
2754 }
2755 }
2756 }
2757
2758 void GrGLGpu::flushBlendAndColorWrite(const skgpu::BlendInfo& blendInfo,
2759 const skgpu::Swizzle& swizzle) {
2760 if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWritesColor) {
2761 // We need to work around a driver bug by using a blend state that preserves the dst color,
2762 // rather than disabling color writes.
2763 skgpu::BlendInfo preserveDstBlend;
2764 preserveDstBlend.fSrcBlend = skgpu::BlendCoeff::kZero;
2765 preserveDstBlend.fDstBlend = skgpu::BlendCoeff::kOne;
2766 this->flushBlendAndColorWrite(preserveDstBlend, swizzle);
2767 return;
2768 }
2769
2770 skgpu::BlendEquation equation = blendInfo.fEquation;
2771 skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
2772 skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
2773
2774 // Any optimization to disable blending should have already been applied and
2775     // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2776 bool blendOff = skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff) ||
2777 !blendInfo.fWritesColor;
2778
2779 if (blendOff) {
2780 if (kNo_TriState != fHWBlendState.fEnabled) {
2781 GL_CALL(Disable(GR_GL_BLEND));
2782
2783 // Workaround for the ARM KHR_blend_equation_advanced disable flags issue
2784 // https://code.google.com/p/skia/issues/detail?id=3943
2785 if (this->ctxInfo().vendor() == GrGLVendor::kARM &&
2786 skgpu::BlendEquationIsAdvanced(fHWBlendState.fEquation)) {
2787 SkASSERT(this->caps()->advancedBlendEquationSupport());
2788 // Set to any basic blending equation.
2789 skgpu::BlendEquation blendEquation = skgpu::BlendEquation::kAdd;
2790 GL_CALL(BlendEquation(gXfermodeEquation2Blend[(int)blendEquation]));
2791 fHWBlendState.fEquation = blendEquation;
2792 }
2793
2794 // Workaround for Adreno 5xx BlendFunc bug. See crbug.com/1241134.
2795 // We must also check to see if the blend coeffs are invalid because the client may have
2796 // reset our gl state and thus we will have forgotten if the previous use was a coeff
2797 // that referenced src2.
2798 if (this->glCaps().mustResetBlendFuncBetweenDualSourceAndDisable() &&
2799 (skgpu::BlendCoeffRefsSrc2(fHWBlendState.fSrcCoeff) ||
2800 skgpu::BlendCoeffRefsSrc2(fHWBlendState.fDstCoeff) ||
2801 fHWBlendState.fSrcCoeff == skgpu::BlendCoeff::kIllegal ||
2802 fHWBlendState.fDstCoeff == skgpu::BlendCoeff::kIllegal)) {
2803 // We just reset the blend func to anything that doesn't reference src2
2804 GL_CALL(BlendFunc(GR_GL_ONE, GR_GL_ZERO));
2805 fHWBlendState.fSrcCoeff = skgpu::BlendCoeff::kOne;
2806 fHWBlendState.fDstCoeff = skgpu::BlendCoeff::kZero;
2807 }
2808
2809 fHWBlendState.fEnabled = kNo_TriState;
2810 }
2811 } else {
2812 if (kYes_TriState != fHWBlendState.fEnabled) {
2813 GL_CALL(Enable(GR_GL_BLEND));
2814
2815 fHWBlendState.fEnabled = kYes_TriState;
2816 }
2817
2818 if (fHWBlendState.fEquation != equation) {
2819 GL_CALL(BlendEquation(gXfermodeEquation2Blend[(int)equation]));
2820 fHWBlendState.fEquation = equation;
2821 }
2822
2823 if (skgpu::BlendEquationIsAdvanced(equation)) {
2824 SkASSERT(this->caps()->advancedBlendEquationSupport());
2825
2826 this->flushColorWrite(blendInfo.fWritesColor);
2827 // Advanced equations have no other blend state.
2828 return;
2829 }
2830
2831 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
2832 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[(int)srcCoeff],
2833 gXfermodeCoeff2Blend[(int)dstCoeff]));
2834 fHWBlendState.fSrcCoeff = srcCoeff;
2835 fHWBlendState.fDstCoeff = dstCoeff;
2836 }
2837
2838 if (skgpu::BlendCoeffRefsConstant(srcCoeff) || skgpu::BlendCoeffRefsConstant(dstCoeff)) {
2839 SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
2840 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
2841 GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA));
2842 fHWBlendState.fConstColor = blendConst;
2843 fHWBlendState.fConstColorValid = true;
2844 }
2845 }
2846 }
2847
2848 this->flushColorWrite(blendInfo.fWritesColor);
2849 }
2850
2851 void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const skgpu::Swizzle& swizzle,
2852 GrGLTexture* texture) {
2853 SkASSERT(texture);
2854
2855 #ifdef SK_DEBUG
2856 if (!this->caps()->npotTextureTileSupport()) {
2857 if (samplerState.isRepeatedX()) {
2858 const int w = texture->width();
2859 SkASSERT(SkIsPow2(w));
2860 }
2861 if (samplerState.isRepeatedY()) {
2862 const int h = texture->height();
2863 SkASSERT(SkIsPow2(h));
2864 }
2865 }
2866 #endif
2867
2868 GrGpuResource::UniqueID textureID = texture->uniqueID();
2869 GrGLenum target = texture->target();
2870 if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) {
2871 this->setTextureUnit(unitIdx);
2872 GL_CALL(BindTexture(target, texture->textureID()));
2873 fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID);
2874 }
2875
2876 if (samplerState.mipmapped() == skgpu::Mipmapped::kYes) {
2877 if (!this->caps()->mipmapSupport() || texture->mipmapped() == skgpu::Mipmapped::kNo) {
2878 // We should have caught this already.
2879 SkASSERT(!samplerState.isAniso());
2880 samplerState = GrSamplerState(samplerState.wrapModeX(),
2881 samplerState.wrapModeY(),
2882 samplerState.filter(),
2883 GrSamplerState::MipmapMode::kNone);
2884 } else {
2885 SkASSERT(!texture->mipmapsAreDirty());
2886 }
2887 }
2888
2889 auto timestamp = texture->parameters()->resetTimestamp();
2890 bool setAll = timestamp < fResetTimestampForTextureParameters;
2891 const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr;
2892 GrGLTextureParameters::SamplerOverriddenState newSamplerState;
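    // With sampler objects the sampler state lives in the bound sampler, so per-texture
    // parameters mostly don't need updating; without them, diff the desired sampler state
    // against what was last recorded on the texture and emit only the changed TexParameters.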
2893 if (this->glCaps().useSamplerObjects()) {
2894 fSamplerObjectCache->bindSampler(unitIdx, samplerState);
2895 if (this->glCaps().mustSetAnyTexParameterToEnableMipmapping()) {
2896 if (samplerState.mipmapped() == skgpu::Mipmapped::kYes) {
2897 GrGLenum minFilter = filter_to_gl_min_filter(samplerState.filter(),
2898 samplerState.mipmapMode());
2899 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2900 texture->parameters()->samplerOverriddenState();
2901 this->setTextureUnit(unitIdx);
2902 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, minFilter));
2903 newSamplerState = oldSamplerState;
2904 newSamplerState.fMinFilter = minFilter;
2905 samplerStateToRecord = &newSamplerState;
2906 }
2907 }
2908 } else {
2909 if (fSamplerObjectCache) {
2910 fSamplerObjectCache->unbindSampler(unitIdx);
2911 }
2912 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2913 texture->parameters()->samplerOverriddenState();
2914 samplerStateToRecord = &newSamplerState;
2915
2916 newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter(),
2917 samplerState.mipmapMode());
2918 newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter());
2919
2920 newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps());
2921 newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps());
2922
2923 newSamplerState.fMaxAniso = std::min(static_cast<GrGLfloat>(samplerState.maxAniso()),
2924 this->glCaps().maxTextureMaxAnisotropy());
2925
2926 // These are the OpenGL default values.
2927 newSamplerState.fMinLOD = -1000.f;
2928 newSamplerState.fMaxLOD = 1000.f;
2929
2930 if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) {
2931 this->setTextureUnit(unitIdx);
2932 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter));
2933 }
2934 if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) {
2935 this->setTextureUnit(unitIdx);
2936 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter));
2937 }
2938 if (this->glCaps().mipmapLodControlSupport()) {
2939 if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) {
2940 this->setTextureUnit(unitIdx);
2941 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD));
2942 }
2943 if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) {
2944 this->setTextureUnit(unitIdx);
2945 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD));
2946 }
2947 }
2948 if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) {
2949 this->setTextureUnit(unitIdx);
2950 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS));
2951 }
2952 if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) {
2953 this->setTextureUnit(unitIdx);
2954 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT));
2955 }
2956 if (this->glCaps().clampToBorderSupport()) {
2957 // Make sure the border color is transparent black (the default)
2958 if (setAll || oldSamplerState.fBorderColorInvalid) {
2959 this->setTextureUnit(unitIdx);
2960 static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f};
2961 GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack));
2962 }
2963 }
2964 if (this->caps()->anisoSupport()) {
2965 if (setAll || oldSamplerState.fMaxAniso != newSamplerState.fMaxAniso) {
2966 GL_CALL(TexParameterf(target,
2967 GR_GL_TEXTURE_MAX_ANISOTROPY,
2968 newSamplerState.fMaxAniso));
2969 }
2970 }
2971 }
2972 GrGLTextureParameters::NonsamplerState newNonsamplerState;
2973 newNonsamplerState.fBaseMipMapLevel = 0;
2974 newNonsamplerState.fMaxMipmapLevel = texture->maxMipmapLevel();
2975 newNonsamplerState.fSwizzleIsRGBA = true;
2976
2977 const GrGLTextureParameters::NonsamplerState& oldNonsamplerState =
2978 texture->parameters()->nonsamplerState();
2979 if (this->glCaps().textureSwizzleSupport()) {
2980 if (setAll || !oldNonsamplerState.fSwizzleIsRGBA) {
2981 static constexpr GrGLenum kRGBA[4] {
2982 GR_GL_RED,
2983 GR_GL_GREEN,
2984 GR_GL_BLUE,
2985 GR_GL_ALPHA
2986 };
2987 this->setTextureUnit(unitIdx);
2988 if (GR_IS_GR_GL(this->glStandard())) {
2989 static_assert(sizeof(kRGBA[0]) == sizeof(GrGLint));
2990 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
2991 reinterpret_cast<const GrGLint*>(kRGBA)));
2992 } else if (GR_IS_GR_GL_ES(this->glStandard())) {
2993 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
2994 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, kRGBA[0]));
2995 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, kRGBA[1]));
2996 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, kRGBA[2]));
2997 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, kRGBA[3]));
2998 }
2999 }
3000 }
3001 // These are not supported in ES2 contexts
3002 if (this->glCaps().mipmapLevelControlSupport() &&
3003 (texture->textureType() != GrTextureType::kExternal ||
3004 !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) {
3005 if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) {
3006 this->setTextureUnit(unitIdx);
3007 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL,
3008 newNonsamplerState.fBaseMipMapLevel));
3009 }
3010 if (newNonsamplerState.fMaxMipmapLevel != oldNonsamplerState.fMaxMipmapLevel) {
3011 this->setTextureUnit(unitIdx);
3012 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
3013 newNonsamplerState.fMaxMipmapLevel));
3014 }
3015 }
3016 texture->parameters()->set(samplerStateToRecord, newNonsamplerState,
3017 fResetTimestampForTextureParameters);
3018 }
3019
3020 void GrGLGpu::onResetTextureBindings() {
3021 static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE,
3022 GR_GL_TEXTURE_EXTERNAL};
3023 for (int i = 0; i < this->numTextureUnits(); ++i) {
3024 this->setTextureUnit(i);
3025 for (auto target : kTargets) {
3026 if (fHWTextureUnitBindings[i].hasBeenModified(target)) {
3027 GL_CALL(BindTexture(target, 0));
3028 }
3029 }
3030 fHWTextureUnitBindings[i].invalidateAllTargets(true);
3031 }
3032 }
3033
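// Enables or disables writes to all four color channels, using the cached tri-state to avoid
// redundant glColorMask calls.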
3034 void GrGLGpu::flushColorWrite(bool writeColor) {
3035 if (!writeColor) {
3036 if (kNo_TriState != fHWWriteToColor) {
3037 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
3038 GR_GL_FALSE, GR_GL_FALSE));
3039 fHWWriteToColor = kNo_TriState;
3040 }
3041 } else {
3042 if (kYes_TriState != fHWWriteToColor) {
3043 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
3044 fHWWriteToColor = kYes_TriState;
3045 }
3046 }
3047 }
3048
3049 void GrGLGpu::flushClearColor(std::array<float, 4> color) {
3050 GrGLfloat r = color[0], g = color[1], b = color[2], a = color[3];
3051 if (this->glCaps().clearToBoundaryValuesIsBroken() &&
3052 (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
3053 static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
3054 static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
3055 a = (1 == a) ? safeAlpha1 : safeAlpha0;
3056 }
3057 if (r != fHWClearColor[0] || g != fHWClearColor[1] ||
3058 b != fHWClearColor[2] || a != fHWClearColor[3]) {
3059 GL_CALL(ClearColor(r, g, b, a));
3060 fHWClearColor[0] = r;
3061 fHWClearColor[1] = g;
3062 fHWClearColor[2] = b;
3063 fHWClearColor[3] = a;
3064 }
3065 }
3066
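// Makes |unit| the active texture unit, skipping glActiveTexture when it already is.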
3067 void GrGLGpu::setTextureUnit(int unit) {
3068 SkASSERT(unit >= 0 && unit < this->numTextureUnits());
3069 if (unit != fHWActiveTextureUnitIdx) {
3070 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
3071 fHWActiveTextureUnitIdx = unit;
3072 }
3073 }
3074
3075 void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) {
3076 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
3077 int lastUnitIdx = this->numTextureUnits() - 1;
3078 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
3079 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
3080 fHWActiveTextureUnitIdx = lastUnitIdx;
3081 }
3082     // Clear out this field so that if a GrGLProgram does use this unit it will rebind the
3083 // correct texture.
3084 fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target);
3085 GL_CALL(BindTexture(target, textureID));
3086 }
3087
3088 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
3089 static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst,
3090 const GrSurface* src,
3091 const SkIRect& srcRect,
3092 const SkIRect& dstRect,
3093 const GrGLCaps& caps) {
3094 int dstSampleCnt = 0;
3095 int srcSampleCnt = 0;
3096 if (const GrRenderTarget* rt = dst->asRenderTarget()) {
3097 dstSampleCnt = rt->numSamples();
3098 }
3099 if (const GrRenderTarget* rt = src->asRenderTarget()) {
3100 srcSampleCnt = rt->numSamples();
3101 }
3102 SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget()));
3103 SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget()));
3104
3105 GrGLFormat dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3106 GrGLFormat srcFormat = GrBackendFormats::AsGLFormat(src->backendFormat());
3107
3108 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3109 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3110
3111 GrTextureType dstTexType;
3112 GrTextureType* dstTexTypePtr = nullptr;
3113 GrTextureType srcTexType;
3114 GrTextureType* srcTexTypePtr = nullptr;
3115 if (dstTex) {
3116 dstTexType = dstTex->textureType();
3117 dstTexTypePtr = &dstTexType;
3118 }
3119 if (srcTex) {
3120 srcTexType = srcTex->textureType();
3121 srcTexTypePtr = &srcTexType;
3122 }
3123
3124 return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr,
3125 srcFormat, srcSampleCnt, srcTexTypePtr,
3126 src->getBoundsRect(), true, srcRect, dstRect);
3127 }
3128
3129 static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) {
3130 // A RT has a separate MSAA renderbuffer if:
3131 // 1) It's multisampled
3132 // 2) We're using an extension with separate MSAA renderbuffers
3133 // 3) It's not FBO 0, which is special and always auto-resolves
3134 return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && !rt->isFBO0(true/*msaa*/);
3135 }
3136
3137 static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src,
3138 const GrGLCaps& caps) {
3139
3140 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
3141 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
3142 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3143 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3144
3145 bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false;
3146 bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false;
3147
3148 GrGLFormat dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3149 GrGLFormat srcFormat = GrBackendFormats::AsGLFormat(src->backendFormat());
3150
3151 GrTextureType dstTexType;
3152 GrTextureType* dstTexTypePtr = nullptr;
3153 GrTextureType srcTexType;
3154 GrTextureType* srcTexTypePtr = nullptr;
3155 if (dstTex) {
3156 dstTexType = dstTex->textureType();
3157 dstTexTypePtr = &dstTexType;
3158 }
3159 if (srcTex) {
3160 srcTexType = srcTex->textureType();
3161 srcTexTypePtr = &srcTexType;
3162 }
3163
3164 return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr,
3165 srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr);
3166 }
3167
3168 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget,
3169 TempFBOTarget tempFBOTarget) {
3170 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
3171 if (!rt || mipLevel > 0) {
3172 SkASSERT(surface->asTexture());
3173 GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture());
3174 GrGLuint texID = texture->textureID();
3175 GrGLenum target = texture->target();
3176 GrGLuint* tempFBOID;
3177 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
3178
3179 if (0 == *tempFBOID) {
3180 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
3181 }
3182
3183 this->bindFramebuffer(fboTarget, *tempFBOID);
3184 GR_GL_CALL(
3185 this->glInterface(),
3186 FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel));
3187 if (mipLevel == 0) {
3188 texture->baseLevelWasBoundToFBO();
3189 }
3190 } else {
3191 rt->bindForPixelOps(fboTarget);
3192 }
3193 }
3194
3195 void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) {
3196     // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets to a temp FBO; detach them here.
3197 if (mipLevel > 0 || !surface->asRenderTarget()) {
3198 SkASSERT(surface->asTexture());
3199 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
3200 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3201 GR_GL_COLOR_ATTACHMENT0,
3202 textureTarget,
3203 0,
3204 0));
3205 }
3206 }
3207
3208 void GrGLGpu::onFBOChanged() {
3209 if (this->caps()->workarounds().flush_on_framebuffer_change) {
3210 this->flush(FlushType::kForce);
3211 }
3212 #ifdef SK_DEBUG
3213 if (fIsExecutingCommandBuffer_DebugOnly) {
3214 SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
3215 "This will severely hurt performance.\n");
3216 }
3217 #endif
3218 }
3219
3220 void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) {
3221 GL_CALL(BindFramebuffer(target, fboid));
3222 if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) {
3223 fBoundDrawFramebuffer = fboid;
3224 }
3225 this->onFBOChanged();
3226 }
3227
3228 void GrGLGpu::deleteFramebuffer(GrGLuint fboid) {
3229 // We're relying on the GL state shadowing being correct in the workaround code below so we
3230 // need to handle a dirty context.
3231 this->handleDirtyContext();
3232 if (fboid == fBoundDrawFramebuffer &&
3233 this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) {
3234 // This workaround only applies to deleting currently bound framebuffers
3235 // on Adreno 420. Because this is a somewhat rare case, instead of
3236 // tracking all the attachments of every framebuffer instead just always
3237 // unbind all attachments.
3238 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3239 GR_GL_RENDERBUFFER, 0));
3240 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
3241 GR_GL_RENDERBUFFER, 0));
3242 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
3243 GR_GL_RENDERBUFFER, 0));
3244 }
3245
3246 GL_CALL(DeleteFramebuffers(1, &fboid));
3247
3248 // Deleting the currently bound framebuffer rebinds to 0.
3249 if (fboid == fBoundDrawFramebuffer) {
3250 this->onFBOChanged();
3251 }
3252 }
3253
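// Copy strategy, roughly in order of preference: a draw-based copy when dst already has an FBO,
// then glCopyTexSubImage2D for non-scaling copies, then glBlitFramebuffer, and finally a
// draw-based copy even if it requires attaching dst to a temporary FBO.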
3254 bool GrGLGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
3255 GrSurface* src, const SkIRect& srcRect,
3256 GrSamplerState::Filter filter) {
3257 // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
3258 // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites().
3259 bool preferCopy = SkToBool(dst->asRenderTarget());
3260 bool scalingCopy = dstRect.size() != srcRect.size();
3261 auto dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3262 if (preferCopy &&
3263 this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()), scalingCopy)) {
3264 GrRenderTarget* dstRT = dst->asRenderTarget();
3265 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3266 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstRect, filter)) {
3267 return true;
3268 }
3269 }
3270
3271     // Prefer copying with glCopyTexSubImage when the dimensions are the same (no scaling).
3272 if (!scalingCopy && can_copy_texsubimage(dst, src, this->glCaps())) {
3273 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstRect.topLeft());
3274 return true;
3275 }
3276
3277 if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstRect, this->glCaps())) {
3278 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstRect, filter);
3279 }
3280
3281 if (!preferCopy &&
3282 this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()), scalingCopy)) {
3283 GrRenderTarget* dstRT = dst->asRenderTarget();
3284 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3285 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstRect, filter)) {
3286 return true;
3287 }
3288 }
3289
3290 return false;
3291 }
3292
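// Lazily builds the small SkSL program used by copySurfaceAsDraw, one per sampler type (indexed
// by TextureToCopyProgramIdx). The SkSL is translated to GLSL, compiled, linked, and the uniform
// and attribute locations are cached alongside the program.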
3293 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
3294 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
3295
3296 int progIdx = TextureToCopyProgramIdx(srcTex);
3297 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3298 SkSLType samplerType = SkSLCombinedSamplerTypeForTextureType(srcTex->textureType());
3299
3300 if (!fCopyProgramArrayBuffer) {
3301 static const GrGLfloat vdata[] = {
3302 0, 0,
3303 0, 1,
3304 1, 0,
3305 1, 1
3306 };
3307 fCopyProgramArrayBuffer = GrGLBuffer::Make(this,
3308 sizeof(vdata),
3309 GrGpuBufferType::kVertex,
3310 kStatic_GrAccessPattern);
3311 if (fCopyProgramArrayBuffer) {
3312 fCopyProgramArrayBuffer->updateData(
3313 vdata, /*offset=*/0, sizeof(vdata), /*preserve=*/false);
3314 }
3315 }
3316 if (!fCopyProgramArrayBuffer) {
3317 return false;
3318 }
3319
3320 SkASSERT(!fCopyPrograms[progIdx].fProgram);
3321 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
3322 if (!fCopyPrograms[progIdx].fProgram) {
3323 return false;
3324 }
3325
3326 GrShaderVar aVertex("a_vertex", SkSLType::kHalf2, GrShaderVar::TypeModifier::In);
3327 GrShaderVar uTexCoordXform("u_texCoordXform", SkSLType::kHalf4,
3328 GrShaderVar::TypeModifier::Uniform);
3329 GrShaderVar uPosXform("u_posXform", SkSLType::kHalf4, GrShaderVar::TypeModifier::Uniform);
3330 GrShaderVar uTexture("u_texture", samplerType);
3331 GrShaderVar vTexCoord("v_texCoord", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out);
3332 GrShaderVar oFragColor("o_FragColor", SkSLType::kHalf4, GrShaderVar::TypeModifier::Out);
3333
3334 SkString vshaderTxt;
3335 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3336 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3337 vshaderTxt.appendf("#extension %s : require\n", extension);
3338 }
3339 vTexCoord.addModifier("noperspective");
3340 }
3341
3342 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3343 vshaderTxt.append(";");
3344 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3345 vshaderTxt.append(";");
3346 uPosXform.appendDecl(shaderCaps, &vshaderTxt);
3347 vshaderTxt.append(";");
3348 vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
3349 vshaderTxt.append(";");
3350
3351 vshaderTxt.append(
3352 // Copy Program VS
3353 "void main() {"
3354 "v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);"
3355 "sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
3356 "sk_Position.zw = half2(0, 1);"
3357 "}"
3358 );
3359
3360 SkString fshaderTxt;
3361 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3362 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3363 fshaderTxt.appendf("#extension %s : require\n", extension);
3364 }
3365 }
3366 vTexCoord.setTypeModifier(GrShaderVar::TypeModifier::In);
3367 vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
3368 fshaderTxt.append(";");
3369 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3370 fshaderTxt.append(";");
3371 fshaderTxt.appendf(
3372 // Copy Program FS
3373 "void main() {"
3374 "sk_FragColor = sample(u_texture, v_texCoord);"
3375 "}"
3376 );
3377 std::string vertexSkSL{vshaderTxt.c_str(), vshaderTxt.size()};
3378 std::string fragmentSkSL{fshaderTxt.c_str(), fshaderTxt.size()};
3379
3380 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3381 std::string glsl[kGrShaderTypeCount];
3382 SkSL::ProgramSettings settings;
3383 SkSL::Program::Interface interface;
3384 skgpu::SkSLToGLSL(shaderCaps, vertexSkSL, SkSL::ProgramKind::kVertex, settings,
3385 &glsl[kVertex_GrShaderType], &interface, errorHandler);
3386 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext,
3387 fCopyPrograms[progIdx].fProgram,
3388 GR_GL_VERTEX_SHADER,
3389 glsl[kVertex_GrShaderType],
3390 /*shaderWasCached=*/false,
3391 fProgramCache->stats(),
3392 errorHandler);
3393 SkASSERT(interface == SkSL::Program::Interface());
3394 if (!vshader) {
3395 // Just delete the program, no shaders to delete
3396 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, nullptr, nullptr);
3397 return false;
3398 }
3399
3400 skgpu::SkSLToGLSL(shaderCaps, fragmentSkSL, SkSL::ProgramKind::kFragment, settings,
3401 &glsl[kFragment_GrShaderType], &interface, errorHandler);
3402 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext,
3403 fCopyPrograms[progIdx].fProgram,
3404 GR_GL_FRAGMENT_SHADER,
3405 glsl[kFragment_GrShaderType],
3406 /*shaderWasCached=*/false,
3407 fProgramCache->stats(),
3408 errorHandler);
3409 SkASSERT(interface == SkSL::Program::Interface());
3410 if (!fshader) {
3411 // Delete the program and previously compiled vertex shader
3412 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, &vshader, nullptr);
3413 return false;
3414 }
3415
3416 const std::string* sksl[kGrShaderTypeCount] = {&vertexSkSL, &fragmentSkSL};
3417 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
3418 if (!GrGLCheckLinkStatus(this,
3419 fCopyPrograms[progIdx].fProgram,
3420 /*shaderWasCached=*/false,
3421 errorHandler,
3422 sksl,
3423 glsl)) {
3424 // Failed to link, delete everything
3425 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, &vshader, &fshader);
3426 return false;
3427 }
3428
3429 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
3430 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
3431 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
3432 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
3433 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
3434 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
3435
3436 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3437
3438 // Cleanup the shaders, but not the program
3439 cleanup_program(this, nullptr, &vshader, &fshader);
3440
3441 return true;
3442 }
3443
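// Builds one of four downsample programs used by onRegenerateMipMapLevels. progIdx encodes the
// parity of the source level's dimensions (0x2 bit = odd width, 0x1 bit = odd height), which
// determines whether 1, 2, or 4 taps are averaged per destination texel.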
3444 bool GrGLGpu::createMipmapProgram(int progIdx) {
3445 const bool oddWidth = SkToBool(progIdx & 0x2);
3446 const bool oddHeight = SkToBool(progIdx & 0x1);
3447 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
3448
3449 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3450
3451 SkASSERT(!fMipmapPrograms[progIdx].fProgram);
3452 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
3453 if (!fMipmapPrograms[progIdx].fProgram) {
3454 return false;
3455 }
3456
3457 GrShaderVar aVertex("a_vertex", SkSLType::kHalf2, GrShaderVar::TypeModifier::In);
3458 GrShaderVar uTexCoordXform("u_texCoordXform", SkSLType::kHalf4,
3459 GrShaderVar::TypeModifier::Uniform);
3460 GrShaderVar uTexture("u_texture", SkSLType::kTexture2DSampler);
3461 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
3462 GrShaderVar vTexCoords[] = {
3463 GrShaderVar("v_texCoord0", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3464 GrShaderVar("v_texCoord1", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3465 GrShaderVar("v_texCoord2", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3466 GrShaderVar("v_texCoord3", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3467 };
3468     GrShaderVar oFragColor("o_FragColor", SkSLType::kHalf4, GrShaderVar::TypeModifier::Out);
3469
3470 SkString vshaderTxt;
3471 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3472 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3473 vshaderTxt.appendf("#extension %s : require\n", extension);
3474 }
3475 vTexCoords[0].addModifier("noperspective");
3476 vTexCoords[1].addModifier("noperspective");
3477 vTexCoords[2].addModifier("noperspective");
3478 vTexCoords[3].addModifier("noperspective");
3479 }
3480
3481 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3482 vshaderTxt.append(";");
3483 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3484 vshaderTxt.append(";");
3485 for (int i = 0; i < numTaps; ++i) {
3486 vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
3487 vshaderTxt.append(";");
3488 }
3489
3490 vshaderTxt.append(
3491 // Mipmap Program VS
3492 "void main() {"
3493 "sk_Position.xy = a_vertex * half2(2) - half2(1);"
3494 "sk_Position.zw = half2(0, 1);"
3495 );
3496
3497 // Insert texture coordinate computation:
3498 if (oddWidth && oddHeight) {
3499 vshaderTxt.append(
3500 "v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
3501 "v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
3502 "v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
3503 "v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
3504 );
3505 } else if (oddWidth) {
3506 vshaderTxt.append(
3507 "v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
3508 "v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
3509 );
3510 } else if (oddHeight) {
3511 vshaderTxt.append(
3512 "v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
3513 "v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
3514 );
3515 } else {
3516 vshaderTxt.append(
3517 "v_texCoord0 = a_vertex.xy;"
3518 );
3519 }
3520
3521 vshaderTxt.append("}");
3522
3523 SkString fshaderTxt;
3524 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3525 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3526 fshaderTxt.appendf("#extension %s : require\n", extension);
3527 }
3528 }
3529 for (int i = 0; i < numTaps; ++i) {
3530 vTexCoords[i].setTypeModifier(GrShaderVar::TypeModifier::In);
3531 vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
3532 fshaderTxt.append(";");
3533 }
3534 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3535 fshaderTxt.append(";");
3536 fshaderTxt.append(
3537 // Mipmap Program FS
3538 "void main() {"
3539 );
3540
3541 if (oddWidth && oddHeight) {
3542 fshaderTxt.append(
3543 "sk_FragColor = (sample(u_texture, v_texCoord0) + "
3544 "sample(u_texture, v_texCoord1) + "
3545 "sample(u_texture, v_texCoord2) + "
3546 "sample(u_texture, v_texCoord3)) * 0.25;"
3547 );
3548 } else if (oddWidth || oddHeight) {
3549 fshaderTxt.append(
3550 "sk_FragColor = (sample(u_texture, v_texCoord0) + "
3551 "sample(u_texture, v_texCoord1)) * 0.5;"
3552 );
3553 } else {
3554 fshaderTxt.append(
3555 "sk_FragColor = sample(u_texture, v_texCoord0);"
3556 );
3557 }
3558
3559 fshaderTxt.append("}");
3560
3561 std::string vertexSkSL{vshaderTxt.c_str(), vshaderTxt.size()};
3562 std::string fragmentSkSL{fshaderTxt.c_str(), fshaderTxt.size()};
3563
3564 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3565 std::string glsl[kGrShaderTypeCount];
3566 SkSL::ProgramSettings settings;
3567 SkSL::Program::Interface interface;
3568
3569 skgpu::SkSLToGLSL(shaderCaps, vertexSkSL, SkSL::ProgramKind::kVertex, settings,
3570 &glsl[kVertex_GrShaderType], &interface, errorHandler);
3571 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext,
3572 fMipmapPrograms[progIdx].fProgram,
3573 GR_GL_VERTEX_SHADER,
3574 glsl[kVertex_GrShaderType],
3575 /*shaderWasCached=*/false,
3576 fProgramCache->stats(),
3577 errorHandler);
3578 SkASSERT(interface == SkSL::Program::Interface());
3579 if (!vshader) {
3580 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, nullptr, nullptr);
3581 return false;
3582 }
3583
3584 skgpu::SkSLToGLSL(shaderCaps, fragmentSkSL, SkSL::ProgramKind::kFragment, settings,
3585 &glsl[kFragment_GrShaderType], &interface, errorHandler);
3586 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext,
3587 fMipmapPrograms[progIdx].fProgram,
3588 GR_GL_FRAGMENT_SHADER,
3589 glsl[kFragment_GrShaderType],
3590 /*shaderWasCached=*/false,
3591 fProgramCache->stats(),
3592 errorHandler);
3593 SkASSERT(interface == SkSL::Program::Interface());
3594 if (!fshader) {
3595 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, &vshader, nullptr);
3596 return false;
3597 }
3598
3599 const std::string* sksl[kGrShaderTypeCount] = {&vertexSkSL, &fragmentSkSL};
3600 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
3601 if (!GrGLCheckLinkStatus(this,
3602 fMipmapPrograms[progIdx].fProgram,
3603 /*shaderWasCached=*/false,
3604 errorHandler,
3605 sksl,
3606 glsl)) {
3607 // Program linking failed, clean up
3608 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, &vshader, &fshader);
3609 return false;
3610 }
3611
3612 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
3613 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
3614 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3615 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
3616
3617 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3618
3619 // Clean up the shaders
3620 cleanup_program(this, nullptr, &vshader, &fshader);
3621
3622 return true;
3623 }
3624
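// Draw-based copy: binds src as a texture, points the draw FBO at dst (via a temporary FBO when
// dst is not a render target), and draws a quad covering dstRect with the copy program.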
3625 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, bool drawToMultisampleFBO, GrSurface* src,
3626 const SkIRect& srcRect, const SkIRect& dstRect,
3627 GrSamplerState::Filter filter) {
3628 auto* srcTex = static_cast<GrGLTexture*>(src->asTexture());
3629 if (!srcTex) {
3630 return false;
3631 }
3632 // We don't swizzle at all in our copies.
3633 this->bindTexture(0, filter, skgpu::Swizzle::RGBA(), srcTex);
3634 if (auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget())) {
3635 this->flushRenderTarget(dstRT, drawToMultisampleFBO);
3636 } else {
3637         auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
3638 SkASSERT(dstTex);
3639 SkASSERT(!drawToMultisampleFBO);
3640 if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) {
3641 return false;
3642 }
3643 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
3644 fHWBoundRenderTargetUniqueID.makeInvalid();
3645 }
3646 int progIdx = TextureToCopyProgramIdx(srcTex);
3647 if (!fCopyPrograms[progIdx].fProgram) {
3648 if (!this->createCopyProgram(srcTex)) {
3649 SkDebugf("Failed to create copy program.\n");
3650 return false;
3651 }
3652 }
3653 this->flushViewport(SkIRect::MakeSize(dst->dimensions()),
3654 dst->height(),
3655 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
3656 this->flushProgram(fCopyPrograms[progIdx].fProgram);
3657 fHWVertexArrayState.setVertexArrayID(this, 0);
3658 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3659 attribs->enableVertexArrays(this, 1);
3660 attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3661 SkSLType::kFloat2, 2 * sizeof(GrGLfloat), 0);
3662 // dst rect edges in NDC (-1 to 1)
3663 int dw = dst->width();
3664 int dh = dst->height();
3665 GrGLfloat dx0 = 2.f * dstRect.fLeft / dw - 1.f;
3666 GrGLfloat dx1 = 2.f * dstRect.fRight / dw - 1.f;
3667 GrGLfloat dy0 = 2.f * dstRect.fTop / dh - 1.f;
3668 GrGLfloat dy1 = 2.f * dstRect.fBottom / dh - 1.f;
3669 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
3670 GrGLfloat sx1 = (GrGLfloat)(srcRect.fRight);
3671 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
3672 GrGLfloat sy1 = (GrGLfloat)(srcRect.fBottom);
3673 int sw = src->width();
3674 int sh = src->height();
3675 if (srcTex->textureType() != GrTextureType::kRectangle) {
3676 // src rect edges in normalized texture space (0 to 1)
3677 sx0 /= sw;
3678 sx1 /= sw;
3679 sy0 /= sh;
3680 sy1 /= sh;
3681 }
3682 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
3683 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
3684 sx1 - sx0, sy1 - sy0, sx0, sy0));
3685 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
3686 this->flushBlendAndColorWrite(skgpu::BlendInfo(), skgpu::Swizzle::RGBA());
3687 this->flushConservativeRasterState(false);
3688 this->flushWireframeState(false);
3689 this->flushScissorTest(GrScissorTest::kDisabled);
3690 this->disableWindowRectangles();
3691 this->disableStencil();
3692 if (this->glCaps().srgbWriteControl()) {
3693 this->flushFramebufferSRGB(true);
3694 }
3695 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3696 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER);
3697 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3698 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3699 return true;
3700 }
3701
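// Copies by attaching src to a temporary FBO and calling glCopyTexSubImage2D into dst's texture.
// Only valid when can_copy_texsubimage() has returned true for this pair.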
3702 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3703 const SkIPoint& dstPoint) {
3704 SkASSERT(can_copy_texsubimage(dst, src, this->glCaps()));
3705 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
3706 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
3707 SkASSERT(dstTex);
3708 // We modified the bound FBO
3709 fHWBoundRenderTargetUniqueID.makeInvalid();
3710
3711 this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID());
3712 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
3713 dstPoint.fX, dstPoint.fY,
3714 srcRect.fLeft, srcRect.fTop,
3715 srcRect.width(), srcRect.height()));
3716 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER);
3717 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3718 srcRect.width(), srcRect.height());
3719 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3720 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3721 }
3722
3723 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3724 const SkIRect& dstRect, GrSamplerState::Filter filter) {
3725 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstRect, this->glCaps()));
3726 if (dst == src) {
3727 if (SkIRect::Intersects(dstRect, srcRect)) {
3728 return false;
3729 }
3730 }
3731
3732 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget);
3733 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget);
3734 // We modified the bound FBO
3735 fHWBoundRenderTargetUniqueID.makeInvalid();
3736
3737 // BlitFrameBuffer respects the scissor, so disable it.
3738 this->flushScissorTest(GrScissorTest::kDisabled);
3739 this->disableWindowRectangles();
3740
3741 GL_CALL(BlitFramebuffer(srcRect.fLeft,
3742 srcRect.fTop,
3743 srcRect.fRight,
3744 srcRect.fBottom,
3745 dstRect.fLeft,
3746 dstRect.fTop,
3747 dstRect.fRight,
3748 dstRect.fBottom,
3749 GR_GL_COLOR_BUFFER_BIT,
3750 filter_to_gl_mag_filter(filter)));
3751 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER);
3752 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER);
3753
3754 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3755 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3756 return true;
3757 }
3758
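// Regenerates mip levels either with glGenerateMipmap or, when the caps require it, by manually
// rendering each level from the previous one in the loop below.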
3759 bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
3760 using RegenerateMipmapType = GrGLCaps::RegenerateMipmapType;
3761
3762 auto glTex = static_cast<GrGLTexture*>(texture);
3763 // Mipmaps are only supported on 2D textures:
3764 if (GR_GL_TEXTURE_2D != glTex->target()) {
3765 return false;
3766 }
3767 GrGLFormat format = glTex->format();
3768 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
3769 // Uses draw calls to do a series of downsample operations to successive mips.
3770
3771 // The manual approach requires the ability to limit which level we're sampling and that the
3772 // destination can be bound to a FBO:
3773 if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) {
3774 GrGLenum target = glTex->target();
3775 this->bindTextureToScratchUnit(target, glTex->textureID());
3776 GL_CALL(GenerateMipmap(glTex->target()));
3777 return true;
3778 }
3779
3780 int width = texture->width();
3781 int height = texture->height();
3782 int levelCount = SkMipmap::ComputeLevelCount(width, height) + 1;
3783 SkASSERT(levelCount == texture->maxMipmapLevel() + 1);
3784
3785 // Create (if necessary), then bind temporary FBO:
3786 if (0 == fTempDstFBOID) {
3787 GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
3788 }
3789 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
3790 fHWBoundRenderTargetUniqueID.makeInvalid();
3791
3792 // Bind the texture, to get things configured for filtering.
3793 // We'll be changing our base level and max level further below:
3794 this->setTextureUnit(0);
3795 // The mipmap program does not do any swizzling.
3796 this->bindTexture(0, GrSamplerState::Filter::kLinear, skgpu::Swizzle::RGBA(), glTex);
3797
3798 // Vertex data:
3799 if (!fMipmapProgramArrayBuffer) {
3800 static const GrGLfloat vdata[] = {
3801 0, 0,
3802 0, 1,
3803 1, 0,
3804 1, 1
3805 };
3806 fMipmapProgramArrayBuffer = GrGLBuffer::Make(this,
3807 sizeof(vdata),
3808 GrGpuBufferType::kVertex,
3809 kStatic_GrAccessPattern);
3810         fMipmapProgramArrayBuffer->updateData(vdata,
3811                                               /*offset=*/0,
3812                                               sizeof(vdata),
3813                                               /*preserve=*/false);
3814 }
3815 if (!fMipmapProgramArrayBuffer) {
3816 return false;
3817 }
3818
3819 fHWVertexArrayState.setVertexArrayID(this, 0);
3820
3821 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3822 attribs->enableVertexArrays(this, 1);
3823 attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3824 SkSLType::kFloat2, 2 * sizeof(GrGLfloat), 0);
3825
3826 // Set "simple" state once:
3827 this->flushBlendAndColorWrite(skgpu::BlendInfo(), skgpu::Swizzle::RGBA());
3828 this->flushScissorTest(GrScissorTest::kDisabled);
3829 this->disableWindowRectangles();
3830 this->disableStencil();
3831
3832 // Do all the blits:
3833 width = texture->width();
3834 height = texture->height();
3835
3836 std::unique_ptr<GrSemaphore> semaphore;
3837 for (GrGLint level = 1; level < levelCount; ++level) {
3838 // Get and bind the program for this particular downsample (filter shape can vary):
3839 int progIdx = TextureSizeToMipmapProgramIdx(width, height);
3840 if (!fMipmapPrograms[progIdx].fProgram) {
3841 if (!this->createMipmapProgram(progIdx)) {
3842 SkDebugf("Failed to create mipmap program.\n");
3843 // Invalidate all params to cover base and max level change in a previous iteration.
3844 glTex->textureParamsModified();
3845 return false;
3846 }
3847 }
3848 this->flushProgram(fMipmapPrograms[progIdx].fProgram);
3849
3850 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusSync &&
3851 level > 1) {
3852 this->waitSemaphore(semaphore.get());
3853 semaphore.reset();
3854 }
3855
3856 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
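        // e.g. for a 5x4 source level the uniform is (0.2, 0.8, 0.25, 0.75): .yw scale the quad
        // into the (w-1)/w by (h-1)/h region and .xz are the one-texel offsets added by the extra
        // taps on odd dimensions (a rough reading of the shaders built above).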
3857 const float invWidth = 1.0f / width;
3858 const float invHeight = 1.0f / height;
3859 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3860 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
3861 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
3862
3863 // Set the base level so that we only sample from the previous mip.
3864 SkASSERT(this->glCaps().mipmapLevelControlSupport());
3865 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
3866 // Setting the max level is technically unnecessary and can affect
3867 // validation for the framebuffer. However, by making it clear that a
3868 // rendering feedback loop is not occurring, we avoid hitting a slow
3869 // path on some drivers.
3870 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusMaxLevel) {
3871 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_MAX_LEVEL, level - 1));
3872 }
3873
3874 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
3875 glTex->textureID(), level));
3876
3877 width = std::max(1, width / 2);
3878 height = std::max(1, height / 2);
3879 this->flushViewport(SkIRect::MakeWH(width, height), height, kTopLeft_GrSurfaceOrigin);
3880
3881 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3882
3883 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusSync &&
3884 level < levelCount-1) {
3885 semaphore = this->makeSemaphore(true);
3886 this->insertSemaphore(semaphore.get());
3887 }
3888 }
3889
3890 // Unbind:
3891 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3892 GR_GL_TEXTURE_2D, 0, 0));
3893
3894 // We modified the base level and max level params.
3895 GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState();
3896 // We drew the 2nd to last level into the last level.
3897 nonsamplerState.fBaseMipMapLevel = levelCount - 2;
3898 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusMaxLevel) {
3899 nonsamplerState.fMaxMipmapLevel = levelCount - 2;
3900 }
3901 glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
3902
3903 return true;
3904 }
3905
3906 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
3907 SkASSERT(type);
3908 switch (type) {
3909 case kTexture_GrXferBarrierType: {
3910 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
3911 SkASSERT(glrt->asTexture());
3912 SkASSERT(!glrt->isFBO0(false/*multisample*/));
3913 if (glrt->requiresManualMSAAResolve()) {
3914 // The render target uses separate storage so no need for glTextureBarrier.
3915 // FIXME: The render target will resolve automatically when its texture is bound,
3916 // but we could resolve only the bounds that will be read if we do it here instead.
3917 return;
3918 }
3919 SkASSERT(this->caps()->textureBarrierSupport());
3920 GL_CALL(TextureBarrier());
3921 return;
3922 }
3923 case kBlend_GrXferBarrierType:
3924 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
3925 this->caps()->blendEquationSupport());
3926 GL_CALL(BlendBarrier());
3927 return;
3928 default: break; // placate compiler warnings that kNone not handled
3929 }
3930 }
3931
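// Creates an uninitialized backend texture (compressed formats take a separate path). The texture
// is created on the scratch unit, immediately unbound, and its initial parameter state is recorded
// in a fresh GrGLTextureParameters so later wrapping sees consistent state.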
3932 GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions,
3933 const GrBackendFormat& format,
3934 GrRenderable renderable,
3935 skgpu::Mipmapped mipmapped,
3936 GrProtected isProtected,
3937 std::string_view label) {
3938 this->handleDirtyContext();
3939
3940 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
3941 return {};
3942 }
3943
3944 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
3945 if (glFormat == GrGLFormat::kUnknown) {
3946 return {};
3947 }
3948
3949 int numMipLevels = 1;
3950 if (mipmapped == skgpu::Mipmapped::kYes) {
3951 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
3952 }
3953
3954 // Compressed formats go through onCreateCompressedBackendTexture
3955 SkASSERT(!GrGLFormatIsCompressed(glFormat));
3956
3957 GrGLTextureInfo info;
3958 GrGLTextureParameters::SamplerOverriddenState initialState;
3959
3963 switch (format.textureType()) {
3964 case GrTextureType::kNone:
3965 case GrTextureType::kExternal:
3966 return {};
3967 case GrTextureType::k2D:
3968 info.fTarget = GR_GL_TEXTURE_2D;
3969 break;
3970 case GrTextureType::kRectangle:
3971 if (!this->glCaps().rectangleTextureSupport() || mipmapped == skgpu::Mipmapped::kYes) {
3972 return {};
3973 }
3974 info.fTarget = GR_GL_TEXTURE_RECTANGLE;
3975 break;
3976 }
3977 info.fProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
3978 this->glCaps().strictProtectedness());
3979 info.fFormat = GrGLFormatToEnum(glFormat);
3980 info.fID = this->createTexture(dimensions, glFormat, info.fTarget, renderable, &initialState,
3981 numMipLevels, info.fProtected, label);
3982 if (!info.fID) {
3983 return {};
3984 }
3985
3986 // Unbind this texture from the scratch texture unit.
3987 this->bindTextureToScratchUnit(info.fTarget, 0);
3988
3989 auto parameters = sk_make_sp<GrGLTextureParameters>();
3990 // The non-sampler params are still at their default values.
3991 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
3992 fResetTimestampForTextureParameters);
3993
3994 return GrBackendTextures::MakeGL(
3995 dimensions.width(), dimensions.height(), mipmapped, info, std::move(parameters), label);
3996 }
3997
3998 bool GrGLGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
3999 sk_sp<skgpu::RefCntedCallback> finishedCallback,
4000 std::array<float, 4> color) {
4001 this->handleDirtyContext();
4002
4003 GrGLTextureInfo info;
4004 SkAssertResult(GrBackendTextures::GetGLTextureInfo(backendTexture, &info));
4005
4006 int numMipLevels = 1;
4007 if (backendTexture.hasMipmaps()) {
4008 numMipLevels =
4009 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
4010 }
4011
4012 GrGLFormat glFormat = GrGLFormatFromGLEnum(info.fFormat);
4013
4014 this->bindTextureToScratchUnit(info.fTarget, info.fID);
4015
4016 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
4017 // so that the uploads go to the right levels.
4018 if (numMipLevels && this->glCaps().mipmapLevelControlSupport()) {
4019 auto params = get_gl_texture_params(backendTexture);
4020 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
4021 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
4022 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
4023 nonsamplerState.fBaseMipMapLevel = 0;
4024 }
4025 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
4026 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
4027             nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
4028 }
4029 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
4030 }
4031
4032 uint32_t levelMask = (1 << numMipLevels) - 1;
4033 bool result = this->uploadColorToTex(glFormat,
4034 backendTexture.dimensions(),
4035 info.fTarget,
4036 color,
4037 levelMask);
4038
4039 // Unbind this texture from the scratch texture unit.
4040 this->bindTextureToScratchUnit(info.fTarget, 0);
4041 return result;
4042 }
4043
4044 void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) {
4045 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4046
4047 GrGLTextureInfo info;
4048 if (GrBackendTextures::GetGLTextureInfo(tex, &info)) {
4049 GL_CALL(DeleteTextures(1, &info.fID));
4050 }
4051 }
4052
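// Warms the program cache for |desc|/|programInfo|. Returns true only if this call actually built
// a new program, i.e. the lookup was not already a cache hit.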
4053 bool GrGLGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
4054 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
4055
4056 sk_sp<GrGLProgram> tmp = fProgramCache->findOrCreateProgram(this->getContext(),
4057 desc, programInfo, &stat);
4058 if (!tmp) {
4059 return false;
4060 }
4061
4062 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
4063 }
4064
4065 #if defined(GPU_TEST_UTILS)
4066
4067 bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
4068 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4069
4070 GrGLTextureInfo info;
4071 if (!GrBackendTextures::GetGLTextureInfo(tex, &info)) {
4072 return false;
4073 }
4074
4075 GrGLboolean result;
4076 GL_CALL_RET(result, IsTexture(info.fID));
4077
4078 return (GR_GL_TRUE == result);
4079 }
4080
4081 GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
4082 GrColorType colorType,
4083 int sampleCnt,
4084 GrProtected isProtected) {
4085 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
4086 dimensions.height() > this->caps()->maxRenderTargetSize()) {
4087 return {};
4088 }
4089 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
4090 return {};
4091 }
4092
4093 this->handleDirtyContext();
4094 auto format = this->glCaps().getFormatFromColorType(colorType);
4095 sampleCnt = this->glCaps().getRenderTargetSampleCount(sampleCnt, format);
4096 if (!sampleCnt) {
4097 return {};
4098 }
4099 // We make a texture instead of a render target if we're using a
4100 // "multisampled_render_to_texture" style extension or have a BGRA format that
4101 // is allowed for textures but not render buffer internal formats.
4102 bool useTexture = false;
4103 if (sampleCnt > 1 && !this->glCaps().usesMSAARenderBuffers()) {
4104 useTexture = true;
4105 } else if (format == GrGLFormat::kBGRA8 &&
4106 this->glCaps().getRenderbufferInternalFormat(GrGLFormat::kBGRA8) != GR_GL_BGRA8) {
4107 // We have a BGRA extension that doesn't support BGRA render buffers. We can use a texture
4108 // unless we've been asked for MSAA. Note we already checked above for render-to-
4109 // multisampled-texture style extensions.
4110 if (sampleCnt > 1) {
4111 return {};
4112 }
4113 useTexture = true;
4114 }
4115
4116 bool avoidStencil = this->glCaps().avoidStencilBuffers();
4117 int sFormatIdx = -1;
4118 if (!avoidStencil) {
4119 sFormatIdx = this->getCompatibleStencilIndex(format);
4120 if (sFormatIdx < 0) {
4121 return {};
4122 }
4123 }
4124 GrGLuint colorID = 0;
4125 GrGLuint stencilID = 0;
4126 GrGLFramebufferInfo info;
4127 info.fFBOID = 0;
4128 info.fFormat = GrGLFormatToEnum(format);
4129 info.fProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
4130 this->glCaps().strictProtectedness());
4131
4132 auto deleteIDs = [&](bool saveFBO = false) {
4133 if (colorID) {
4134 if (useTexture) {
4135 GL_CALL(DeleteTextures(1, &colorID));
4136 } else {
4137 GL_CALL(DeleteRenderbuffers(1, &colorID));
4138 }
4139 }
4140 if (stencilID) {
4141 GL_CALL(DeleteRenderbuffers(1, &stencilID));
4142 }
4143 if (!saveFBO && info.fFBOID) {
4144 this->deleteFramebuffer(info.fFBOID);
4145 }
4146 };
4147
4148 if (useTexture) {
4149 GL_CALL(GenTextures(1, &colorID));
4150 } else {
4151 GL_CALL(GenRenderbuffers(1, &colorID));
4152 }
4153 if (!colorID) {
4154 deleteIDs();
4155 return {};
4156 }
4157
4158 if (!avoidStencil) {
4159 GL_CALL(GenRenderbuffers(1, &stencilID));
4160 if (!stencilID) {
4161 deleteIDs();
4162 return {};
4163 }
4164 }
4165
4166 GL_CALL(GenFramebuffers(1, &info.fFBOID));
4167 if (!info.fFBOID) {
4168 deleteIDs();
4169 return {};
4170 }
4171
4172 this->invalidateBoundRenderTarget();
4173
4174 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4175 if (useTexture) {
4176 GrGLTextureParameters::SamplerOverriddenState initialState;
4177 colorID = this->createTexture(dimensions, format, GR_GL_TEXTURE_2D, GrRenderable::kYes,
4178 &initialState,
4179 1,
4180 info.fProtected,
4181 /*label=*/"Skia");
4182 if (!colorID) {
4183 deleteIDs();
4184 return {};
4185 }
4186 if (sampleCnt == 1) {
4187 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4188 GR_GL_TEXTURE_2D, colorID, 0));
4189 } else {
4190 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4191 GR_GL_TEXTURE_2D, colorID, 0, sampleCnt));
4192 }
4193 } else {
4194 GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format);
4195 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
4196 if (sampleCnt == 1) {
4197 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, dimensions.width(),
4198 dimensions.height()));
4199 } else {
4200 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt, renderBufferFormat,
4201 dimensions.width(), dimensions.height())) {
4202 deleteIDs();
4203 return {};
4204 }
4205 }
4206 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4207 GR_GL_RENDERBUFFER, colorID));
4208 }
4209 if (!avoidStencil) {
4210 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
4211 auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx];
4212 if (sampleCnt == 1) {
4213 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, GrGLFormatToEnum(stencilBufferFormat),
4214 dimensions.width(), dimensions.height()));
4215 } else {
4216 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt,
4217 GrGLFormatToEnum(stencilBufferFormat),
4218 dimensions.width(), dimensions.height())) {
4219 deleteIDs();
4220 return {};
4221 }
4222 }
4223 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
4224 GR_GL_STENCIL_ATTACHMENT,
4225 GR_GL_RENDERBUFFER,
4226 stencilID));
4227 if (GrGLFormatIsPackedDepthStencil(this->glCaps().stencilFormats()[sFormatIdx])) {
4228 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
4229 GR_GL_RENDERBUFFER, stencilID));
4230 }
4231 }
4232
4233 // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
4234 // has this rule that if a renderbuffer/texture is deleted and a FBO other than the current FBO
4235 // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the
4236 // renderbuffers/texture.
4237 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
4238 deleteIDs(/* saveFBO = */ true);
4239
4240 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4241 GrGLenum status;
4242 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
4243 if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
4244 this->deleteFramebuffer(info.fFBOID);
4245 return {};
4246 }
4247
4248 int stencilBits = 0;
4249 if (!avoidStencil) {
4250 stencilBits = SkToInt(GrGLFormatStencilBits(this->glCaps().stencilFormats()[sFormatIdx]));
4251 }
4252
4253 GrBackendRenderTarget beRT = GrBackendRenderTargets::MakeGL(
4254 dimensions.width(), dimensions.height(), sampleCnt, stencilBits, info);
4255 SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat()));
4256 return beRT;
4257 }
4258
4259 void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
4260 SkASSERT(GrBackendApi::kOpenGL == backendRT.backend());
4261 GrGLFramebufferInfo info;
4262 if (GrBackendRenderTargets::GetGLFramebufferInfo(backendRT, &info)) {
4263 if (info.fFBOID) {
4264 this->deleteFramebuffer(info.fFBOID);
4265 }
4266 }
4267 }
4268 #endif
4269
4270 ///////////////////////////////////////////////////////////////////////////////
4271
4272 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
4273 const GrBuffer* ibuf) {
4274 SkASSERT(!ibuf || ibuf->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(ibuf)->isMapped());
4275 GrGLAttribArrayState* attribState;
4276
4277 if (gpu->glCaps().isCoreProfile()) {
4278 if (!fCoreProfileVertexArray) {
4279 GrGLuint arrayID;
4280 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
4281 int attrCount = gpu->glCaps().maxVertexAttributes();
4282 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
4283 }
4284 if (ibuf) {
4285 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
4286 } else {
4287 attribState = fCoreProfileVertexArray->bind(gpu);
4288 }
4289 } else {
4290 if (ibuf) {
4291 // bindBuffer implicitly binds VAO 0 when binding an index buffer.
4292 gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
4293 } else {
4294 this->setVertexArrayID(gpu, 0);
4295 }
4296 int attrCount = gpu->glCaps().maxVertexAttributes();
4297 if (fDefaultVertexArrayAttribState.count() != attrCount) {
4298 fDefaultVertexArrayAttribState.resize(attrCount);
4299 }
4300 attribState = &fDefaultVertexArrayAttribState;
4301 }
4302 return attribState;
4303 }
4304
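// Records the client's finished callback along with the timer query's GL query object (0 when no
// timer query was started); the callbacks fire once the associated GPU work has completed.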
4305 void GrGLGpu::addFinishedCallback(skgpu::AutoCallback callback,
4306 std::optional<GrTimerQuery> timerQuery) {
4307 GrGLint glQuery = timerQuery ? static_cast<GrGLint>(timerQuery->query) : 0;
4308 fFinishCallbacks.add(std::move(callback), glQuery);
4309 }
4310
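// Issues a glFlush only if one has been flagged as needed since the last flush, or if the caller
// forces it.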
4311 void GrGLGpu::flush(FlushType flushType) {
4312 if (fNeedsGLFlush || flushType == FlushType::kForce) {
4313 GL_CALL(Flush());
4314 fNeedsGLFlush = false;
4315 }
4316 }
4317
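// Submits pending GL work. If the caller requested CPU synchronization, or finished callbacks
// are outstanding on a context without fence sync support, fall back to a full glFinish and fire
// all callbacks; otherwise a glFlush plus a poll of the pending callbacks is sufficient.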
4318 bool GrGLGpu::onSubmitToGpu(const GrSubmitInfo& info) {
4319 if (info.fSync == GrSyncCpu::kYes ||
4320 (!fFinishCallbacks.empty() && !this->glCaps().fenceSyncSupport())) {
4321 this->finishOutstandingGpuWork();
4322 fFinishCallbacks.callAll(true);
4323 } else {
4324 this->flush();
4325 // See if any previously inserted finish procs are good to go.
4326 fFinishCallbacks.check();
4327 }
4328 if (!this->glCaps().skipErrorChecks()) {
4329 this->clearErrorsAndCheckForOOM();
4330 }
4331 return true;
4332 }
4333
4334 void GrGLGpu::willExecute() {
4335 // Because our transfers will be submitted to GL for immediate execution (there is no command
4336 // buffer to submit), we must unmap any staging buffers.
4337 if (fStagingBufferManager) {
4338 fStagingBufferManager->detachBuffers();
4339 }
4340 }
4341
4342 void GrGLGpu::submit(GrOpsRenderPass* renderPass) {
4343 // The GrGLOpsRenderPass doesn't buffer ops, so there is nothing to do here.
4344 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
4345 fCachedOpsRenderPass->reset();
4346 }
4347
4348 [[nodiscard]] GrGLsync GrGLGpu::insertSync() {
4349 GrGLsync sync = nullptr;
4350 switch (this->glCaps().fenceType()) {
4351 case GrGLCaps::FenceType::kNone:
4352 return nullptr;
4353 case GrGLCaps::FenceType::kNVFence: {
4354 static_assert(sizeof(GrGLsync) >= sizeof(GrGLuint));
4355 GrGLuint fence = 0;
4356 GL_CALL(GenFences(1, &fence));
4357 GL_CALL(SetFence(fence, GR_GL_ALL_COMPLETED));
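// Pack the GLuint fence name into the pointer-sized GrGLsync handle (the static_assert above
// guarantees it fits); testSync and deleteSync reverse this cast.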
4358 sync = reinterpret_cast<GrGLsync>(static_cast<intptr_t>(fence));
4359 break;
4360 }
4361 case GrGLCaps::FenceType::kSyncObject: {
4362 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4363 break;
4364 }
4365 }
4366 this->setNeedsFlush();
4367 return sync;
4368 }
4369
4370 bool GrGLGpu::testSync(GrGLsync sync) {
4371 switch (this->glCaps().fenceType()) {
4372 case GrGLCaps::FenceType::kNone:
4373 SK_ABORT("Testing sync without sync support.");
4374 return false;
4375 case GrGLCaps::FenceType::kNVFence: {
4376 GrGLuint nvFence = static_cast<GrGLuint>(reinterpret_cast<intptr_t>(sync));
4377 GrGLboolean result;
4378 GL_CALL_RET(result, TestFence(nvFence));
4379 return result == GR_GL_TRUE;
4380 }
4381 case GrGLCaps::FenceType::kSyncObject: {
4382 constexpr GrGLbitfield kFlags = 0;
4383 GrGLenum result;
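// The Emscripten/WebGL binding of ClientWaitSync takes the 64-bit timeout as two 32-bit words
// (WebGL has no 64-bit integer type); the timeout is zero in either build.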
4384 #if defined(__EMSCRIPTEN__)
4385 GL_CALL_RET(result, ClientWaitSync(sync, kFlags, 0, 0));
4386 #else
4387 GL_CALL_RET(result, ClientWaitSync(sync, kFlags, 0));
4388 #endif
4389 return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
4390 }
4391 }
4392 SkUNREACHABLE;
4393 }
4394
4395 void GrGLGpu::deleteSync(GrGLsync sync) {
4396 switch (this->glCaps().fenceType()) {
4397 case GrGLCaps::FenceType::kNone:
4398 SK_ABORT("Deleting sync without sync support.");
4399 break;
4400 case GrGLCaps::FenceType::kNVFence: {
4401 GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync));
4402 GL_CALL(DeleteFences(1, &nvFence));
4403 break;
4404 }
4405 case GrGLCaps::FenceType::kSyncObject:
4406 GL_CALL(DeleteSync(sync));
4407 break;
4408 }
4409 }
4410
4411 [[nodiscard]] std::unique_ptr<GrSemaphore> GrGLGpu::makeSemaphore(bool isOwned) {
4412 SkASSERT(this->caps()->semaphoreSupport());
4413 return GrGLSemaphore::Make(this, isOwned);
4414 }
4415
4416 std::unique_ptr<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore&,
4417 GrSemaphoreWrapType,
4418 GrWrapOwnership) {
4419 SK_ABORT("Unsupported");
4420 }
4421
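// Backs the semaphore with a newly created GL fence sync; a GL flush must still happen (hence
// setNeedsFlush) before another context can successfully wait on it.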
4422 void GrGLGpu::insertSemaphore(GrSemaphore* semaphore) {
4423 SkASSERT(semaphore);
4424 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4425
4426 GrGLsync sync;
4427 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4428 glSem->setSync(sync);
4429 this->setNeedsFlush();
4430 }
4431
4432 void GrGLGpu::waitSemaphore(GrSemaphore* semaphore) {
4433 SkASSERT(semaphore);
4434 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4435
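// The Emscripten/WebGL binding of WaitSync likewise takes the 64-bit GR_GL_TIMEOUT_IGNORED value
// split into low and high 32-bit halves.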
4436 #if defined(__EMSCRIPTEN__)
4437 constexpr auto kLo = SkTo<GrGLuint>(GR_GL_TIMEOUT_IGNORED & 0xFFFFFFFFull);
4438 constexpr auto kHi = SkTo<GrGLuint>(GR_GL_TIMEOUT_IGNORED >> 32);
4439 GL_CALL(WaitSync(glSem->sync(), 0, kLo, kHi));
4440 #else
4441 GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
4442 #endif
4443 }
4444
4445 std::optional<GrTimerQuery> GrGLGpu::startTimerQuery() {
4446 if (glCaps().timerQueryType() == GrGLCaps::TimerQueryType::kNone) {
4447 return {};
4448 }
4449 GrGLuint glQuery;
4450 GL_CALL(GenQueries(1, &glQuery));
4451 if (!glQuery) {
4452 return {};
4453 }
4454 if (glCaps().timerQueryType() == GrGLCaps::TimerQueryType::kDisjoint) {
4455 // Clear the disjoint state
4456 GrGLint _;
4457 GR_GL_GetIntegerv(this->glInterface(), GR_GL_GPU_DISJOINT, &_);
4458 }
4459 GL_CALL(BeginQuery(GR_GL_TIME_ELAPSED, glQuery));
4460 return GrTimerQuery{glQuery};
4461 }
4462
4463 void GrGLGpu::endTimerQuery(const GrTimerQuery& timerQuery) {
4464 SkASSERT(glCaps().timerQueryType() != GrGLCaps::TimerQueryType::kNone);
4465 SkASSERT(SkToUInt(timerQuery.query));
4466 // Since only one query of a particular type can be active at once, glEndQuery doesn't take a
4467 // query parameter.
4468 GL_CALL(EndQuery(GR_GL_TIME_ELAPSED));
4469 }
4470
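// Reads back the elapsed-time query result and deletes the query object. Returns 0 if the result
// is not yet available or a disjoint event invalidated the measurement.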
4471 uint64_t GrGLGpu::getTimerQueryResult(GrGLuint query) {
4472 SkASSERT(glCaps().timerQueryType() != GrGLCaps::TimerQueryType::kNone);
4473 SkASSERT(query);
4474
4475 // Because we only call this after a sync completes, the query *should* be available.
4476 GrGLuint available;
4477 GL_CALL(GetQueryObjectuiv(query, GR_GL_QUERY_RESULT_AVAILABLE, &available));
4478 bool getResult = true;
4479 if (!available) {
4480 SkDebugf("GL timer query is not available.\n");
4481 getResult = false;
4482 }
4483
4484 if (glCaps().timerQueryType() == GrGLCaps::TimerQueryType::kDisjoint) {
4485 // Clear the disjoint state
4486 GrGLint disjoint;
4487 GR_GL_GetIntegerv(this->glInterface(), GR_GL_GPU_DISJOINT, &disjoint);
4488 if (disjoint) {
4489 SkDebugf("GL timer query ignored because of disjoint event.\n");
4490 getResult = false;
4491 }
4492 }
4493
4494 uint64_t result = 0;
4495 if (getResult) {
4496 GR_GL_GetQueryObjectui64v(this->glInterface(), query, GR_GL_QUERY_RESULT, &result);
4497 }
4498 GL_CALL(DeleteQueries(1, &query));
4499 return result;
4500 }
4501
4502 void GrGLGpu::checkFinishedCallbacks() {
4503 fFinishCallbacks.check();
4504 }
4505
4506 void GrGLGpu::finishOutstandingGpuWork() {
4507 GL_CALL(Finish());
4508 }
4509
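// Drains the GL error queue so a later check reflects only subsequent calls, noting any
// GR_GL_OUT_OF_MEMORY encountered along the way.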
4510 void GrGLGpu::clearErrorsAndCheckForOOM() {
4511 while (this->getErrorAndCheckForOOM() != GR_GL_NO_ERROR) {}
4512 }
4513
4514 GrGLenum GrGLGpu::getErrorAndCheckForOOM() {
4515 #if GR_GL_CHECK_ERROR
4516 if (this->glInterface()->checkAndResetOOMed()) {
4517 this->setOOMed();
4518 }
4519 #endif
4520 GrGLenum error = this->fGLContext->glInterface()->fFunctions.fGetError();
4521 if (error == GR_GL_OUT_OF_MEMORY) {
4522 this->setOOMed();
4523 }
4524 return error;
4525 }
4526
4527 std::unique_ptr<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
4528 // Set up a semaphore to be signaled once the data is ready, and flush GL
4529 std::unique_ptr<GrSemaphore> semaphore = this->makeSemaphore(true);
4530 SkASSERT(semaphore);
4531 this->insertSemaphore(semaphore.get());
4532 // We must call flush here to make sure the GrGLsync object gets created and sent to the gpu.
4533 this->flush(FlushType::kForce);
4534
4535 return semaphore;
4536 }
4537
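// Maps a texture's sampler type to the index of the copy program variant used for it (2D,
// rectangle, and external textures each get their own program).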
4538 int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) {
4539 switch (SkSLCombinedSamplerTypeForTextureType(texture->textureType())) {
4540 case SkSLType::kTexture2DSampler:
4541 return 0;
4542 case SkSLType::kTexture2DRectSampler:
4543 return 1;
4544 case SkSLType::kTextureExternalSampler:
4545 return 2;
4546 default:
4547 SK_ABORT("Unexpected samper type");
4548 }
4549 }
4550
4551 #ifdef SK_ENABLE_DUMP_GPU
4552 #include "src/utils/SkJSONWriter.h"
4553 void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const {
4554 // We are called by the base class, which has already called beginObject(). We choose to nest
4555 // all of our caps information in a named sub-object.
4556 writer->beginObject("GL GPU");
4557
4558 const GrGLubyte* str;
4559 GL_CALL_RET(str, GetString(GR_GL_VERSION));
4560 writer->appendCString("GL_VERSION", (const char*)(str));
4561 GL_CALL_RET(str, GetString(GR_GL_RENDERER));
4562 writer->appendCString("GL_RENDERER", (const char*)(str));
4563 GL_CALL_RET(str, GetString(GR_GL_VENDOR));
4564 writer->appendCString("GL_VENDOR", (const char*)(str));
4565 GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
4566 writer->appendCString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));
4567
4568 writer->appendName("extensions");
4569 glInterface()->fExtensions.dumpJSON(writer);
4570
4571 writer->endObject();
4572 }
4573 #endif
4574