1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/gl/GrGLGpu.h"
9
10 #include "include/core/SkAlphaType.h"
11 #include "include/core/SkColor.h"
12 #include "include/core/SkColorSpace.h"
13 #include "include/core/SkData.h"
14 #include "include/core/SkRect.h"
15 #include "include/core/SkSize.h"
16 #include "include/core/SkString.h"
17 #include "include/core/SkTextureCompressionType.h"
18 #include "include/core/SkTypes.h"
19 #include "include/gpu/GpuTypes.h"
20 #include "include/gpu/GrBackendSurface.h"
21 #include "include/gpu/GrContextOptions.h"
22 #include "include/gpu/GrDirectContext.h"
23 #include "include/gpu/GrDriverBugWorkarounds.h"
24 #include "include/gpu/GrTypes.h"
25 #include "include/gpu/gl/GrGLConfig.h"
26 #include "include/private/base/SkFloatingPoint.h"
27 #include "include/private/base/SkMath.h"
28 #include "include/private/base/SkPoint_impl.h"
29 #include "include/private/base/SkTemplates.h"
30 #include "include/private/base/SkTo.h"
31 #include "src/base/SkScopeExit.h"
32 #include "src/core/SkCompressedDataUtils.h"
33 #include "src/core/SkLRUCache.h"
34 #include "src/core/SkMipmap.h"
35 #include "src/core/SkSLTypeShared.h"
36 #include "src/core/SkTraceEvent.h"
37 #include "src/gpu/SkRenderEngineAbortf.h"
38 #include "src/gpu/Swizzle.h"
39 #include "src/gpu/ganesh/GrAttachment.h"
40 #include "src/gpu/ganesh/GrBackendSurfacePriv.h"
41 #include "src/gpu/ganesh/GrBackendUtils.h"
42 #include "src/gpu/ganesh/GrBuffer.h"
43 #include "src/gpu/ganesh/GrDataUtils.h"
44 #include "src/gpu/ganesh/GrDirectContextPriv.h"
45 #include "src/gpu/ganesh/GrGpuBuffer.h"
46 #include "src/gpu/ganesh/GrImageInfo.h"
47 #include "src/gpu/ganesh/GrPipeline.h"
48 #include "src/gpu/ganesh/GrProgramInfo.h"
49 #include "src/gpu/ganesh/GrRenderTarget.h"
50 #include "src/gpu/ganesh/GrSemaphore.h"
51 #include "src/gpu/ganesh/GrShaderCaps.h"
52 #include "src/gpu/ganesh/GrShaderVar.h"
53 #include "src/gpu/ganesh/GrStagingBufferManager.h"
54 #include "src/gpu/ganesh/GrSurface.h"
55 #include "src/gpu/ganesh/GrTexture.h"
56 #include "src/gpu/ganesh/GrUtil.h"
57 #include "src/gpu/ganesh/GrWindowRectangles.h"
58 #include "src/gpu/ganesh/gl/GrGLAttachment.h"
59 #include "src/gpu/ganesh/gl/GrGLBackendSurfacePriv.h"
60 #include "src/gpu/ganesh/gl/GrGLBuffer.h"
61 #include "src/gpu/ganesh/gl/GrGLOpsRenderPass.h"
62 #include "src/gpu/ganesh/gl/GrGLProgram.h"
63 #include "src/gpu/ganesh/gl/GrGLSemaphore.h"
64 #include "src/gpu/ganesh/gl/GrGLTextureRenderTarget.h"
65 #include "src/gpu/ganesh/gl/builders/GrGLShaderStringBuilder.h"
66 #include "src/sksl/SkSLProgramKind.h"
67 #include "src/sksl/SkSLProgramSettings.h"
68 #include "src/sksl/ir/SkSLProgram.h"
69
70 #include <algorithm>
71 #include <cmath>
72 #include <functional>
73 #include <memory>
74 #include <string>
75 #include <utility>
76
77 using namespace skia_private;
78
79 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
80 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
81
82 #define GL_ALLOC_CALL(call) \
83 [&] { \
84 if (this->glCaps().skipErrorChecks()) { \
85 GR_GL_CALL(this->glInterface(), call); \
86 return static_cast<GrGLenum>(GR_GL_NO_ERROR); \
87 } else { \
88 this->clearErrorsAndCheckForOOM(); \
89 GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \
90 return this->getErrorAndCheckForOOM(); \
91 } \
92 }()
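// GL_ALLOC_CALL wraps GL calls that allocate memory (e.g. TexStorage2D, CompressedTexImage2D,
// RenderbufferStorageMultisample below) and returns the resulting GL error. When the caps allow
// skipping error checks it just issues the call and reports GR_GL_NO_ERROR; otherwise it clears
// any pending errors first so that an out-of-memory condition can be attributed to the
// allocation itself (see clearErrorsAndCheckForOOM/getErrorAndCheckForOOM).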
93
94 //#define USE_NSIGHT
95
96 ///////////////////////////////////////////////////////////////////////////////
97
98 static const GrGLenum gXfermodeEquation2Blend[] = {
99 // Basic OpenGL blend equations.
100 GR_GL_FUNC_ADD,
101 GR_GL_FUNC_SUBTRACT,
102 GR_GL_FUNC_REVERSE_SUBTRACT,
103
104 // GL_KHR_blend_equation_advanced.
105 GR_GL_SCREEN,
106 GR_GL_OVERLAY,
107 GR_GL_DARKEN,
108 GR_GL_LIGHTEN,
109 GR_GL_COLORDODGE,
110 GR_GL_COLORBURN,
111 GR_GL_HARDLIGHT,
112 GR_GL_SOFTLIGHT,
113 GR_GL_DIFFERENCE,
114 GR_GL_EXCLUSION,
115 GR_GL_MULTIPLY,
116 GR_GL_HSL_HUE,
117 GR_GL_HSL_SATURATION,
118 GR_GL_HSL_COLOR,
119 GR_GL_HSL_LUMINOSITY,
120
121 // Illegal... needs to map to something.
122 GR_GL_FUNC_ADD,
123 };
124 static_assert(0 == (int)skgpu::BlendEquation::kAdd);
125 static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
126 static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
127 static_assert(3 == (int)skgpu::BlendEquation::kScreen);
128 static_assert(4 == (int)skgpu::BlendEquation::kOverlay);
129 static_assert(5 == (int)skgpu::BlendEquation::kDarken);
130 static_assert(6 == (int)skgpu::BlendEquation::kLighten);
131 static_assert(7 == (int)skgpu::BlendEquation::kColorDodge);
132 static_assert(8 == (int)skgpu::BlendEquation::kColorBurn);
133 static_assert(9 == (int)skgpu::BlendEquation::kHardLight);
134 static_assert(10 == (int)skgpu::BlendEquation::kSoftLight);
135 static_assert(11 == (int)skgpu::BlendEquation::kDifference);
136 static_assert(12 == (int)skgpu::BlendEquation::kExclusion);
137 static_assert(13 == (int)skgpu::BlendEquation::kMultiply);
138 static_assert(14 == (int)skgpu::BlendEquation::kHSLHue);
139 static_assert(15 == (int)skgpu::BlendEquation::kHSLSaturation);
140 static_assert(16 == (int)skgpu::BlendEquation::kHSLColor);
141 static_assert(17 == (int)skgpu::BlendEquation::kHSLLuminosity);
142 static_assert(std::size(gXfermodeEquation2Blend) == skgpu::kBlendEquationCnt);
143
144 static const GrGLenum gXfermodeCoeff2Blend[] = {
145 GR_GL_ZERO,
146 GR_GL_ONE,
147 GR_GL_SRC_COLOR,
148 GR_GL_ONE_MINUS_SRC_COLOR,
149 GR_GL_DST_COLOR,
150 GR_GL_ONE_MINUS_DST_COLOR,
151 GR_GL_SRC_ALPHA,
152 GR_GL_ONE_MINUS_SRC_ALPHA,
153 GR_GL_DST_ALPHA,
154 GR_GL_ONE_MINUS_DST_ALPHA,
155 GR_GL_CONSTANT_COLOR,
156 GR_GL_ONE_MINUS_CONSTANT_COLOR,
157
158 // extended blend coeffs
159 GR_GL_SRC1_COLOR,
160 GR_GL_ONE_MINUS_SRC1_COLOR,
161 GR_GL_SRC1_ALPHA,
162 GR_GL_ONE_MINUS_SRC1_ALPHA,
163
164 // Illegal... needs to map to something.
165 GR_GL_ZERO,
166 };
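// Like gXfermodeEquation2Blend above, this table appears to be indexed by the corresponding
// skgpu blend-coefficient enum: the first twelve entries are the basic GL blend coefficients,
// the next four require dual-source blending (the SRC1 variants), and the trailing GR_GL_ZERO
// is the placeholder for the illegal/unused coefficient.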
167
168 //////////////////////////////////////////////////////////////////////////////
169
170 static int gl_target_to_binding_index(GrGLenum target) {
171 switch (target) {
172 case GR_GL_TEXTURE_2D:
173 return 0;
174 case GR_GL_TEXTURE_RECTANGLE:
175 return 1;
176 case GR_GL_TEXTURE_EXTERNAL:
177 return 2;
178 }
179 SK_ABORT("Unexpected GL texture target.");
180 }
181
182 GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
183 return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
184 }
185
186 bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
187 return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
188 }
189
190 void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) {
191 int targetIndex = gl_target_to_binding_index(target);
192 fTargetBindings[targetIndex].fBoundResourceID = resourceID;
193 fTargetBindings[targetIndex].fHasBeenModified = true;
194 }
195
196 void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
197 this->setBoundID(target, GrGpuResource::UniqueID());
198 }
199
200 void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) {
201 for (auto& targetBinding : fTargetBindings) {
202 targetBinding.fBoundResourceID.makeInvalid();
203 if (markUnmodified) {
204 targetBinding.fHasBeenModified = false;
205 }
206 }
207 }
208
209 //////////////////////////////////////////////////////////////////////////////
210
211 static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
212 switch (filter) {
213 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
214 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR;
215 }
216 SkUNREACHABLE;
217 }
218
219 static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter,
220 GrSamplerState::MipmapMode mm) {
221 switch (mm) {
222 case GrSamplerState::MipmapMode::kNone:
223 return filter_to_gl_mag_filter(filter);
224 case GrSamplerState::MipmapMode::kNearest:
225 switch (filter) {
226 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_NEAREST;
227 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_NEAREST;
228 }
229 SkUNREACHABLE;
230 case GrSamplerState::MipmapMode::kLinear:
231 switch (filter) {
232 case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_LINEAR;
233 case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_LINEAR;
234 }
235 SkUNREACHABLE;
236 }
237 SkUNREACHABLE;
238 }
239
240 static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
241 const GrCaps& caps) {
242 switch (wrapMode) {
243 case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE;
244 case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT;
245 case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
246 case GrSamplerState::WrapMode::kClampToBorder:
247 // May not be supported but should have been caught earlier
248 SkASSERT(caps.clampToBorderSupport());
249 return GR_GL_CLAMP_TO_BORDER;
250 }
251 SkUNREACHABLE;
252 }
253
254 ///////////////////////////////////////////////////////////////////////////////
255
256 static void cleanup_program(GrGLGpu* gpu,
257 GrGLuint* programID,
258 GrGLuint* vshader,
259 GrGLuint* fshader) {
260 const GrGLInterface* gli = gpu->glInterface();
261 if (programID) {
262 GR_GL_CALL(gli, DeleteProgram(*programID));
263 *programID = 0;
264 }
265 if (vshader) {
266 GR_GL_CALL(gli, DeleteShader(*vshader));
267 *vshader = 0;
268 }
269 if (fshader) {
270 GR_GL_CALL(gli, DeleteShader(*fshader));
271 *fshader = 0;
272 }
273 }
274
275 ///////////////////////////////////////////////////////////////////////////////
276
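// Caches GL sampler objects keyed by GrSamplerState and tracks which sampler object is bound
// to each texture unit so redundant BindSampler calls can be skipped. The GrGLGpu constructor
// only creates this cache when glCaps().useSamplerObjects() is true. abandon() drops the GL
// object IDs without deleting them (e.g. lost context), while release() deletes them.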
277 class GrGLGpu::SamplerObjectCache {
278 public:
279 SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
280 fNumTextureUnits = fGpu->glCaps().shaderCaps()->fMaxFragmentSamplers;
281 fTextureUnitStates = std::make_unique<UnitState[]>(fNumTextureUnits);
282 }
283
284 ~SamplerObjectCache() {
285 if (!fNumTextureUnits) {
286 // We've already been abandoned.
287 return;
288 }
289 }
290
291 void bindSampler(int unitIdx, GrSamplerState state) {
292 if (unitIdx >= fNumTextureUnits) {
293 return;
294 }
295 // In GL the max aniso value is specified in addition to min/mag filters and the driver
296 // is encouraged to consider the other filter settings when doing aniso.
297 uint32_t key = state.asKey(/*anisoIsOrthogonal=*/true);
298 const Sampler* sampler = fSamplers.find(key);
299 if (!sampler) {
300 GrGLuint s;
301 GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
302 if (!s) {
303 return;
304 }
305 sampler = fSamplers.insert(key, Sampler(s, fGpu->glInterface()));
306 GrGLenum minFilter = filter_to_gl_min_filter(state.filter(), state.mipmapMode());
307 GrGLenum magFilter = filter_to_gl_mag_filter(state.filter());
308 GrGLenum wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
309 GrGLenum wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
310 GR_GL_CALL(fGpu->glInterface(),
311 SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
312 GR_GL_CALL(fGpu->glInterface(),
313 SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
314 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
315 GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
316 SkASSERT(fGpu->glCaps().anisoSupport() || !state.isAniso());
317 if (fGpu->glCaps().anisoSupport()) {
318 float maxAniso = std::min(static_cast<GrGLfloat>(state.maxAniso()),
319 fGpu->glCaps().maxTextureMaxAnisotropy());
320 GR_GL_CALL(fGpu->glInterface(), SamplerParameterf(s,
321 GR_GL_TEXTURE_MAX_ANISOTROPY,
322 maxAniso));
323 }
324 }
325 SkASSERT(sampler && sampler->id());
326 if (!fTextureUnitStates[unitIdx].fKnown ||
327 fTextureUnitStates[unitIdx].fSamplerIDIfKnown != sampler->id()) {
328 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, sampler->id()));
329 fTextureUnitStates[unitIdx].fSamplerIDIfKnown = sampler->id();
330 fTextureUnitStates[unitIdx].fKnown = true;
331 }
332 }
333
334 void unbindSampler(int unitIdx) {
335 if (!fTextureUnitStates[unitIdx].fKnown ||
336 fTextureUnitStates[unitIdx].fSamplerIDIfKnown != 0) {
337 GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, 0));
338 fTextureUnitStates[unitIdx].fSamplerIDIfKnown = 0;
339 fTextureUnitStates[unitIdx].fKnown = true;
340 }
341 }
342
343 void invalidateBindings() {
344 std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
345 }
346
347 void abandon() {
348 fSamplers.foreach([](uint32_t* key, Sampler* sampler) { sampler->abandon(); });
349 fTextureUnitStates.reset();
350 fNumTextureUnits = 0;
351 }
352
353 void release() {
354 if (!fNumTextureUnits) {
355 // We've already been abandoned.
356 return;
357 }
358 fSamplers.reset();
359 // Deleting a bound sampler implicitly binds sampler 0. We just invalidate all of our
360 // knowledge.
361 std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
362 }
363
364 private:
365 class Sampler {
366 public:
367 Sampler() = default;
368 Sampler(const Sampler&) = delete;
369
370 Sampler(Sampler&& that) {
371 fID = that.fID;
372 fInterface = that.fInterface;
373 that.fID = 0;
374 }
375
376 Sampler(GrGLuint id, const GrGLInterface* interface) : fID(id), fInterface(interface) {}
377
378 ~Sampler() {
379 if (fID) {
380 GR_GL_CALL(fInterface, DeleteSamplers(1, &fID));
381 }
382 }
383
384 GrGLuint id() const { return fID; }
385
386 void abandon() { fID = 0; }
387
388 private:
389 GrGLuint fID = 0;
390 const GrGLInterface* fInterface = nullptr;
391 };
392
393 struct UnitState {
394 bool fKnown = false;
395 GrGLuint fSamplerIDIfKnown = 0;
396 };
397
398 static constexpr int kMaxSamplers = 32;
399
400 SkLRUCache<uint32_t, Sampler> fSamplers{kMaxSamplers};
401 std::unique_ptr<UnitState[]> fTextureUnitStates;
402 GrGLGpu* fGpu;
403 int fNumTextureUnits;
404 };
405
406 ///////////////////////////////////////////////////////////////////////////////
407
408 std::unique_ptr<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface,
409 const GrContextOptions& options,
410 GrDirectContext* direct) {
411 #if !defined(SK_DISABLE_LEGACY_GL_MAKE_NATIVE_INTERFACE)
412 if (!interface) {
413 interface = GrGLMakeNativeInterface();
414 if (!interface) {
415 return nullptr;
416 }
417 }
418 #else
419 if (!interface) {
420 return nullptr;
421 }
422 #endif
423 #ifdef USE_NSIGHT
424 const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
425 #endif
426 auto glContext = GrGLContext::Make(std::move(interface), options);
427 if (!glContext) {
428 return nullptr;
429 }
430 return std::unique_ptr<GrGpu>(new GrGLGpu(std::move(glContext), direct));
431 }
432
433 GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrDirectContext* dContext)
434 : GrGpu(dContext)
435 , fGLContext(std::move(ctx))
436 , fProgramCache(new ProgramCache(dContext->priv().options().fRuntimeProgramCacheSize))
437 , fHWProgramID(0)
438 , fTempSrcFBOID(0)
439 , fTempDstFBOID(0)
440 , fStencilClearFBOID(0)
441 , fFinishCallbacks(this) {
442 SkASSERT(fGLContext);
443 // Clear errors so we don't get confused whether we caused an error.
444 this->clearErrorsAndCheckForOOM();
445 // Toss out any pre-existing OOM that was hanging around before we got started.
446 this->checkAndResetOOMed();
447
448 this->initCaps(sk_ref_sp(fGLContext->caps()));
449
450 fHWTextureUnitBindings.reset(this->numTextureUnits());
451
452 this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
453 this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
454 this->hwBufferState(GrGpuBufferType::kDrawIndirect)->fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
455 if (GrGLCaps::TransferBufferType::kChromium == this->glCaps().transferBufferType()) {
456 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
457 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
458 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
459 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
460 } else {
461 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
462 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
463 }
464 for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
465 fHWBufferState[i].invalidate();
466 }
467
468 if (this->glCaps().useSamplerObjects()) {
469 fSamplerObjectCache = std::make_unique<SamplerObjectCache>(this);
470 }
471 }
472
473 GrGLGpu::~GrGLGpu() {
474 // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
475 // to release the resources held by the objects themselves.
476 fCopyProgramArrayBuffer.reset();
477 fMipmapProgramArrayBuffer.reset();
478 if (fProgramCache) {
479 fProgramCache->reset();
480 }
481
482 fHWProgram.reset();
483 if (fHWProgramID) {
484 // detach the current program so there is no confusion on OpenGL's part
485 // that we want it to be deleted
486 GL_CALL(UseProgram(0));
487 }
488
489 if (fTempSrcFBOID) {
490 this->deleteFramebuffer(fTempSrcFBOID);
491 }
492 if (fTempDstFBOID) {
493 this->deleteFramebuffer(fTempDstFBOID);
494 }
495 if (fStencilClearFBOID) {
496 this->deleteFramebuffer(fStencilClearFBOID);
497 }
498
499 for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
500 if (0 != fCopyPrograms[i].fProgram) {
501 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
502 }
503 }
504
505 for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
506 if (0 != fMipmapPrograms[i].fProgram) {
507 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
508 }
509 }
510
511 fSamplerObjectCache.reset();
512
513 fFinishCallbacks.callAll(true);
514 }
515
516 void GrGLGpu::disconnect(DisconnectType type) {
517 INHERITED::disconnect(type);
518 if (DisconnectType::kCleanup == type) {
519 if (fHWProgramID) {
520 GL_CALL(UseProgram(0));
521 }
522 if (fTempSrcFBOID) {
523 this->deleteFramebuffer(fTempSrcFBOID);
524 }
525 if (fTempDstFBOID) {
526 this->deleteFramebuffer(fTempDstFBOID);
527 }
528 if (fStencilClearFBOID) {
529 this->deleteFramebuffer(fStencilClearFBOID);
530 }
531 for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
532 if (fCopyPrograms[i].fProgram) {
533 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
534 }
535 }
536 for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
537 if (fMipmapPrograms[i].fProgram) {
538 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
539 }
540 }
541
542 if (fSamplerObjectCache) {
543 fSamplerObjectCache->release();
544 }
545 } else {
546 if (fProgramCache) {
547 fProgramCache->abandon();
548 }
549 if (fSamplerObjectCache) {
550 fSamplerObjectCache->abandon();
551 }
552 }
553
554 fHWProgram.reset();
555 fProgramCache->reset();
556 fProgramCache.reset();
557
558 fHWProgramID = 0;
559 fTempSrcFBOID = 0;
560 fTempDstFBOID = 0;
561 fStencilClearFBOID = 0;
562 fCopyProgramArrayBuffer.reset();
563 for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
564 fCopyPrograms[i].fProgram = 0;
565 }
566 fMipmapProgramArrayBuffer.reset();
567 for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
568 fMipmapPrograms[i].fProgram = 0;
569 }
570
571 fFinishCallbacks.callAll(/* doDelete */ DisconnectType::kCleanup == type);
572 }
573
574 GrThreadSafePipelineBuilder* GrGLGpu::pipelineBuilder() {
575 return fProgramCache.get();
576 }
577
578 sk_sp<GrThreadSafePipelineBuilder> GrGLGpu::refPipelineBuilder() {
579 return fProgramCache;
580 }
581
582 ///////////////////////////////////////////////////////////////////////////////
583
584 void GrGLGpu::onResetContext(uint32_t resetBits) {
585 if (resetBits & kMisc_GrGLBackendState) {
586 // we don't use the zb at all
587 GL_CALL(Disable(GR_GL_DEPTH_TEST));
588 GL_CALL(DepthMask(GR_GL_FALSE));
589
590 // We don't use face culling.
591 GL_CALL(Disable(GR_GL_CULL_FACE));
592 // We do use separate stencil. Our algorithms don't care which face is front vs. back so
593 // just set this to the default for self-consistency.
594 GL_CALL(FrontFace(GR_GL_CCW));
595
596 this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
597 this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();
598
599 if (GR_IS_GR_GL(this->glStandard())) {
600 #ifndef USE_NSIGHT
601 // Desktop-only state that we never change
602 if (!this->glCaps().isCoreProfile()) {
603 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
604 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
605 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
606 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
607 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
608 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
609 }
610 // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
611 // core profile. This seems like a bug since the core spec removes any mention of
612 // GL_ARB_imaging.
613 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
614 GL_CALL(Disable(GR_GL_COLOR_TABLE));
615 }
616 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
617
618 fHWWireframeEnabled = kUnknown_TriState;
619 #endif
620 // Since ES doesn't support glPointSize at all we always use the VS to
621 // set the point size
622 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
623
624 }
625
626 if (GR_IS_GR_GL_ES(this->glStandard()) &&
627 this->glCaps().fbFetchRequiresEnablePerSample()) {
628 // The arm extension requires specifically enabling MSAA fetching per sample.
629 // On some devices this may have a perf hit. Also multiple render targets are disabled
630 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
631 }
632 fHWWriteToColor = kUnknown_TriState;
633 // we only ever use lines in hairline mode
634 GL_CALL(LineWidth(1));
635 GL_CALL(Disable(GR_GL_DITHER));
636
637 fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
638 }
639
640 if (resetBits & kMSAAEnable_GrGLBackendState) {
641 if (this->glCaps().clientCanDisableMultisample()) {
642 // Restore GL_MULTISAMPLE to its initial state. It being enabled has no effect on draws
643 // to non-MSAA targets.
644 GL_CALL(Enable(GR_GL_MULTISAMPLE));
645 }
646 fHWConservativeRasterEnabled = kUnknown_TriState;
647 }
648
649 fHWActiveTextureUnitIdx = -1; // invalid
650 fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);
651
652 if (resetBits & kTextureBinding_GrGLBackendState) {
653 for (int s = 0; s < this->numTextureUnits(); ++s) {
654 fHWTextureUnitBindings[s].invalidateAllTargets(false);
655 }
656 if (fSamplerObjectCache) {
657 fSamplerObjectCache->invalidateBindings();
658 }
659 }
660
661 if (resetBits & kBlend_GrGLBackendState) {
662 fHWBlendState.invalidate();
663 }
664
665 if (resetBits & kView_GrGLBackendState) {
666 fHWScissorSettings.invalidate();
667 fHWWindowRectsState.invalidate();
668 fHWViewport.invalidate();
669 }
670
671 if (resetBits & kStencil_GrGLBackendState) {
672 fHWStencilSettings.invalidate();
673 fHWStencilTestEnabled = kUnknown_TriState;
674 }
675
676 // Vertex
677 if (resetBits & kVertex_GrGLBackendState) {
678 fHWVertexArrayState.invalidate();
679 this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
680 this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
681 this->hwBufferState(GrGpuBufferType::kDrawIndirect)->invalidate();
682 }
683
684 if (resetBits & kRenderTarget_GrGLBackendState) {
685 fHWBoundRenderTargetUniqueID.makeInvalid();
686 fHWSRGBFramebuffer = kUnknown_TriState;
687 fBoundDrawFramebuffer = 0;
688 }
689
690 // we assume these values
691 if (resetBits & kPixelStore_GrGLBackendState) {
692 if (this->caps()->writePixelsRowBytesSupport() ||
693 this->caps()->transferPixelsToRowBytesSupport()) {
694 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
695 }
696 if (this->glCaps().readPixelsRowBytesSupport()) {
697 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
698 }
699 if (this->glCaps().packFlipYSupport()) {
700 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
701 }
702 }
703
704 if (resetBits & kProgram_GrGLBackendState) {
705 fHWProgramID = 0;
706 fHWProgram.reset();
707 }
708 ++fResetTimestampForTextureParameters;
709 }
710
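// Validates a client-provided GrBackendTexture and fills in a GrGLTexture::Desc. It rejects
// missing/unknown GL texture info, unsupported targets (external or rectangle textures when the
// caps lack support), and protected textures on contexts without protected-content support.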
711 static bool check_backend_texture(const GrBackendTexture& backendTex,
712 const GrGLCaps& caps,
713 GrGLTexture::Desc* desc,
714 bool skipRectTexSupportCheck = false) {
715 GrGLTextureInfo info;
716 if (!GrBackendTextures::GetGLTextureInfo(backendTex, &info) || !info.fID || !info.fFormat) {
717 return false;
718 }
719
720 desc->fSize = {backendTex.width(), backendTex.height()};
721 desc->fTarget = info.fTarget;
722 desc->fID = info.fID;
723 desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
724 desc->fIsProtected = info.fProtected;
725
726 if (desc->fFormat == GrGLFormat::kUnknown) {
727 return false;
728 }
729 if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) {
730 if (!caps.shaderCaps()->fExternalTextureSupport) {
731 return false;
732 }
733 } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) {
734 if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) {
735 return false;
736 }
737 } else if (GR_GL_TEXTURE_2D != desc->fTarget) {
738 return false;
739 }
740 if (desc->fIsProtected == skgpu::Protected::kYes && !caps.supportsProtectedContent()) {
741 return false;
742 }
743
744 return true;
745 }
746
747 static sk_sp<GrGLTextureParameters> get_gl_texture_params(const GrBackendTexture& backendTex) {
748 const GrBackendTextureData* btd = GrBackendSurfacePriv::GetBackendData(backendTex);
749 auto glTextureData = static_cast<const GrGLBackendTextureData*>(btd);
750 SkASSERT(glTextureData);
751 return glTextureData->info().refParameters();
752 }
753
754 sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
755 GrWrapOwnership ownership,
756 GrWrapCacheable cacheable,
757 GrIOType ioType) {
758 GrGLTexture::Desc desc;
759 if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
760 return nullptr;
761 }
762
763 if (kBorrow_GrWrapOwnership == ownership) {
764 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
765 } else {
766 desc.fOwnership = GrBackendObjectOwnership::kOwned;
767 }
768
769 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
770 : GrMipmapStatus::kNotAllocated;
771
772 auto texture = GrGLTexture::MakeWrapped(this,
773 mipmapStatus,
774 desc,
775 get_gl_texture_params(backendTex),
776 cacheable,
777 ioType,
778 backendTex.getLabel());
779 if (this->glCaps().isFormatRenderable(backendTex.getBackendFormat(), 1)) {
780 // Pessimistically assume this external texture may have been bound to a FBO.
781 texture->baseLevelWasBoundToFBO();
782 }
783 return texture;
784 }
785
786 static bool check_compressed_backend_texture(const GrBackendTexture& backendTex,
787 const GrGLCaps& caps, GrGLTexture::Desc* desc,
788 bool skipRectTexSupportCheck = false) {
789 GrGLTextureInfo info;
790 if (!GrBackendTextures::GetGLTextureInfo(backendTex, &info) || !info.fID || !info.fFormat) {
791 return false;
792 }
793
794 desc->fSize = {backendTex.width(), backendTex.height()};
795 desc->fTarget = info.fTarget;
796 desc->fID = info.fID;
797 desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
798 desc->fIsProtected = info.fProtected;
799
800 if (desc->fFormat == GrGLFormat::kUnknown) {
801 return false;
802 }
803
804 if (GR_GL_TEXTURE_2D != desc->fTarget) {
805 return false;
806 }
807 if (desc->fIsProtected == skgpu::Protected::kYes && !caps.supportsProtectedContent()) {
808 return false;
809 }
810
811 return true;
812 }
813
814 sk_sp<GrTexture> GrGLGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
815 GrWrapOwnership ownership,
816 GrWrapCacheable cacheable) {
817 GrGLTexture::Desc desc;
818 if (!check_compressed_backend_texture(backendTex, this->glCaps(), &desc)) {
819 return nullptr;
820 }
821
822 if (kBorrow_GrWrapOwnership == ownership) {
823 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
824 } else {
825 desc.fOwnership = GrBackendObjectOwnership::kOwned;
826 }
827
828 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
829 : GrMipmapStatus::kNotAllocated;
830
831 return GrGLTexture::MakeWrapped(this,
832 mipmapStatus,
833 desc,
834 get_gl_texture_params(backendTex),
835 cacheable,
836 kRead_GrIOType,
837 backendTex.getLabel());
838 }
839
840 sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
841 int sampleCnt,
842 GrWrapOwnership ownership,
843 GrWrapCacheable cacheable) {
844 const GrGLCaps& caps = this->glCaps();
845
846 GrGLTexture::Desc desc;
847 if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
848 return nullptr;
849 }
850 SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt));
851 SkASSERT(caps.isFormatTexturable(desc.fFormat));
852
853 // We don't support rendering to an EXTERNAL texture.
854 if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) {
855 return nullptr;
856 }
857
858 if (kBorrow_GrWrapOwnership == ownership) {
859 desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
860 } else {
861 desc.fOwnership = GrBackendObjectOwnership::kOwned;
862 }
863
864
865 sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat);
866 SkASSERT(sampleCnt);
867
868 GrGLRenderTarget::IDs rtIDs;
869 if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) {
870 return nullptr;
871 }
872
873 GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kDirty
874 : GrMipmapStatus::kNotAllocated;
875
876 sk_sp<GrGLTextureRenderTarget> texRT(
877 GrGLTextureRenderTarget::MakeWrapped(this,
878 sampleCnt,
879 desc,
880 get_gl_texture_params(backendTex),
881 rtIDs,
882 cacheable,
883 mipmapStatus,
884 backendTex.getLabel()));
885 texRT->baseLevelWasBoundToFBO();
886 return texRT;
887 }
888
889 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
890 GrGLFramebufferInfo info;
891 if (!GrBackendRenderTargets::GetGLFramebufferInfo(backendRT, &info)) {
892 return nullptr;
893 }
894
895 if (backendRT.isProtected() && !this->glCaps().supportsProtectedContent()) {
896 return nullptr;
897 }
898
899 const auto format = GrBackendFormats::AsGLFormat(backendRT.getBackendFormat());
900 if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) {
901 return nullptr;
902 }
903
904 int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format);
905
906 GrGLRenderTarget::IDs rtIDs;
907 if (sampleCount <= 1) {
908 rtIDs.fSingleSampleFBOID = info.fFBOID;
909 rtIDs.fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
910 } else {
911 rtIDs.fSingleSampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
912 rtIDs.fMultisampleFBOID = info.fFBOID;
913 }
914 rtIDs.fMSColorRenderbufferID = 0;
915 rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
916 rtIDs.fTotalMemorySamplesPerPixel = sampleCount;
917
918 return GrGLRenderTarget::MakeWrapped(this,
919 backendRT.dimensions(),
920 format,
921 sampleCount,
922 rtIDs,
923 backendRT.stencilBits(),
924 skgpu::Protected(backendRT.isProtected()),
925 /*label=*/"GLGpu_WrapBackendRenderTarget");
926 }
927
928 static bool check_write_and_transfer_input(GrGLTexture* glTex) {
929 if (!glTex) {
930 return false;
931 }
932
933 // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
934 if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
935 return false;
936 }
937
938 return true;
939 }
940
941 bool GrGLGpu::onWritePixels(GrSurface* surface,
942 SkIRect rect,
943 GrColorType surfaceColorType,
944 GrColorType srcColorType,
945 const GrMipLevel texels[],
946 int mipLevelCount,
947 bool prepForTexSampling) {
948 auto glTex = static_cast<GrGLTexture*>(surface->asTexture());
949
950 if (!check_write_and_transfer_input(glTex)) {
951 return false;
952 }
953
954 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
955
956 // If we have mips make sure the base/max levels cover the full range so that the uploads go to
957 // the right levels. We've found some Radeons require this.
958 if (mipLevelCount && this->glCaps().mipmapLevelControlSupport()) {
959 auto params = glTex->parameters();
960 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
961 int maxLevel = glTex->maxMipmapLevel();
962 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
963 GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_BASE_LEVEL, 0));
964 nonsamplerState.fBaseMipMapLevel = 0;
965 }
966 if (params->nonsamplerState().fMaxMipmapLevel != maxLevel) {
967 GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_MAX_LEVEL, maxLevel));
968 nonsamplerState.fMaxMipmapLevel = maxLevel;
969 }
970 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
971 }
972
973 if (this->glCaps().flushBeforeWritePixels()) {
974 GL_CALL(Flush());
975 }
976
977 SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
978 return this->uploadColorTypeTexData(glTex->format(),
979 surfaceColorType,
980 glTex->dimensions(),
981 glTex->target(),
982 rect,
983 srcColorType,
984 texels,
985 mipLevelCount);
986 }
987
988 bool GrGLGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
989 size_t srcOffset,
990 sk_sp<GrGpuBuffer> dst,
991 size_t dstOffset,
992 size_t size) {
993 SkASSERT(!src->isMapped());
994 SkASSERT(!dst->isMapped());
995
996 auto glSrc = static_cast<const GrGLBuffer*>(src.get());
997 auto glDst = static_cast<const GrGLBuffer*>(dst.get());
998
999 // If we refactored bindBuffer() to use something other than GrGpuBufferType to indicate the
1000 // binding target then we could use the COPY_READ and COPY_WRITE targets here. But
1001 // CopyBufferSubData is documented to work with all the targets so it's not clear it's worth it.
1002 this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glSrc);
1003 this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glDst);
1004
1005 GL_CALL(CopyBufferSubData(GR_GL_PIXEL_UNPACK_BUFFER,
1006 GR_GL_PIXEL_PACK_BUFFER,
1007 srcOffset,
1008 dstOffset,
1009 size));
1010 return true;
1011 }
1012
1013 bool GrGLGpu::onTransferPixelsTo(GrTexture* texture,
1014 SkIRect rect,
1015 GrColorType textureColorType,
1016 GrColorType bufferColorType,
1017 sk_sp<GrGpuBuffer> transferBuffer,
1018 size_t offset,
1019 size_t rowBytes) {
1020 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
1021
1022 // Can't transfer compressed data
1023 SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
1024
1025 if (!check_write_and_transfer_input(glTex)) {
1026 return false;
1027 }
1028
1029 static_assert(sizeof(int) == sizeof(int32_t), "");
1030
1031 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());
1032
1033 SkASSERT(!transferBuffer->isMapped());
1034 SkASSERT(!transferBuffer->isCpuBuffer());
1035 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer.get());
1036 this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
1037
1038 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
1039
1040 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
1041 const size_t trimRowBytes = rect.width() * bpp;
1042 const void* pixels = (void*)offset;
1043
1044 SkASSERT(glBuffer->size() >= offset + rowBytes*(rect.height() - 1) + trimRowBytes);
1045
1046 bool restoreGLRowLength = false;
1047 if (trimRowBytes != rowBytes) {
1048 // we should have checked for this support already
1049 SkASSERT(this->glCaps().transferPixelsToRowBytesSupport());
1050 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
1051 restoreGLRowLength = true;
1052 }
1053
1054 GrGLFormat textureFormat = glTex->format();
1055 // External format and type come from the upload data.
1056 GrGLenum externalFormat = 0;
1057 GrGLenum externalType = 0;
1058 this->glCaps().getTexSubImageExternalFormatAndType(
1059 textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType);
1060 if (!externalFormat || !externalType) {
1061 return false;
1062 }
1063
1064 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
1065 GL_CALL(TexSubImage2D(glTex->target(),
1066 0,
1067 rect.left(),
1068 rect.top(),
1069 rect.width(),
1070 rect.height(),
1071 externalFormat,
1072 externalType,
1073 pixels));
1074
1075 if (restoreGLRowLength) {
1076 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1077 }
1078
1079 return true;
1080 }
1081
1082 bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface,
1083 SkIRect rect,
1084 GrColorType surfaceColorType,
1085 GrColorType dstColorType,
1086 sk_sp<GrGpuBuffer> transferBuffer,
1087 size_t offset) {
1088 auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer.get());
1089 SkASSERT(glBuffer->size() >= offset + (rect.width() *
1090 rect.height()*
1091 GrColorTypeBytesPerPixel(dstColorType)));
1092
1093 this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer);
1094
1095 auto offsetAsPtr = reinterpret_cast<void*>(offset);
1096 return this->readOrTransferPixelsFrom(surface,
1097 rect,
1098 surfaceColorType,
1099 dstColorType,
1100 offsetAsPtr,
1101 rect.width());
1102 }
1103
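// Before uploading pixel data from client memory we must ensure no pixel transfer buffer is
// bound to GL_PIXEL_UNPACK_BUFFER/GL_PIXEL_PACK_BUFFER, since a bound PBO would cause the
// pointer arguments of TexSubImage2D/ReadPixels to be interpreted as buffer offsets. The
// Chromium transfer-buffer extension uses its own binding points (see the constructor), so
// only the ARB/NV PBO paths need this unbind.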
1104 void GrGLGpu::unbindXferBuffer(GrGpuBufferType type) {
1105 if (this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kARB_PBO &&
1106 this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kNV_PBO) {
1107 return;
1108 }
1109 SkASSERT(type == GrGpuBufferType::kXferCpuToGpu || type == GrGpuBufferType::kXferGpuToCpu);
1110 auto* xferBufferState = this->hwBufferState(type);
1111 if (!xferBufferState->fBufferZeroKnownBound) {
1112 GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
1113 xferBufferState->fBoundBufferUniqueID.makeInvalid();
1114 xferBufferState->fBufferZeroKnownBound = true;
1115 }
1116 }
1117
1118 bool GrGLGpu::uploadColorTypeTexData(GrGLFormat textureFormat,
1119 GrColorType textureColorType,
1120 SkISize texDims,
1121 GrGLenum target,
1122 SkIRect dstRect,
1123 GrColorType srcColorType,
1124 const GrMipLevel texels[],
1125 int mipLevelCount) {
1126 // If we're uploading compressed data then we should be using uploadCompressedTexData
1127 SkASSERT(!GrGLFormatIsCompressed(textureFormat));
1128
1129 SkASSERT(this->glCaps().isFormatTexturable(textureFormat));
1130
1131 size_t bpp = GrColorTypeBytesPerPixel(srcColorType);
1132
1133 // External format and type come from the upload data.
1134 GrGLenum externalFormat;
1135 GrGLenum externalType;
1136 this->glCaps().getTexSubImageExternalFormatAndType(
1137 textureFormat, textureColorType, srcColorType, &externalFormat, &externalType);
1138 if (!externalFormat || !externalType) {
1139 return false;
1140 }
1141 this->uploadTexData(texDims, target, dstRect, externalFormat, externalType, bpp, texels,
1142 mipLevelCount);
1143 return true;
1144 }
1145
1146 bool GrGLGpu::uploadColorToTex(GrGLFormat textureFormat,
1147 SkISize texDims,
1148 GrGLenum target,
1149 std::array<float, 4> color,
1150 uint32_t levelMask) {
1151 GrColorType colorType;
1152 GrGLenum externalFormat, externalType;
1153 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(textureFormat, &externalFormat,
1154 &externalType, &colorType);
1155 if (colorType == GrColorType::kUnknown) {
1156 return false;
1157 }
1158
1159 std::unique_ptr<char[]> pixelStorage;
1160 size_t bpp = 0;
1161 int numLevels = SkMipmap::ComputeLevelCount(texDims) + 1;
1162 STArray<16, GrMipLevel> levels;
1163 levels.resize(numLevels);
1164 SkISize levelDims = texDims;
1165 for (int i = 0; i < numLevels; ++i, levelDims = {std::max(levelDims.width() >> 1, 1),
1166 std::max(levelDims.height() >> 1, 1)}) {
1167 if (levelMask & (1 << i)) {
1168 if (!pixelStorage) {
1169 // Make one tight image at the first size and reuse it for smaller levels.
1170 GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, levelDims);
1171 size_t rb = ii.minRowBytes();
1172 pixelStorage.reset(new char[rb * levelDims.height()]);
1173 if (!GrClearImage(ii, pixelStorage.get(), ii.minRowBytes(), color)) {
1174 return false;
1175 }
1176 bpp = ii.bpp();
1177 }
1178 levels[i] = {pixelStorage.get(), levelDims.width()*bpp, nullptr};
1179 }
1180 }
1181 this->uploadTexData(texDims, target, SkIRect::MakeSize(texDims), externalFormat, externalType,
1182 bpp, levels.begin(), levels.size());
1183 return true;
1184 }
1185
1186 void GrGLGpu::uploadTexData(SkISize texDims,
1187 GrGLenum target,
1188 SkIRect dstRect,
1189 GrGLenum externalFormat,
1190 GrGLenum externalType,
1191 size_t bpp,
1192 const GrMipLevel texels[],
1193 int mipLevelCount) {
1194 SkASSERT(!texDims.isEmpty());
1195 SkASSERT(!dstRect.isEmpty());
1196 SkASSERT(SkIRect::MakeSize(texDims).contains(dstRect));
1197 SkASSERT(mipLevelCount > 0 && mipLevelCount <= SkMipmap::ComputeLevelCount(texDims) + 1);
1198 SkASSERT(mipLevelCount == 1 || dstRect == SkIRect::MakeSize(texDims));
1199
1200 const GrGLCaps& caps = this->glCaps();
1201
1202 bool restoreGLRowLength = false;
1203
1204 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1205 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
1206
1207 SkISize dims = dstRect.size();
1208 for (int level = 0; level < mipLevelCount; ++level, dims = {std::max(dims.width() >> 1, 1),
1209 std::max(dims.height() >> 1, 1)}) {
1210 if (!texels[level].fPixels) {
1211 continue;
1212 }
1213 const size_t trimRowBytes = dims.width() * bpp;
1214 const size_t rowBytes = texels[level].fRowBytes;
1215
1216 if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) {
1217 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
1218 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
1219 restoreGLRowLength = true;
1220 } else {
1221 SkASSERT(rowBytes == trimRowBytes);
1222 }
1223
1224 GL_CALL(TexSubImage2D(target, level, dstRect.x(), dstRect.y(), dims.width(), dims.height(),
1225 externalFormat, externalType, texels[level].fPixels));
1226 }
1227 if (restoreGLRowLength) {
1228 SkASSERT(caps.writePixelsRowBytesSupport());
1229 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1230 }
1231 }
1232
1233 bool GrGLGpu::uploadCompressedTexData(SkTextureCompressionType compressionType,
1234 GrGLFormat format,
1235 SkISize dimensions,
1236 skgpu::Mipmapped mipmapped,
1237 GrGLenum target,
1238 const void* data,
1239 size_t dataSize) {
1240 SkASSERT(format != GrGLFormat::kUnknown);
1241 const GrGLCaps& caps = this->glCaps();
1242
1243 // We only need the internal format for compressed 2D textures.
1244 GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format);
1245 if (!internalFormat) {
1246 return false;
1247 }
1248
1249 SkASSERT(compressionType != SkTextureCompressionType::kNone);
1250
1251 bool useTexStorage = caps.formatSupportsTexStorage(format);
1252
1253 int numMipLevels = 1;
1254 if (mipmapped == skgpu::Mipmapped::kYes) {
1255 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1256 }
1257
1258 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1259
1260 // TODO: Make sure that the width and height that we pass to OpenGL
1261 // is a multiple of the block size.
1262
1263 if (useTexStorage) {
1264 // We never resize or change formats of textures.
1265 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, numMipLevels, internalFormat,
1266 dimensions.width(), dimensions.height()));
1267 if (error != GR_GL_NO_ERROR) {
1268 return false;
1269 }
1270
1271 size_t offset = 0;
1272 for (int level = 0; level < numMipLevels; ++level) {
1273
1274 size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1275 nullptr, false);
1276
1277 error = GL_ALLOC_CALL(CompressedTexSubImage2D(target,
1278 level,
1279 0, // left
1280 0, // top
1281 dimensions.width(),
1282 dimensions.height(),
1283 internalFormat,
1284 SkToInt(levelDataSize),
1285 &((const char*)data)[offset]));
1286
1287 if (error != GR_GL_NO_ERROR) {
1288 return false;
1289 }
1290
1291 offset += levelDataSize;
1292 dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
1293 }
1294 } else {
1295 size_t offset = 0;
1296
1297 for (int level = 0; level < numMipLevels; ++level) {
1298 size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1299 nullptr, false);
1300
1301 const char* rawLevelData = &((const char*)data)[offset];
1302 GrGLenum error = GL_ALLOC_CALL(CompressedTexImage2D(target,
1303 level,
1304 internalFormat,
1305 dimensions.width(),
1306 dimensions.height(),
1307 0, // border
1308 SkToInt(levelDataSize),
1309 rawLevelData));
1310
1311 if (error != GR_GL_NO_ERROR) {
1312 return false;
1313 }
1314
1315 offset += levelDataSize;
1316 dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
1317 }
1318 }
1319 return true;
1320 }
1321
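// Allocates multisampled storage for the currently bound renderbuffer, dispatching on the MSAA
// FBO flavor reported by the caps (standard, Apple ES2, or the EXT/IMG multisample-to-texture
// extensions). Returns false if GL reports an error (e.g. an out-of-memory allocation failure).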
1322 bool GrGLGpu::renderbufferStorageMSAA(const GrGLContext& ctx, int sampleCount, GrGLenum format,
1323 int width, int height) {
1324 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
1325 GrGLenum error;
1326 switch (ctx.caps()->msFBOType()) {
1327 case GrGLCaps::kStandard_MSFBOType:
1328 error = GL_ALLOC_CALL(RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, sampleCount,
1329 format, width, height));
1330 break;
1331 case GrGLCaps::kES_Apple_MSFBOType:
1332 error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2APPLE(
1333 GR_GL_RENDERBUFFER, sampleCount, format, width, height));
1334 break;
1335 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
1336 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
1337 error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2EXT(
1338 GR_GL_RENDERBUFFER, sampleCount, format, width, height));
1339 break;
1340 case GrGLCaps::kNone_MSFBOType:
1341 SkUNREACHABLE;
1342 }
1343 return error == GR_GL_NO_ERROR;
1344 }
1345
1346 bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc,
1347 int sampleCount,
1348 GrGLRenderTarget::IDs* rtIDs) {
1349 rtIDs->fMSColorRenderbufferID = 0;
1350 rtIDs->fMultisampleFBOID = 0;
1351 rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
1352 rtIDs->fSingleSampleFBOID = 0;
1353 rtIDs->fTotalMemorySamplesPerPixel = 0;
1354
1355 SkScopeExit cleanupOnFail([&] {
1356 if (rtIDs->fMSColorRenderbufferID) {
1357 GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1358 }
1359 if (rtIDs->fMultisampleFBOID != rtIDs->fSingleSampleFBOID) {
1360 this->deleteFramebuffer(rtIDs->fMultisampleFBOID);
1361 }
1362 if (rtIDs->fSingleSampleFBOID) {
1363 this->deleteFramebuffer(rtIDs->fSingleSampleFBOID);
1364 }
1365 });
1366
1367 GrGLenum colorRenderbufferFormat = 0; // suppress warning
1368
1369 if (desc.fFormat == GrGLFormat::kUnknown) {
1370 return false;
1371 }
1372
1373 if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
1374 return false;
1375 }
1376
1377 GL_CALL(GenFramebuffers(1, &rtIDs->fSingleSampleFBOID));
1378 if (!rtIDs->fSingleSampleFBOID) {
1379 RENDERENGINE_ABORTF("%s failed to GenFramebuffers!", __func__);
1380 return false;
1381 }
1382
1383 // If we are using multisampling we will create two FBOS. We render to one and then resolve to
1384 // the texture bound to the other. The exception is the IMG multisample extension. With this
1385 // extension the texture is multisampled when rendered to and then auto-resolves it when it is
1386 // rendered from.
1387 if (sampleCount <= 1) {
1388 rtIDs->fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
1389 } else if (this->glCaps().usesImplicitMSAAResolve()) {
1390 // The GrGLRenderTarget will configure the FBO as multisample or not based on need.
1391 rtIDs->fMultisampleFBOID = rtIDs->fSingleSampleFBOID;
1392 } else {
1393 GL_CALL(GenFramebuffers(1, &rtIDs->fMultisampleFBOID));
1394 if (!rtIDs->fMultisampleFBOID) {
1395 return false;
1396 }
1397 GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1398 if (!rtIDs->fMSColorRenderbufferID) {
1399 return false;
1400 }
1401 colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat);
1402 }
1403
1404 #if defined(__has_feature)
1405 #define IS_TSAN __has_feature(thread_sanitizer)
1406 #else
1407 #define IS_TSAN 0
1408 #endif
1409
1410 // below here we may bind the FBO
1411 fHWBoundRenderTargetUniqueID.makeInvalid();
1412 if (rtIDs->fMSColorRenderbufferID) {
1413 SkASSERT(sampleCount > 1);
1414 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID));
1415 if (!this->renderbufferStorageMSAA(*fGLContext, sampleCount, colorRenderbufferFormat,
1416 desc.fSize.width(), desc.fSize.height())) {
1417 return false;
1418 }
1419 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fMultisampleFBOID);
1420 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1421 GR_GL_COLOR_ATTACHMENT0,
1422 GR_GL_RENDERBUFFER,
1423 rtIDs->fMSColorRenderbufferID));
1424 // See skbug.com/12644
1425 #if !IS_TSAN
1426 if (!this->glCaps().skipErrorChecks()) {
1427 GrGLenum status;
1428 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1429 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1430 return false;
1431 }
1432 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1433 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1434 GR_GL_COLOR_ATTACHMENT0,
1435 GR_GL_RENDERBUFFER,
1436 0));
1437 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1438 GR_GL_COLOR_ATTACHMENT0,
1439 GR_GL_RENDERBUFFER,
1440 rtIDs->fMSColorRenderbufferID));
1441 }
1442 }
1443 #endif
1444 rtIDs->fTotalMemorySamplesPerPixel += sampleCount;
1445 }
1446 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fSingleSampleFBOID);
1447 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1448 GR_GL_COLOR_ATTACHMENT0,
1449 desc.fTarget,
1450 desc.fID,
1451 0));
1452 // See skbug.com/12644
1453 #if !IS_TSAN
1454 if (!this->glCaps().skipErrorChecks()) {
1455 GrGLenum status;
1456 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1457 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1458 return false;
1459 }
1460 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1461 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1462 GR_GL_COLOR_ATTACHMENT0,
1463 desc.fTarget,
1464 0,
1465 0));
1466 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1467 GR_GL_COLOR_ATTACHMENT0,
1468 desc.fTarget,
1469 desc.fID,
1470 0));
1471 }
1472 }
1473 #endif
1474
1475 #undef IS_TSAN
1476 ++rtIDs->fTotalMemorySamplesPerPixel;
1477
1478 // We did it!
1479 cleanupOnFail.clear();
1480 return true;
1481 }
1482
1483 // good to set a break-point here to know when createTexture fails
1484 static sk_sp<GrTexture> return_null_texture() {
1485 // SkDEBUGFAIL("null texture");
1486 return nullptr;
1487 }
1488
1489 static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params(
1490 const GrGLInterface* interface,
1491 const GrGLCaps& caps,
1492 GrGLenum target) {
1493 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1494 // drivers have a bug where an FBO won't be complete if it includes a
1495 // texture that is not mipmap complete (considering the filter in use).
1496 GrGLTextureParameters::SamplerOverriddenState state;
1497 state.fMinFilter = GR_GL_NEAREST;
1498 state.fMagFilter = GR_GL_NEAREST;
1499 state.fWrapS = GR_GL_CLAMP_TO_EDGE;
1500 state.fWrapT = GR_GL_CLAMP_TO_EDGE;
1501 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter));
1502 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter));
1503 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS));
1504 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT));
1505 return state;
1506 }
1507
1508 sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions,
1509 const GrBackendFormat& format,
1510 GrRenderable renderable,
1511 int renderTargetSampleCnt,
1512 skgpu::Budgeted budgeted,
1513 GrProtected isProtected,
1514 int mipLevelCount,
1515 uint32_t levelClearMask,
1516 std::string_view label) {
1517 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1518 return nullptr;
1519 }
1520 SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1);
1521
1522 SkASSERT(mipLevelCount > 0);
1523 GrMipmapStatus mipmapStatus =
1524 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1525 GrGLTextureParameters::SamplerOverriddenState initialState;
1526 GrGLTexture::Desc texDesc;
1527 texDesc.fSize = dimensions;
1528 switch (format.textureType()) {
1529 case GrTextureType::kExternal:
1530 case GrTextureType::kNone:
1531 return nullptr;
1532 case GrTextureType::k2D:
1533 texDesc.fTarget = GR_GL_TEXTURE_2D;
1534 break;
1535 case GrTextureType::kRectangle:
1536 if (mipLevelCount > 1 || !this->glCaps().rectangleTextureSupport()) {
1537 return nullptr;
1538 }
1539 texDesc.fTarget = GR_GL_TEXTURE_RECTANGLE;
1540 break;
1541 }
1542 texDesc.fFormat = GrBackendFormats::AsGLFormat(format);
1543 texDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1544 SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown);
1545 SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat));
1546 texDesc.fIsProtected = isProtected;
1547
1548 texDesc.fID = this->createTexture(dimensions, texDesc.fFormat, texDesc.fTarget, renderable,
1549 &initialState, mipLevelCount, texDesc.fIsProtected, label);
1550 if (!texDesc.fID) {
1551 return return_null_texture();
1552 }
1553
1554 sk_sp<GrGLTexture> tex;
1555 if (renderable == GrRenderable::kYes) {
1556 // unbind the texture from the texture unit before binding it to the frame buffer
1557 GL_CALL(BindTexture(texDesc.fTarget, 0));
1558 GrGLRenderTarget::IDs rtIDDesc;
1559
1560 if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) {
1561 GL_CALL(DeleteTextures(1, &texDesc.fID));
1562 return return_null_texture();
1563 }
1564 tex = sk_make_sp<GrGLTextureRenderTarget>(this,
1565 budgeted,
1566 renderTargetSampleCnt,
1567 texDesc,
1568 rtIDDesc,
1569 mipmapStatus,
1570 label);
1571 tex->baseLevelWasBoundToFBO();
1572 } else {
1573 tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipmapStatus, label);
1574 }
1575 // The non-sampler params are still at their default values.
1576 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1577 fResetTimestampForTextureParameters);
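    // Clear any levels requested in levelClearMask (bit i set => clear mip level i; e.g. a mask
    // of 0b101 requests levels 0 and 2). Use the cheapest path available: glClearTexImage when
    // supported, otherwise a per-level FBO clear, and finally a CPU upload of zeros.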
1578 if (levelClearMask) {
1579 if (this->glCaps().clearTextureSupport()) {
1580 GrGLenum externalFormat, externalType;
1581 GrColorType colorType;
1582 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(
1583 texDesc.fFormat, &externalFormat, &externalType, &colorType);
1584 for (int i = 0; i < mipLevelCount; ++i) {
1585 if (levelClearMask & (1U << i)) {
1586 GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType,
1587 nullptr));
1588 }
1589 }
1590 } else if (this->glCaps().canFormatBeFBOColorAttachment(
1591 GrBackendFormats::AsGLFormat(format)) &&
1592 !this->glCaps().performColorClearsAsDraws()) {
1593 this->flushScissorTest(GrScissorTest::kDisabled);
1594 this->disableWindowRectangles();
1595 this->flushColorWrite(true);
1596 this->flushClearColor({0, 0, 0, 0});
1597 for (int i = 0; i < mipLevelCount; ++i) {
1598 if (levelClearMask & (1U << i)) {
1599 this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER,
1600 kDst_TempFBOTarget);
1601 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1602 this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER);
1603 }
1604 }
1605 fHWBoundRenderTargetUniqueID.makeInvalid();
1606 } else {
1607 this->bindTextureToScratchUnit(texDesc.fTarget, tex->textureID());
1608 std::array<float, 4> zeros = {};
1609 this->uploadColorToTex(texDesc.fFormat,
1610 texDesc.fSize,
1611 texDesc.fTarget,
1612 zeros,
1613 levelClearMask);
1614 }
1615 }
1616 return tex;
1617 }
1618
1619 sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions,
1620 const GrBackendFormat& format,
1621 skgpu::Budgeted budgeted,
1622 skgpu::Mipmapped mipmapped,
1623 GrProtected isProtected,
1624 const void* data,
1625 size_t dataSize) {
1626 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1627 return nullptr;
1628 }
1629 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1630
1631 GrGLTextureParameters::SamplerOverriddenState initialState;
1632 GrGLTexture::Desc desc;
1633 desc.fSize = dimensions;
1634 desc.fTarget = GR_GL_TEXTURE_2D;
1635 desc.fOwnership = GrBackendObjectOwnership::kOwned;
1636 desc.fFormat = GrBackendFormats::AsGLFormat(format);
1637 desc.fIsProtected = isProtected;
1638 desc.fID = this->createCompressedTexture2D(desc.fSize, compression, desc.fFormat,
1639 mipmapped, desc.fIsProtected, &initialState);
1640 if (!desc.fID) {
1641 return nullptr;
1642 }
1643
1644 if (data) {
1645 if (!this->uploadCompressedTexData(compression, desc.fFormat, dimensions, mipmapped,
1646 GR_GL_TEXTURE_2D, data, dataSize)) {
1647 GL_CALL(DeleteTextures(1, &desc.fID));
1648 return nullptr;
1649 }
1650 }
1651
1652 // Unbind this texture from the scratch texture unit.
1653 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1654
1655 GrMipmapStatus mipmapStatus = mipmapped == skgpu::Mipmapped::kYes
1656 ? GrMipmapStatus::kValid
1657 : GrMipmapStatus::kNotAllocated;
1658
1659 auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, mipmapStatus,
1660 /*label=*/"GLGpuCreateCompressedTexture");
1661 // The non-sampler params are still at their default values.
1662 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1663 fResetTimestampForTextureParameters);
1664 return tex;
1665 }
1666
1667 GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture(SkISize dimensions,
1668 const GrBackendFormat& format,
1669 skgpu::Mipmapped mipmapped,
1670 GrProtected isProtected) {
1671 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1672 return {};
1673 }
1674
1675 this->handleDirtyContext();
1676
1677 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
1678 if (glFormat == GrGLFormat::kUnknown) {
1679 return {};
1680 }
1681
1682 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1683
1684 GrGLTextureInfo info;
1685 GrGLTextureParameters::SamplerOverriddenState initialState;
1686
1687 info.fTarget = GR_GL_TEXTURE_2D;
1688 info.fFormat = GrGLFormatToEnum(glFormat);
1689 info.fProtected = isProtected;
1690 info.fID = this->createCompressedTexture2D(dimensions, compression, glFormat,
1691 mipmapped, info.fProtected, &initialState);
1692 if (!info.fID) {
1693 return {};
1694 }
1695
1696 // Unbind this texture from the scratch texture unit.
1697 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1698
1699 auto parameters = sk_make_sp<GrGLTextureParameters>();
1700 // The non-sampler params are still at their default values.
1701 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1702 fResetTimestampForTextureParameters);
1703
1704 return GrBackendTextures::MakeGL(
1705 dimensions.width(), dimensions.height(), mipmapped, info, std::move(parameters));
1706 }
1707
1708 bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1709 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1710 const void* data,
1711 size_t length) {
1712 GrGLTextureInfo info;
1713 SkAssertResult(GrBackendTextures::GetGLTextureInfo(backendTexture, &info));
1714
1715 GrBackendFormat format = backendTexture.getBackendFormat();
1716 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
1717 if (glFormat == GrGLFormat::kUnknown) {
1718 return false;
1719 }
1720 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1721
1722 skgpu::Mipmapped mipmapped =
1723 backendTexture.hasMipmaps() ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
1724
1725 this->bindTextureToScratchUnit(info.fTarget, info.fID);
1726
1727 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
1728 // so that the uploads go to the right levels.
1729 if (backendTexture.hasMipmaps() && this->glCaps().mipmapLevelControlSupport()) {
1730 auto params = get_gl_texture_params(backendTexture);
1731 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
1732 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
1733 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
1734 nonsamplerState.fBaseMipMapLevel = 0;
1735 }
1736 int numMipLevels =
1737 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
1738 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
1739 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
1740             nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
1741 }
1742 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
1743 }
1744
1745 bool result = this->uploadCompressedTexData(compression,
1746 glFormat,
1747 backendTexture.dimensions(),
1748 mipmapped,
1749 GR_GL_TEXTURE_2D,
1750 data,
1751 length);
1752
1753 // Unbind this texture from the scratch texture unit.
1754 this->bindTextureToScratchUnit(info.fTarget, 0);
1755
1756 return result;
1757 }
1758
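// Probes for a stencil format that can be combined with 'format' in a complete FBO: attaches a
// small renderable texture of 'format' plus each candidate stencil renderbuffer in turn, and
// caches the index of the first combination that reports GR_GL_FRAMEBUFFER_COMPLETE.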
1759 int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) {
1760 static const int kSize = 16;
1761 SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format));
1762
1763 if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) {
1764 // Default to unsupported, set this if we find a stencil format that works.
1765 int firstWorkingStencilFormatIndex = -1;
1766
1767 GrGLuint colorID = this->createTexture({kSize, kSize}, format, GR_GL_TEXTURE_2D,
1768 GrRenderable::kYes,
1769 nullptr,
1770 1,
1771 GrProtected::kNo,
1772 /*label=*/"Skia");
1773 if (!colorID) {
1774 return -1;
1775 }
1776 // unbind the texture from the texture unit before binding it to the frame buffer
1777 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1778
1779 // Create Framebuffer
1780 GrGLuint fb = 0;
1781 GL_CALL(GenFramebuffers(1, &fb));
1782 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb);
1783 fHWBoundRenderTargetUniqueID.makeInvalid();
1784 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1785 GR_GL_COLOR_ATTACHMENT0,
1786 GR_GL_TEXTURE_2D,
1787 colorID,
1788 0));
1789 GrGLuint sbRBID = 0;
1790 GL_CALL(GenRenderbuffers(1, &sbRBID));
1791
1792         // Try each stencil format until one yields a complete FBO.
1793 int stencilFmtCnt = this->glCaps().stencilFormats().size();
1794 if (sbRBID) {
1795 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1796 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1797 GrGLFormat sFmt = this->glCaps().stencilFormats()[i];
1798 GrGLenum error = GL_ALLOC_CALL(RenderbufferStorage(
1799 GR_GL_RENDERBUFFER, GrGLFormatToEnum(sFmt), kSize, kSize));
1800 if (error == GR_GL_NO_ERROR) {
1801 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1802 GR_GL_STENCIL_ATTACHMENT,
1803 GR_GL_RENDERBUFFER, sbRBID));
1804 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1805 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1806 GR_GL_DEPTH_ATTACHMENT,
1807 GR_GL_RENDERBUFFER, sbRBID));
1808 } else {
1809 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1810 GR_GL_DEPTH_ATTACHMENT,
1811 GR_GL_RENDERBUFFER, 0));
1812 }
1813 GrGLenum status;
1814 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1815 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1816 firstWorkingStencilFormatIndex = i;
1817 break;
1818 }
1819 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1820 GR_GL_STENCIL_ATTACHMENT,
1821 GR_GL_RENDERBUFFER, 0));
1822 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1823 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1824 GR_GL_DEPTH_ATTACHMENT,
1825 GR_GL_RENDERBUFFER, 0));
1826 }
1827 }
1828 }
1829 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1830 }
1831 GL_CALL(DeleteTextures(1, &colorID));
1832 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
1833 this->deleteFramebuffer(fb);
1834 fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex);
1835 }
1836 return this->glCaps().getStencilFormatIndexForFormat(format);
1837 }
1838
1839 static void set_khr_debug_label(GrGLGpu* gpu, const GrGLuint id, std::string_view label) {
1840 const std::string khr_debug_label = label.empty() ? "Skia" : std::string(label);
1841 if (gpu->glCaps().debugSupport()) {
1842 GR_GL_CALL(gpu->glInterface(), ObjectLabel(GR_GL_TEXTURE, id, -1, khr_debug_label.c_str()));
1843 }
1844 }
1845
1846 GrGLuint GrGLGpu::createCompressedTexture2D(
1847 SkISize dimensions,
1848 SkTextureCompressionType compression,
1849 GrGLFormat format,
1850 skgpu::Mipmapped mipmapped,
1851 GrProtected isProtected,
1852 GrGLTextureParameters::SamplerOverriddenState* initialState) {
1853 if (format == GrGLFormat::kUnknown) {
1854 return 0;
1855 }
1856 GrGLuint id = 0;
1857 GL_CALL(GenTextures(1, &id));
1858 if (!id) {
1859 return 0;
1860 }
1861
1862 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id);
1863
1864 set_khr_debug_label(this, id, /*label=*/"Skia");
1865
1866 *initialState = set_initial_texture_params(this->glInterface(),
1867 this->glCaps(),
1868 GR_GL_TEXTURE_2D);
1869
1870 if (GrProtected::kYes == isProtected) {
1871 if (this->glCaps().supportsProtectedContent()) {
1872 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_PROTECTED_EXT, GR_GL_TRUE));
1873 } else {
1874 GL_CALL(DeleteTextures(1, &id));
1875 return 0;
1876 }
1877 }
1878
1879 return id;
1880 }
1881
1882 GrGLuint GrGLGpu::createTexture(SkISize dimensions,
1883 GrGLFormat format,
1884 GrGLenum target,
1885 GrRenderable renderable,
1886 GrGLTextureParameters::SamplerOverriddenState* initialState,
1887 int mipLevelCount,
1888 GrProtected isProtected,
1889 std::string_view label) {
1890 SkASSERT(format != GrGLFormat::kUnknown);
1891 SkASSERT(!GrGLFormatIsCompressed(format));
1892
1893 GrGLuint id = 0;
1894 GL_CALL(GenTextures(1, &id));
1895
1896 if (!id) {
1897 return 0;
1898 }
1899
1900 this->bindTextureToScratchUnit(target, id);
1901
1902 set_khr_debug_label(this, id, label);
1903
1904 if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) {
1905 // provides a hint about how this texture will be used
1906 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT));
1907 }
1908
1909 if (initialState) {
1910 *initialState = set_initial_texture_params(this->glInterface(), this->glCaps(), target);
1911 } else {
1912 set_initial_texture_params(this->glInterface(), this->glCaps(), target);
1913 }
1914
1915 if (GrProtected::kYes == isProtected) {
1916 if (this->glCaps().supportsProtectedContent()) {
1917 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_PROTECTED_EXT, GR_GL_TRUE));
1918 } else {
1919 GL_CALL(DeleteTextures(1, &id));
1920 return 0;
1921 }
1922 }
1923
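    // Allocate the texture storage: prefer immutable storage via glTexStorage2D when the format
    // supports it; otherwise fall back to one glTexImage2D call per mip level with a null data
    // pointer, sizing each level by successive halving.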
1924 GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format);
1925
1926 bool success = false;
1927 if (internalFormat) {
1928 if (this->glCaps().formatSupportsTexStorage(format)) {
1929 auto levelCount = std::max(mipLevelCount, 1);
1930 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, levelCount, internalFormat,
1931 dimensions.width(), dimensions.height()));
1932 success = (error == GR_GL_NO_ERROR);
1933 } else {
1934 GrGLenum externalFormat, externalType;
1935 this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType);
1936 GrGLenum error = GR_GL_NO_ERROR;
1937 if (externalFormat && externalType) {
1938 // If we don't unbind here then nullptr is treated as a zero offset into the bound
1939 // transfer buffer rather than an indication that there is no data to copy.
1940 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1941 for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) {
1942 const int twoToTheMipLevel = 1 << level;
1943 const int currentWidth = std::max(1, dimensions.width() / twoToTheMipLevel);
1944 const int currentHeight = std::max(1, dimensions.height() / twoToTheMipLevel);
1945 error = GL_ALLOC_CALL(TexImage2D(target, level, internalFormat, currentWidth,
1946 currentHeight, 0, externalFormat, externalType,
1947 nullptr));
1948 }
1949 success = (error == GR_GL_NO_ERROR);
1950 }
1951 }
1952 }
1953 if (success) {
1954 return id;
1955 }
1956 GL_CALL(DeleteTextures(1, &id));
1957 return 0;
1958 }
1959
1960 sk_sp<GrAttachment> GrGLGpu::makeStencilAttachment(const GrBackendFormat& colorFormat,
1961 SkISize dimensions, int numStencilSamples) {
1962 int sIdx = this->getCompatibleStencilIndex(GrBackendFormats::AsGLFormat(colorFormat));
1963 if (sIdx < 0) {
1964 return nullptr;
1965 }
1966 GrGLFormat sFmt = this->glCaps().stencilFormats()[sIdx];
1967
1968 auto stencil = GrGLAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1969 if (stencil) {
1970 fStats.incStencilAttachmentCreates();
1971 }
1972 return stencil;
1973 }
1974
1975 sk_sp<GrAttachment> GrGLGpu::makeMSAAAttachment(SkISize dimensions, const GrBackendFormat& format,
1976 int numSamples, GrProtected isProtected,
1977 GrMemoryless isMemoryless) {
1978 SkASSERT(isMemoryless == GrMemoryless::kNo);
1979 return GrGLAttachment::MakeMSAA(
1980 this, dimensions, numSamples, GrBackendFormats::AsGLFormat(format));
1981 }
1982
1983 ////////////////////////////////////////////////////////////////////////////////
1984
1985 sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size,
1986 GrGpuBufferType intendedType,
1987 GrAccessPattern accessPattern) {
1988 return GrGLBuffer::Make(this, size, intendedType, accessPattern);
1989 }
1990
1991 void GrGLGpu::flushScissorTest(GrScissorTest scissorTest) {
1992 if (GrScissorTest::kEnabled == scissorTest) {
1993 if (kYes_TriState != fHWScissorSettings.fEnabled) {
1994 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1995 fHWScissorSettings.fEnabled = kYes_TriState;
1996 }
1997 } else {
1998 if (kNo_TriState != fHWScissorSettings.fEnabled) {
1999 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2000 fHWScissorSettings.fEnabled = kNo_TriState;
2001 }
2002 }
2003 }
2004
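// Note: GL scissor and viewport rects are specified in GL's bottom-left-origin window space;
// GrNativeRect::MakeRelativeTo performs the flip for top-left-origin render targets.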
2005 void GrGLGpu::flushScissorRect(const SkIRect& scissor, int rtHeight, GrSurfaceOrigin rtOrigin) {
2006 SkASSERT(fHWScissorSettings.fEnabled == TriState::kYes_TriState);
2007 auto nativeScissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissor);
2008 if (fHWScissorSettings.fRect != nativeScissor) {
2009 GL_CALL(Scissor(nativeScissor.fX, nativeScissor.fY, nativeScissor.fWidth,
2010 nativeScissor.fHeight));
2011 fHWScissorSettings.fRect = nativeScissor;
2012 }
2013 }
2014
2015 void GrGLGpu::flushViewport(const SkIRect& viewport, int rtHeight, GrSurfaceOrigin rtOrigin) {
2016 auto nativeViewport = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, viewport);
2017 if (fHWViewport != nativeViewport) {
2018 GL_CALL(Viewport(nativeViewport.fX, nativeViewport.fY,
2019 nativeViewport.fWidth, nativeViewport.fHeight));
2020 fHWViewport = nativeViewport;
2021 }
2022 }
2023
2024 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
2025 const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
2026 #ifndef USE_NSIGHT
2027 typedef GrWindowRectsState::Mode Mode;
2028 // Window rects can't be used on-screen.
2029 SkASSERT(!windowState.enabled() || !rt->glRTFBOIDis0());
2030 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
2031
2032 if (!this->caps()->maxWindowRectangles() ||
2033 fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) {
2034 return;
2035 }
2036
2037 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
2038 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
2039 int numWindows = std::min(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
2040 SkASSERT(windowState.numWindows() == numWindows);
2041
2042 GrNativeRect glwindows[GrWindowRectangles::kMaxWindows];
2043 const SkIRect* skwindows = windowState.windows().data();
2044 for (int i = 0; i < numWindows; ++i) {
2045 glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]);
2046 }
2047
2048 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
2049 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
2050
2051 fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState);
2052 #endif
2053 }
2054
2055 void GrGLGpu::disableWindowRectangles() {
2056 #ifndef USE_NSIGHT
2057 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
2058 return;
2059 }
2060 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
2061 fHWWindowRectsState.setDisabled();
2062 #endif
2063 }
2064
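// Binds the program for 'programInfo' and flushes the fixed-function state it requires (blend,
// stencil, scissor, window rectangles, conservative raster, wireframe) before binding the render
// target. Returns false if a program could not be created.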
2065 bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget, bool useMultisampleFBO,
2066 const GrProgramInfo& programInfo) {
2067 this->handleDirtyContext();
2068
2069 sk_sp<GrGLProgram> program = fProgramCache->findOrCreateProgram(this->getContext(),
2070 programInfo);
2071 if (!program) {
2072 GrCapsDebugf(this->caps(), "Failed to create program!\n");
2073 return false;
2074 }
2075
2076 this->flushProgram(std::move(program));
2077
2078 // Swizzle the blend to match what the shader will output.
2079 this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(),
2080 programInfo.pipeline().writeSwizzle());
2081
2082 fHWProgram->updateUniforms(renderTarget, programInfo);
2083
2084 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
2085 GrStencilSettings stencil;
2086 if (programInfo.isStencilEnabled()) {
2087 SkASSERT(glRT->getStencilAttachment(useMultisampleFBO));
2088 stencil.reset(*programInfo.userStencilSettings(),
2089 programInfo.pipeline().hasStencilClip(),
2090 glRT->numStencilBits(useMultisampleFBO));
2091 }
2092 this->flushStencil(stencil, programInfo.origin());
2093 this->flushScissorTest(GrScissorTest(programInfo.pipeline().isScissorTestEnabled()));
2094 this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(),
2095 glRT, programInfo.origin());
2096 this->flushConservativeRasterState(programInfo.pipeline().usesConservativeRaster());
2097 this->flushWireframeState(programInfo.pipeline().isWireframe());
2098
2099 // This must come after textures are flushed because a texture may need
2100 // to be msaa-resolved (which will modify bound FBO state).
2101 this->flushRenderTarget(glRT, useMultisampleFBO);
2102
2103 return true;
2104 }
2105
2106 void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
2107 if (!program) {
2108 fHWProgram.reset();
2109 fHWProgramID = 0;
2110 return;
2111 }
2112 SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
2113 if (program == fHWProgram) {
2114 return;
2115 }
2116 auto id = program->programID();
2117 SkASSERT(id);
2118 GL_CALL(UseProgram(id));
2119 fHWProgram = std::move(program);
2120 fHWProgramID = id;
2121 }
2122
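// Binds a program by raw GL id. Since we no longer know which GrGLProgram is bound, the cached
// fHWProgram is dropped; only fHWProgramID is tracked until the next flushProgram(sk_sp) call.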
2123 void GrGLGpu::flushProgram(GrGLuint id) {
2124 SkASSERT(id);
2125 if (fHWProgramID == id) {
2126 SkASSERT(!fHWProgram);
2127 return;
2128 }
2129 fHWProgram.reset();
2130 GL_CALL(UseProgram(id));
2131 fHWProgramID = id;
2132 }
2133
2134 void GrGLGpu::didDrawTo(GrRenderTarget* rt) {
2135 SkASSERT(fHWWriteToColor != kUnknown_TriState);
2136 if (fHWWriteToColor == kYes_TriState) {
2137 // The bounds are only used to check for empty and we don't know the bounds. The origin
2138 // is irrelevant if there are no bounds.
2139 this->didWriteToSurface(rt, kTopLeft_GrSurfaceOrigin, /*bounds=*/nullptr);
2140 }
2141 }
2142
2143 GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
2144 this->handleDirtyContext();
2145
2146 // Index buffer state is tied to the vertex array.
2147 if (GrGpuBufferType::kIndex == type) {
2148 this->bindVertexArray(0);
2149 }
2150
2151 auto* bufferState = this->hwBufferState(type);
2152 if (buffer->isCpuBuffer()) {
2153 if (!bufferState->fBufferZeroKnownBound) {
2154 GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
2155 bufferState->fBufferZeroKnownBound = true;
2156 bufferState->fBoundBufferUniqueID.makeInvalid();
2157 }
2158 } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
2159 bufferState->fBoundBufferUniqueID) {
2160 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
2161 GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
2162 bufferState->fBufferZeroKnownBound = false;
2163 bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
2164 }
2165
2166 return bufferState->fGLTarget;
2167 }
2168
2169 void GrGLGpu::clear(const GrScissorState& scissor,
2170 std::array<float, 4> color,
2171 GrRenderTarget* target,
2172 bool useMultisampleFBO,
2173 GrSurfaceOrigin origin) {
2174 // parent class should never let us get here with no RT
2175 SkASSERT(target);
2176 SkASSERT(!this->caps()->performColorClearsAsDraws());
2177 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2178
2179 this->handleDirtyContext();
2180
2181 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2182
2183 this->flushRenderTarget(glRT, useMultisampleFBO);
2184 this->flushScissor(scissor, glRT->height(), origin);
2185 this->disableWindowRectangles();
2186 this->flushColorWrite(true);
2187 this->flushClearColor(color);
2188 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
2189 this->didWriteToSurface(glRT, origin, scissor.enabled() ? &scissor.rect() : nullptr);
2190 }
2191
2192 static bool use_tiled_rendering(const GrGLCaps& glCaps,
2193 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2194 // Only use the tiled rendering extension if we can explicitly clear and discard the stencil.
2195 // Otherwise it's faster to just not use it.
2196 return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp &&
2197 GrStoreOp::kDiscard == stencilLoadStore.fStoreOp;
2198 }
2199
2200 void GrGLGpu::beginCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2201 const SkIRect& bounds, GrSurfaceOrigin origin,
2202 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2203 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2204 SkASSERT(!fIsExecutingCommandBuffer_DebugOnly);
2205
2206 this->handleDirtyContext();
2207
2208 this->flushRenderTarget(rt, useMultisampleFBO);
2209 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true);
2210
2211 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2212 auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, rt->height(), bounds);
2213 GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp)
2214 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2215 SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering().
2216 GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth,
2217 nativeBounds.fHeight, preserveMask));
2218 }
2219
2220 GrGLbitfield clearMask = 0;
2221 if (GrLoadOp::kClear == colorLoadStore.fLoadOp) {
2222 SkASSERT(!this->caps()->performColorClearsAsDraws());
2223 this->flushClearColor(colorLoadStore.fClearColor);
2224 this->flushColorWrite(true);
2225 clearMask |= GR_GL_COLOR_BUFFER_BIT;
2226 }
2227 if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) {
2228 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2229 GL_CALL(StencilMask(0xffffffff));
2230 GL_CALL(ClearStencil(0));
2231 clearMask |= GR_GL_STENCIL_BUFFER_BIT;
2232 }
2233 if (clearMask) {
2234 this->flushScissorTest(GrScissorTest::kDisabled);
2235 this->disableWindowRectangles();
2236 GL_CALL(Clear(clearMask));
2237 if (clearMask & GR_GL_COLOR_BUFFER_BIT) {
2238 this->didWriteToSurface(rt, origin, nullptr);
2239 }
2240 }
2241 }
2242
2243 void GrGLGpu::endCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2244 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2245 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2246 SkASSERT(fIsExecutingCommandBuffer_DebugOnly);
2247
2248 this->handleDirtyContext();
2249
2250 if (rt->uniqueID() != fHWBoundRenderTargetUniqueID ||
2251 useMultisampleFBO != fHWBoundFramebufferIsMSAA) {
2252 // The framebuffer binding changed in the middle of a command buffer. We should have already
2253 // printed a warning during onFBOChanged.
2254 return;
2255 }
2256
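    // If supported, tell the driver which attachments can be discarded at the end of the pass.
    // On tiled GPUs this avoids writing the discarded contents back out to memory.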
2257 if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) {
2258 STArray<2, GrGLenum> discardAttachments;
2259 if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) {
2260 discardAttachments.push_back(
2261 rt->isFBO0(useMultisampleFBO) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0);
2262 }
2263 if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) {
2264 discardAttachments.push_back(
2265 rt->isFBO0(useMultisampleFBO) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT);
2266 }
2267
2268 if (!discardAttachments.empty()) {
2269 if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) {
2270 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.size(),
2271 discardAttachments.begin()));
2272 } else {
2273 SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType());
2274 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.size(),
2275 discardAttachments.begin()));
2276 }
2277 }
2278 }
2279
2280 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2281 GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp)
2282 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2283 // Handled by use_tiled_rendering().
2284 SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp);
2285 GL_CALL(EndTiling(preserveMask));
2286 }
2287
2288 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false);
2289 }
2290
2291 void GrGLGpu::clearStencilClip(const GrScissorState& scissor, bool insideStencilMask,
2292 GrRenderTarget* target, bool useMultisampleFBO,
2293 GrSurfaceOrigin origin) {
2294 SkASSERT(target);
2295 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2296 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2297 this->handleDirtyContext();
2298
2299 GrAttachment* sb = target->getStencilAttachment(useMultisampleFBO);
2300 if (!sb) {
2301 // We should only get here if we marked a proxy as requiring a SB. However,
2302 // the SB creation could later fail. Likely clipping is going to go awry now.
2303 return;
2304 }
2305
2306 GrGLint stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
2307 #if 0
2308 SkASSERT(stencilBitCount > 0);
2309 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
2310 #else
2311 // we could just clear the clip bit but when we go through
2312 // ANGLE a partial stencil mask will cause clears to be
2313 // turned into draws. Our contract on OpsTask says that
2314 // changing the clip between stencil passes may or may not
2315 // zero the client's clip bits. So we just clear the whole thing.
2316 static const GrGLint clipStencilMask = ~0;
2317 #endif
2318 GrGLint value;
2319 if (insideStencilMask) {
2320 value = (1 << (stencilBitCount - 1));
2321 } else {
2322 value = 0;
2323 }
2324 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2325 this->flushRenderTarget(glRT, useMultisampleFBO);
2326
2327 this->flushScissor(scissor, glRT->height(), origin);
2328 this->disableWindowRectangles();
2329
2330 GL_CALL(StencilMask((uint32_t) clipStencilMask));
2331 GL_CALL(ClearStencil(value));
2332 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2333 fHWStencilSettings.invalidate();
2334 }
2335
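// Shared implementation for reads into client memory and transfers into a GPU buffer:
// 'offsetOrPtr' is either a client pointer or an offset into the bound pixel-pack buffer,
// depending on the caller.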
2336 bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface,
2337 SkIRect rect,
2338 GrColorType surfaceColorType,
2339 GrColorType dstColorType,
2340 void* offsetOrPtr,
2341 int rowWidthInPixels) {
2342 SkASSERT(surface);
2343
2344 auto format = GrBackendFormats::AsGLFormat(surface->backendFormat());
2345 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2346 if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) {
2347 return false;
2348 }
2349 GrGLenum externalFormat = 0;
2350 GrGLenum externalType = 0;
2351 this->glCaps().getReadPixelsFormat(
2352 format, surfaceColorType, dstColorType, &externalFormat, &externalType);
2353 if (!externalFormat || !externalType) {
2354 return false;
2355 }
2356
2357 if (renderTarget) {
2358 // Always bind the single sample FBO since we can't read pixels from an MSAA framebuffer.
2359 constexpr bool useMultisampleFBO = false;
2360 if (renderTarget->numSamples() > 1 && renderTarget->isFBO0(useMultisampleFBO)) {
2361 return false;
2362 }
2363 this->flushRenderTarget(renderTarget, useMultisampleFBO);
2364 } else {
2365 // Use a temporary FBO.
2366 this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
2367 fHWBoundRenderTargetUniqueID.makeInvalid();
2368 }
2369
2370 // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
2371 if (rowWidthInPixels != rect.width()) {
2372 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2373 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels));
2374 }
2375 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1));
2376
2377 GL_CALL(ReadPixels(rect.left(),
2378 rect.top(),
2379 rect.width(),
2380 rect.height(),
2381 externalFormat,
2382 externalType,
2383 offsetOrPtr));
2384
2385 if (rowWidthInPixels != rect.width()) {
2386 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2387 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2388 }
2389
2390 if (!renderTarget) {
2391 this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER);
2392 }
2393 return true;
2394 }
2395
2396 bool GrGLGpu::onReadPixels(GrSurface* surface,
2397 SkIRect rect,
2398 GrColorType surfaceColorType,
2399 GrColorType dstColorType,
2400 void* buffer,
2401 size_t rowBytes) {
2402 SkASSERT(surface);
2403
2404 size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType);
2405
2406 // GL_PACK_ROW_LENGTH is in terms of pixels not bytes.
2407 int rowPixelWidth;
2408
2409 if (rowBytes == SkToSizeT(rect.width()*bytesPerPixel)) {
2410 rowPixelWidth = rect.width();
2411 } else {
2412 SkASSERT(!(rowBytes % bytesPerPixel));
2413 rowPixelWidth = rowBytes / bytesPerPixel;
2414 }
2415 this->unbindXferBuffer(GrGpuBufferType::kXferGpuToCpu);
2416 return this->readOrTransferPixelsFrom(surface,
2417 rect,
2418 surfaceColorType,
2419 dstColorType,
2420 buffer,
2421 rowPixelWidth);
2422 }
2423
2424 GrOpsRenderPass* GrGLGpu::onGetOpsRenderPass(
2425 GrRenderTarget* rt,
2426 bool useMultisampleFBO,
2427 GrAttachment*,
2428 GrSurfaceOrigin origin,
2429 const SkIRect& bounds,
2430 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
2431 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
2432 const TArray<GrSurfaceProxy*, true>& sampledProxies,
2433 GrXferBarrierFlags renderPassXferBarriers) {
2434 if (!fCachedOpsRenderPass) {
2435 fCachedOpsRenderPass = std::make_unique<GrGLOpsRenderPass>(this);
2436 }
2437 if (useMultisampleFBO && rt->numSamples() == 1) {
2438 // We will be using dynamic msaa. Ensure there is an attachment.
2439 auto glRT = static_cast<GrGLRenderTarget*>(rt);
2440 if (!glRT->ensureDynamicMSAAAttachment()) {
2441 SkDebugf("WARNING: Failed to make dmsaa attachment. Render pass will be dropped.");
2442 return nullptr;
2443 }
2444 }
2445 fCachedOpsRenderPass->set(rt, useMultisampleFBO, bounds, origin, colorInfo, stencilInfo);
2446 return fCachedOpsRenderPass.get();
2447 }
2448
2449 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, bool useMultisampleFBO) {
2450 SkASSERT(target);
2451 GrGpuResource::UniqueID rtID = target->uniqueID();
2452 if (fHWBoundRenderTargetUniqueID != rtID ||
2453 fHWBoundFramebufferIsMSAA != useMultisampleFBO ||
2454 target->mustRebind(useMultisampleFBO)) {
2455 target->bind(useMultisampleFBO);
2456 #ifdef SK_DEBUG
2457 // don't do this check in Chromium -- this is causing
2458 // lots of repeated command buffer flushes when the compositor is
2459 // rendering with Ganesh, which is really slow; even too slow for
2460 // Debug mode.
2461 // Also don't do this when we know glCheckFramebufferStatus() may have side effects.
2462 if (!this->glCaps().skipErrorChecks() &&
2463 !this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
2464 GrGLenum status;
2465 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
2466 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
2467 SkDebugf("GrGLGpu::flushRenderTargetNoColorWrites glCheckFramebufferStatus %x\n",
2468 status);
2469 }
2470 }
2471 #endif
2472 fHWBoundRenderTargetUniqueID = rtID;
2473 fHWBoundFramebufferIsMSAA = useMultisampleFBO;
2474 this->flushViewport(SkIRect::MakeSize(target->dimensions()),
2475 target->height(),
2476 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
2477 }
2478 if (this->caps()->workarounds().force_update_scissor_state_when_binding_fbo0) {
2479 // The driver forgets the correct scissor state when using FBO 0.
2480 if (!fHWScissorSettings.fRect.isInvalid()) {
2481 const GrNativeRect& r = fHWScissorSettings.fRect;
2482 GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight));
2483 }
2484 if (fHWScissorSettings.fEnabled == kYes_TriState) {
2485 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2486 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2487 } else if (fHWScissorSettings.fEnabled == kNo_TriState) {
2488 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2489 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2490 }
2491 }
2492
2493 if (this->glCaps().srgbWriteControl()) {
2494 this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat()));
2495 }
2496
2497 if (this->glCaps().shouldQueryImplementationReadSupport(target->format())) {
2498 GrGLint format;
2499 GrGLint type;
2500 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format);
2501 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type);
2502 this->glCaps().didQueryImplementationReadSupport(target->format(), format, type);
2503 }
2504 }
2505
2506 void GrGLGpu::flushFramebufferSRGB(bool enable) {
2507 if (enable && kYes_TriState != fHWSRGBFramebuffer) {
2508 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
2509 fHWSRGBFramebuffer = kYes_TriState;
2510 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
2511 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
2512 fHWSRGBFramebuffer = kNo_TriState;
2513 }
2514 }
2515
2516 GrGLenum GrGLGpu::prepareToDraw(GrPrimitiveType primitiveType) {
2517 fStats.incNumDraws();
2518
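    // Some drivers need GL_CULL_FACE toggled when switching from non-line to line primitives
    // (see the cap below); the enable/disable pair leaves the cull-face state unchanged.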
2519 if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
2520 GrIsPrimTypeLines(primitiveType) && !GrIsPrimTypeLines(fLastPrimitiveType)) {
2521 GL_CALL(Enable(GR_GL_CULL_FACE));
2522 GL_CALL(Disable(GR_GL_CULL_FACE));
2523 }
2524 fLastPrimitiveType = primitiveType;
2525
2526 switch (primitiveType) {
2527 case GrPrimitiveType::kTriangles:
2528 return GR_GL_TRIANGLES;
2529 case GrPrimitiveType::kTriangleStrip:
2530 return GR_GL_TRIANGLE_STRIP;
2531 case GrPrimitiveType::kPoints:
2532 return GR_GL_POINTS;
2533 case GrPrimitiveType::kLines:
2534 return GR_GL_LINES;
2535 case GrPrimitiveType::kLineStrip:
2536 return GR_GL_LINE_STRIP;
2537 }
2538 SK_ABORT("invalid GrPrimitiveType");
2539 }
2540
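// Some GL implementations can only resolve the entire framebuffer, so widen the resolve rect to
// the render target's full dimensions when the caps require it.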
2541 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
2542 auto glRT = static_cast<GrGLRenderTarget*>(target);
2543 if (this->glCaps().framebufferResolvesMustBeFullSize()) {
2544 this->resolveRenderFBOs(glRT, SkIRect::MakeSize(glRT->dimensions()),
2545 ResolveDirection::kMSAAToSingle);
2546 } else {
2547 this->resolveRenderFBOs(glRT, resolveRect, ResolveDirection::kMSAAToSingle);
2548 }
2549 }
2550
2551 void GrGLGpu::resolveRenderFBOs(GrGLRenderTarget* rt, const SkIRect& resolveRect,
2552 ResolveDirection resolveDirection,
2553 bool invalidateReadBufferAfterBlit) {
2554 this->handleDirtyContext();
2555 rt->bindForResolve(resolveDirection);
2556
2557 const GrGLCaps& caps = this->glCaps();
2558
2559 // make sure we go through flushRenderTarget() since we've modified
2560 // the bound DRAW FBO ID.
2561 fHWBoundRenderTargetUniqueID.makeInvalid();
2562 if (GrGLCaps::kES_Apple_MSFBOType == caps.msFBOType()) {
2563 // The Apple extension doesn't support blitting from single to multisample.
2564 SkASSERT(resolveDirection != ResolveDirection::kSingleToMSAA);
2565 SkASSERT(resolveRect == SkIRect::MakeSize(rt->dimensions()));
2566 // Apple's extension uses the scissor as the blit bounds.
2567 // Passing in kTopLeft_GrSurfaceOrigin will make sure no transformation of the rect
2568 // happens inside flushScissor since resolveRect is already in native device coordinates.
2569 GrScissorState scissor(rt->dimensions());
2570 SkAssertResult(scissor.set(resolveRect));
2571 this->flushScissor(scissor, rt->height(), kTopLeft_GrSurfaceOrigin);
2572 this->disableWindowRectangles();
2573 GL_CALL(ResolveMultisampleFramebuffer());
2574 } else {
2575 SkASSERT(!caps.framebufferResolvesMustBeFullSize() ||
2576 resolveRect == SkIRect::MakeSize(rt->dimensions()));
2577 int l = resolveRect.x();
2578 int b = resolveRect.y();
2579 int r = resolveRect.x() + resolveRect.width();
2580 int t = resolveRect.y() + resolveRect.height();
2581
2582 // BlitFrameBuffer respects the scissor, so disable it.
2583 this->flushScissorTest(GrScissorTest::kDisabled);
2584 this->disableWindowRectangles();
2585 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2586 }
2587
2588 if (caps.invalidateFBType() != GrGLCaps::kNone_InvalidateFBType &&
2589 invalidateReadBufferAfterBlit) {
2590 // Invalidate the read FBO attachment after the blit, in hopes that this allows the driver
2591 // to perform tiling optimizations.
2592 bool readBufferIsMSAA = resolveDirection == ResolveDirection::kMSAAToSingle;
2593 GrGLenum colorDiscardAttachment = rt->isFBO0(readBufferIsMSAA) ? GR_GL_COLOR
2594 : GR_GL_COLOR_ATTACHMENT0;
2595 if (caps.invalidateFBType() == GrGLCaps::kInvalidate_InvalidateFBType) {
2596 GL_CALL(InvalidateFramebuffer(GR_GL_READ_FRAMEBUFFER, 1, &colorDiscardAttachment));
2597 } else {
2598 SkASSERT(caps.invalidateFBType() == GrGLCaps::kDiscard_InvalidateFBType);
2599 // glDiscardFramebuffer only accepts GL_FRAMEBUFFER.
2600 rt->bind(readBufferIsMSAA);
2601 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, 1, &colorDiscardAttachment));
2602 }
2603 }
2604 }
2605
2606 namespace {
2607
2608
2609 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2610 static const GrGLenum gTable[kGrStencilOpCount] = {
2611 GR_GL_KEEP, // kKeep
2612 GR_GL_ZERO, // kZero
2613 GR_GL_REPLACE, // kReplace
2614 GR_GL_INVERT, // kInvert
2615 GR_GL_INCR_WRAP, // kIncWrap
2616 GR_GL_DECR_WRAP, // kDecWrap
2617 GR_GL_INCR, // kIncClamp
2618 GR_GL_DECR, // kDecClamp
2619 };
2620 static_assert(0 == (int)GrStencilOp::kKeep);
2621 static_assert(1 == (int)GrStencilOp::kZero);
2622 static_assert(2 == (int)GrStencilOp::kReplace);
2623 static_assert(3 == (int)GrStencilOp::kInvert);
2624 static_assert(4 == (int)GrStencilOp::kIncWrap);
2625 static_assert(5 == (int)GrStencilOp::kDecWrap);
2626 static_assert(6 == (int)GrStencilOp::kIncClamp);
2627 static_assert(7 == (int)GrStencilOp::kDecClamp);
2628 SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
2629 return gTable[(int)op];
2630 }
2631
2632 void set_gl_stencil(const GrGLInterface* gl,
2633 const GrStencilSettings::Face& face,
2634 GrGLenum glFace) {
2635 GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
2636 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
2637 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
2638
2639 GrGLint ref = face.fRef;
2640 GrGLint mask = face.fTestMask;
2641 GrGLint writeMask = face.fWriteMask;
2642
2643 if (GR_GL_FRONT_AND_BACK == glFace) {
2644 // we call the combined func just in case separate stencil is not
2645 // supported.
2646 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2647 GR_GL_CALL(gl, StencilMask(writeMask));
2648 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
2649 } else {
2650 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2651 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2652 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
2653 }
2654 }
2655 } // namespace
2656
2657 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) {
2658 if (stencilSettings.isDisabled()) {
2659 this->disableStencil();
2660 } else if (fHWStencilSettings != stencilSettings ||
2661 (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) {
2662 if (kYes_TriState != fHWStencilTestEnabled) {
2663 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2664
2665 fHWStencilTestEnabled = kYes_TriState;
2666 }
2667 if (!stencilSettings.isTwoSided()) {
2668 set_gl_stencil(this->glInterface(), stencilSettings.singleSidedFace(),
2669 GR_GL_FRONT_AND_BACK);
2670 } else {
2671 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCWFace(origin),
2672 GR_GL_FRONT);
2673 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCCWFace(origin),
2674 GR_GL_BACK);
2675 }
2676 fHWStencilSettings = stencilSettings;
2677 fHWStencilOrigin = origin;
2678 }
2679 }
2680
2681 void GrGLGpu::disableStencil() {
2682 if (kNo_TriState != fHWStencilTestEnabled) {
2683 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2684
2685 fHWStencilTestEnabled = kNo_TriState;
2686 fHWStencilSettings.invalidate();
2687 }
2688 }
2689
2690 void GrGLGpu::flushConservativeRasterState(bool enabled) {
2691 if (this->caps()->conservativeRasterSupport()) {
2692 if (enabled) {
2693 if (kYes_TriState != fHWConservativeRasterEnabled) {
2694 GL_CALL(Enable(GR_GL_CONSERVATIVE_RASTERIZATION));
2695 fHWConservativeRasterEnabled = kYes_TriState;
2696 }
2697 } else {
2698 if (kNo_TriState != fHWConservativeRasterEnabled) {
2699 GL_CALL(Disable(GR_GL_CONSERVATIVE_RASTERIZATION));
2700 fHWConservativeRasterEnabled = kNo_TriState;
2701 }
2702 }
2703 }
2704 }
2705
2706 void GrGLGpu::flushWireframeState(bool enabled) {
2707 if (this->caps()->wireframeSupport()) {
2708 if (this->caps()->wireframeMode() || enabled) {
2709 if (kYes_TriState != fHWWireframeEnabled) {
2710 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
2711 fHWWireframeEnabled = kYes_TriState;
2712 }
2713 } else {
2714 if (kNo_TriState != fHWWireframeEnabled) {
2715 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
2716 fHWWireframeEnabled = kNo_TriState;
2717 }
2718 }
2719 }
2720 }
2721
2722 void GrGLGpu::flushBlendAndColorWrite(const skgpu::BlendInfo& blendInfo,
2723 const skgpu::Swizzle& swizzle) {
2724 if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWritesColor) {
2725 // We need to work around a driver bug by using a blend state that preserves the dst color,
2726 // rather than disabling color writes.
2727 skgpu::BlendInfo preserveDstBlend;
2728 preserveDstBlend.fSrcBlend = skgpu::BlendCoeff::kZero;
2729 preserveDstBlend.fDstBlend = skgpu::BlendCoeff::kOne;
2730 this->flushBlendAndColorWrite(preserveDstBlend, swizzle);
2731 return;
2732 }
2733
2734 skgpu::BlendEquation equation = blendInfo.fEquation;
2735 skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
2736 skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
2737
2738 // Any optimization to disable blending should have already been applied and
2739     // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2740 bool blendOff = skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff) ||
2741 !blendInfo.fWritesColor;
2742
2743 if (blendOff) {
2744 if (kNo_TriState != fHWBlendState.fEnabled) {
2745 GL_CALL(Disable(GR_GL_BLEND));
2746
2747 // Workaround for the ARM KHR_blend_equation_advanced disable flags issue
2748 // https://code.google.com/p/skia/issues/detail?id=3943
2749 if (this->ctxInfo().vendor() == GrGLVendor::kARM &&
2750 skgpu::BlendEquationIsAdvanced(fHWBlendState.fEquation)) {
2751 SkASSERT(this->caps()->advancedBlendEquationSupport());
2752 // Set to any basic blending equation.
2753 skgpu::BlendEquation blendEquation = skgpu::BlendEquation::kAdd;
2754 GL_CALL(BlendEquation(gXfermodeEquation2Blend[(int)blendEquation]));
2755 fHWBlendState.fEquation = blendEquation;
2756 }
2757
2758 // Workaround for Adreno 5xx BlendFunc bug. See crbug.com/1241134.
2759 // We must also check to see if the blend coeffs are invalid because the client may have
2760 // reset our gl state and thus we will have forgotten if the previous use was a coeff
2761 // that referenced src2.
2762 if (this->glCaps().mustResetBlendFuncBetweenDualSourceAndDisable() &&
2763 (skgpu::BlendCoeffRefsSrc2(fHWBlendState.fSrcCoeff) ||
2764 skgpu::BlendCoeffRefsSrc2(fHWBlendState.fDstCoeff) ||
2765 fHWBlendState.fSrcCoeff == skgpu::BlendCoeff::kIllegal ||
2766 fHWBlendState.fDstCoeff == skgpu::BlendCoeff::kIllegal)) {
2767 // We just reset the blend func to anything that doesn't reference src2
2768 GL_CALL(BlendFunc(GR_GL_ONE, GR_GL_ZERO));
2769 fHWBlendState.fSrcCoeff = skgpu::BlendCoeff::kOne;
2770 fHWBlendState.fDstCoeff = skgpu::BlendCoeff::kZero;
2771 }
2772
2773 fHWBlendState.fEnabled = kNo_TriState;
2774 }
2775 } else {
2776 if (kYes_TriState != fHWBlendState.fEnabled) {
2777 GL_CALL(Enable(GR_GL_BLEND));
2778
2779 fHWBlendState.fEnabled = kYes_TriState;
2780 }
2781
2782 if (fHWBlendState.fEquation != equation) {
2783 GL_CALL(BlendEquation(gXfermodeEquation2Blend[(int)equation]));
2784 fHWBlendState.fEquation = equation;
2785 }
2786
2787 if (skgpu::BlendEquationIsAdvanced(equation)) {
2788 SkASSERT(this->caps()->advancedBlendEquationSupport());
2789
2790 this->flushColorWrite(blendInfo.fWritesColor);
2791 // Advanced equations have no other blend state.
2792 return;
2793 }
2794
2795 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
2796 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[(int)srcCoeff],
2797 gXfermodeCoeff2Blend[(int)dstCoeff]));
2798 fHWBlendState.fSrcCoeff = srcCoeff;
2799 fHWBlendState.fDstCoeff = dstCoeff;
2800 }
2801
2802 if (skgpu::BlendCoeffRefsConstant(srcCoeff) || skgpu::BlendCoeffRefsConstant(dstCoeff)) {
2803 SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
2804 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
2805 GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA));
2806 fHWBlendState.fConstColor = blendConst;
2807 fHWBlendState.fConstColorValid = true;
2808 }
2809 }
2810 }
2811
2812 this->flushColorWrite(blendInfo.fWritesColor);
2813 }
2814
2815 void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const skgpu::Swizzle& swizzle,
2816 GrGLTexture* texture) {
2817 SkASSERT(texture);
2818
2819 #ifdef SK_DEBUG
2820 if (!this->caps()->npotTextureTileSupport()) {
2821 if (samplerState.isRepeatedX()) {
2822 const int w = texture->width();
2823 SkASSERT(SkIsPow2(w));
2824 }
2825 if (samplerState.isRepeatedY()) {
2826 const int h = texture->height();
2827 SkASSERT(SkIsPow2(h));
2828 }
2829 }
2830 #endif
2831
2832 GrGpuResource::UniqueID textureID = texture->uniqueID();
2833 GrGLenum target = texture->target();
2834 if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) {
2835 this->setTextureUnit(unitIdx);
2836 GL_CALL(BindTexture(target, texture->textureID()));
2837 fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID);
2838 }
2839
2840 if (samplerState.mipmapped() == skgpu::Mipmapped::kYes) {
2841 if (!this->caps()->mipmapSupport() || texture->mipmapped() == skgpu::Mipmapped::kNo) {
2842 // We should have caught this already.
2843 SkASSERT(!samplerState.isAniso());
2844 samplerState = GrSamplerState(samplerState.wrapModeX(),
2845 samplerState.wrapModeY(),
2846 samplerState.filter(),
2847 GrSamplerState::MipmapMode::kNone);
2848 } else {
2849 SkASSERT(!texture->mipmapsAreDirty());
2850 }
2851 }
2852
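    // Texture parameters are cached on the texture. If GL state may have been reset externally
    // since the cached values were recorded (tracked via a reset timestamp), re-send everything.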
2853 auto timestamp = texture->parameters()->resetTimestamp();
2854 bool setAll = timestamp < fResetTimestampForTextureParameters;
2855 const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr;
2856 GrGLTextureParameters::SamplerOverriddenState newSamplerState;
2857 if (this->glCaps().useSamplerObjects()) {
2858 fSamplerObjectCache->bindSampler(unitIdx, samplerState);
2859 if (this->glCaps().mustSetAnyTexParameterToEnableMipmapping()) {
2860 if (samplerState.mipmapped() == skgpu::Mipmapped::kYes) {
2861 GrGLenum minFilter = filter_to_gl_min_filter(samplerState.filter(),
2862 samplerState.mipmapMode());
2863 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2864 texture->parameters()->samplerOverriddenState();
2865 this->setTextureUnit(unitIdx);
2866 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, minFilter));
2867 newSamplerState = oldSamplerState;
2868 newSamplerState.fMinFilter = minFilter;
2869 samplerStateToRecord = &newSamplerState;
2870 }
2871 }
2872 } else {
2873 if (fSamplerObjectCache) {
2874 fSamplerObjectCache->unbindSampler(unitIdx);
2875 }
2876 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2877 texture->parameters()->samplerOverriddenState();
2878 samplerStateToRecord = &newSamplerState;
2879
2880 newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter(),
2881 samplerState.mipmapMode());
2882 newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter());
2883
2884 newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps());
2885 newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps());
2886
2887 newSamplerState.fMaxAniso = std::min(static_cast<GrGLfloat>(samplerState.maxAniso()),
2888 this->glCaps().maxTextureMaxAnisotropy());
2889
2890 // These are the OpenGL default values.
2891 newSamplerState.fMinLOD = -1000.f;
2892 newSamplerState.fMaxLOD = 1000.f;
2893
2894 if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) {
2895 this->setTextureUnit(unitIdx);
2896 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter));
2897 }
2898 if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) {
2899 this->setTextureUnit(unitIdx);
2900 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter));
2901 }
2902 if (this->glCaps().mipmapLodControlSupport()) {
2903 if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) {
2904 this->setTextureUnit(unitIdx);
2905 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD));
2906 }
2907 if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) {
2908 this->setTextureUnit(unitIdx);
2909 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD));
2910 }
2911 }
2912 if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) {
2913 this->setTextureUnit(unitIdx);
2914 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS));
2915 }
2916 if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) {
2917 this->setTextureUnit(unitIdx);
2918 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT));
2919 }
2920 if (this->glCaps().clampToBorderSupport()) {
2921 // Make sure the border color is transparent black (the default)
2922 if (setAll || oldSamplerState.fBorderColorInvalid) {
2923 this->setTextureUnit(unitIdx);
2924 static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f};
2925 GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack));
2926 }
2927 }
2928 if (this->caps()->anisoSupport()) {
2929 if (setAll || oldSamplerState.fMaxAniso != newSamplerState.fMaxAniso) {
2930 GL_CALL(TexParameterf(target,
2931 GR_GL_TEXTURE_MAX_ANISOTROPY,
2932 newSamplerState.fMaxAniso));
2933 }
2934 }
2935 }
2936 GrGLTextureParameters::NonsamplerState newNonsamplerState;
2937 newNonsamplerState.fBaseMipMapLevel = 0;
2938 newNonsamplerState.fMaxMipmapLevel = texture->maxMipmapLevel();
2939 newNonsamplerState.fSwizzleIsRGBA = true;
2940
2941 const GrGLTextureParameters::NonsamplerState& oldNonsamplerState =
2942 texture->parameters()->nonsamplerState();
2943 if (this->glCaps().textureSwizzleSupport()) {
2944 if (setAll || !oldNonsamplerState.fSwizzleIsRGBA) {
2945 static constexpr GrGLenum kRGBA[4] {
2946 GR_GL_RED,
2947 GR_GL_GREEN,
2948 GR_GL_BLUE,
2949 GR_GL_ALPHA
2950 };
2951 this->setTextureUnit(unitIdx);
2952 if (GR_IS_GR_GL(this->glStandard())) {
2953 static_assert(sizeof(kRGBA[0]) == sizeof(GrGLint));
2954 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
2955 reinterpret_cast<const GrGLint*>(kRGBA)));
2956 } else if (GR_IS_GR_GL_ES(this->glStandard())) {
2957 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
2958 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, kRGBA[0]));
2959 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, kRGBA[1]));
2960 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, kRGBA[2]));
2961 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, kRGBA[3]));
2962 }
2963 }
2964 }
2965 // These are not supported in ES2 contexts
2966 if (this->glCaps().mipmapLevelControlSupport() &&
2967 (texture->textureType() != GrTextureType::kExternal ||
2968 !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) {
2969 if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) {
2970 this->setTextureUnit(unitIdx);
2971 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL,
2972 newNonsamplerState.fBaseMipMapLevel));
2973 }
2974 if (newNonsamplerState.fMaxMipmapLevel != oldNonsamplerState.fMaxMipmapLevel) {
2975 this->setTextureUnit(unitIdx);
2976 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
2977 newNonsamplerState.fMaxMipmapLevel));
2978 }
2979 }
2980 texture->parameters()->set(samplerStateToRecord, newNonsamplerState,
2981 fResetTimestampForTextureParameters);
2982 }
2983
2984 void GrGLGpu::onResetTextureBindings() {
2985 static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE,
2986 GR_GL_TEXTURE_EXTERNAL};
2987 for (int i = 0; i < this->numTextureUnits(); ++i) {
2988 this->setTextureUnit(i);
2989 for (auto target : kTargets) {
2990 if (fHWTextureUnitBindings[i].hasBeenModified(target)) {
2991 GL_CALL(BindTexture(target, 0));
2992 }
2993 }
2994 fHWTextureUnitBindings[i].invalidateAllTargets(true);
2995 }
2996 }
2997
2998 void GrGLGpu::flushColorWrite(bool writeColor) {
2999 if (!writeColor) {
3000 if (kNo_TriState != fHWWriteToColor) {
3001 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
3002 GR_GL_FALSE, GR_GL_FALSE));
3003 fHWWriteToColor = kNo_TriState;
3004 }
3005 } else {
3006 if (kYes_TriState != fHWWriteToColor) {
3007 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
3008 fHWWriteToColor = kYes_TriState;
3009 }
3010 }
3011 }
3012
3013 void GrGLGpu::flushClearColor(std::array<float, 4> color) {
3014 GrGLfloat r = color[0], g = color[1], b = color[2], a = color[3];
3015 if (this->glCaps().clearToBoundaryValuesIsBroken() &&
3016 (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
3017 static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
3018 static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
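// Nudge alpha just off the exact 0/1 boundary so drivers with broken boundary-value
// clears don't take their buggy fast path.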
3019 a = (1 == a) ? safeAlpha1 : safeAlpha0;
3020 }
3021 if (r != fHWClearColor[0] || g != fHWClearColor[1] ||
3022 b != fHWClearColor[2] || a != fHWClearColor[3]) {
3023 GL_CALL(ClearColor(r, g, b, a));
3024 fHWClearColor[0] = r;
3025 fHWClearColor[1] = g;
3026 fHWClearColor[2] = b;
3027 fHWClearColor[3] = a;
3028 }
3029 }
3030
3031 void GrGLGpu::setTextureUnit(int unit) {
3032 SkASSERT(unit >= 0 && unit < this->numTextureUnits());
3033 if (unit != fHWActiveTextureUnitIdx) {
3034 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
3035 fHWActiveTextureUnitIdx = unit;
3036 }
3037 }
3038
3039 void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) {
3040 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
3041 int lastUnitIdx = this->numTextureUnits() - 1;
3042 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
3043 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
3044 fHWActiveTextureUnitIdx = lastUnitIdx;
3045 }
3046 // Clear out this field so that if a GrGLProgram does use this unit it will rebind the
3047 // correct texture.
3048 fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target);
3049 GL_CALL(BindTexture(target, textureID));
3050 }
3051
3052 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
3053 static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst,
3054 const GrSurface* src,
3055 const SkIRect& srcRect,
3056 const SkIRect& dstRect,
3057 const GrGLCaps& caps) {
3058 int dstSampleCnt = 0;
3059 int srcSampleCnt = 0;
3060 if (const GrRenderTarget* rt = dst->asRenderTarget()) {
3061 dstSampleCnt = rt->numSamples();
3062 }
3063 if (const GrRenderTarget* rt = src->asRenderTarget()) {
3064 srcSampleCnt = rt->numSamples();
3065 }
3066 SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget()));
3067 SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget()));
3068
3069 GrGLFormat dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3070 GrGLFormat srcFormat = GrBackendFormats::AsGLFormat(src->backendFormat());
3071
3072 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3073 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3074
3075 GrTextureType dstTexType;
3076 GrTextureType* dstTexTypePtr = nullptr;
3077 GrTextureType srcTexType;
3078 GrTextureType* srcTexTypePtr = nullptr;
3079 if (dstTex) {
3080 dstTexType = dstTex->textureType();
3081 dstTexTypePtr = &dstTexType;
3082 }
3083 if (srcTex) {
3084 srcTexType = srcTex->textureType();
3085 srcTexTypePtr = &srcTexType;
3086 }
3087
3088 return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr,
3089 srcFormat, srcSampleCnt, srcTexTypePtr,
3090 src->getBoundsRect(), true, srcRect, dstRect);
3091 }
3092
3093 static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) {
3094 // A RT has a separate MSAA renderbuffer if:
3095 // 1) It's multisampled
3096 // 2) We're using an extension with separate MSAA renderbuffers
3097 // 3) It's not FBO 0, which is special and always auto-resolves
3098 return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && !rt->isFBO0(true/*msaa*/);
3099 }
3100
3101 static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src,
3102 const GrGLCaps& caps) {
3103
3104 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
3105 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
3106 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3107 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3108
3109 bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false;
3110 bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false;
3111
3112 GrGLFormat dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3113 GrGLFormat srcFormat = GrBackendFormats::AsGLFormat(src->backendFormat());
3114
3115 GrTextureType dstTexType;
3116 GrTextureType* dstTexTypePtr = nullptr;
3117 GrTextureType srcTexType;
3118 GrTextureType* srcTexTypePtr = nullptr;
3119 if (dstTex) {
3120 dstTexType = dstTex->textureType();
3121 dstTexTypePtr = &dstTexType;
3122 }
3123 if (srcTex) {
3124 srcTexType = srcTex->textureType();
3125 srcTexTypePtr = &srcTexType;
3126 }
3127
3128 return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr,
3129 srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr);
3130 }
3131
3132 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget,
3133 TempFBOTarget tempFBOTarget) {
3134 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
3135 if (!rt || mipLevel > 0) {
3136 SkASSERT(surface->asTexture());
3137 GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture());
3138 GrGLuint texID = texture->textureID();
3139 GrGLenum target = texture->target();
3140 GrGLuint* tempFBOID;
3141 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
3142
3143 if (0 == *tempFBOID) {
3144 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
3145 }
3146
3147 this->bindFramebuffer(fboTarget, *tempFBOID);
3148 GR_GL_CALL(
3149 this->glInterface(),
3150 FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel));
3151 if (mipLevel == 0) {
3152 texture->baseLevelWasBoundToFBO();
3153 }
3154 } else {
3155 rt->bindForPixelOps(fboTarget);
3156 }
3157 }
3158
3159 void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) {
3160 // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets to a
3160 // temporary FBO, so detach them from that FBO here.
3161 if (mipLevel > 0 || !surface->asRenderTarget()) {
3162 SkASSERT(surface->asTexture());
3163 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
3164 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3165 GR_GL_COLOR_ATTACHMENT0,
3166 textureTarget,
3167 0,
3168 0));
3169 }
3170 }
3171
3172 void GrGLGpu::onFBOChanged() {
3173 if (this->caps()->workarounds().flush_on_framebuffer_change) {
3174 this->flush(FlushType::kForce);
3175 }
3176 #ifdef SK_DEBUG
3177 if (fIsExecutingCommandBuffer_DebugOnly) {
3178 SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
3179 "This will severely hurt performance.\n");
3180 }
3181 #endif
3182 }
3183
3184 void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) {
3185 GL_CALL(BindFramebuffer(target, fboid));
3186 if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) {
3187 fBoundDrawFramebuffer = fboid;
3188 }
3189 this->onFBOChanged();
3190 }
3191
3192 void GrGLGpu::deleteFramebuffer(GrGLuint fboid) {
3193 // We're relying on the GL state shadowing being correct in the workaround code below so we
3194 // need to handle a dirty context.
3195 this->handleDirtyContext();
3196 if (fboid == fBoundDrawFramebuffer &&
3197 this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) {
3198 // This workaround only applies to deleting currently bound framebuffers
3199 // on Adreno 420. Because this is a somewhat rare case, instead of
3200 // tracking all the attachments of every framebuffer, we just always
3201 // unbind all attachments.
3202 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3203 GR_GL_RENDERBUFFER, 0));
3204 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
3205 GR_GL_RENDERBUFFER, 0));
3206 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
3207 GR_GL_RENDERBUFFER, 0));
3208 }
3209
3210 GL_CALL(DeleteFramebuffers(1, &fboid));
3211
3212 // Deleting the currently bound framebuffer rebinds to 0.
3213 if (fboid == fBoundDrawFramebuffer) {
3214 this->onFBOChanged();
3215 }
3216 }
3217
3218 bool GrGLGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
3219 GrSurface* src, const SkIRect& srcRect,
3220 GrSamplerState::Filter filter) {
3221 // Don't prefer copying as a draw if the dst doesn't already have an FBO.
3222 // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites().
3223 bool preferCopy = SkToBool(dst->asRenderTarget());
3224 bool scalingCopy = dstRect.size() != srcRect.size();
3225 auto dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3226 if (preferCopy &&
3227 this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()), scalingCopy)) {
3228 GrRenderTarget* dstRT = dst->asRenderTarget();
3229 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3230 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstRect, filter)) {
3231 return true;
3232 }
3233 }
3234
3235 // Prefer copying with glCopyTexSubImage when the dimensions are the same.
3236 if (!scalingCopy && can_copy_texsubimage(dst, src, this->glCaps())) {
3237 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstRect.topLeft());
3238 return true;
3239 }
3240
3241 if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstRect, this->glCaps())) {
3242 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstRect, filter);
3243 }
3244
3245 if (!preferCopy &&
3246 this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()), scalingCopy)) {
3247 GrRenderTarget* dstRT = dst->asRenderTarget();
3248 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3249 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstRect, filter)) {
3250 return true;
3251 }
3252 }
3253
3254 return false;
3255 }
3256
3257 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
3258 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
3259
3260 int progIdx = TextureToCopyProgramIdx(srcTex);
3261 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3262 SkSLType samplerType = SkSLCombinedSamplerTypeForTextureType(srcTex->textureType());
3263
3264 if (!fCopyProgramArrayBuffer) {
3265 static const GrGLfloat vdata[] = {
3266 0, 0,
3267 0, 1,
3268 1, 0,
3269 1, 1
3270 };
3271 fCopyProgramArrayBuffer = GrGLBuffer::Make(this,
3272 sizeof(vdata),
3273 GrGpuBufferType::kVertex,
3274 kStatic_GrAccessPattern);
3275 if (fCopyProgramArrayBuffer) {
3276 fCopyProgramArrayBuffer->updateData(
3277 vdata, /*offset=*/0, sizeof(vdata), /*preserve=*/false);
3278 }
3279 }
3280 if (!fCopyProgramArrayBuffer) {
3281 return false;
3282 }
3283
3284 SkASSERT(!fCopyPrograms[progIdx].fProgram);
3285 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
3286 if (!fCopyPrograms[progIdx].fProgram) {
3287 return false;
3288 }
3289
3290 GrShaderVar aVertex("a_vertex", SkSLType::kHalf2, GrShaderVar::TypeModifier::In);
3291 GrShaderVar uTexCoordXform("u_texCoordXform", SkSLType::kHalf4,
3292 GrShaderVar::TypeModifier::Uniform);
3293 GrShaderVar uPosXform("u_posXform", SkSLType::kHalf4, GrShaderVar::TypeModifier::Uniform);
3294 GrShaderVar uTexture("u_texture", samplerType);
3295 GrShaderVar vTexCoord("v_texCoord", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out);
3296 GrShaderVar oFragColor("o_FragColor", SkSLType::kHalf4, GrShaderVar::TypeModifier::Out);
3297
3298 SkString vshaderTxt;
3299 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3300 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3301 vshaderTxt.appendf("#extension %s : require\n", extension);
3302 }
3303 vTexCoord.addModifier("noperspective");
3304 }
3305
3306 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3307 vshaderTxt.append(";");
3308 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3309 vshaderTxt.append(";");
3310 uPosXform.appendDecl(shaderCaps, &vshaderTxt);
3311 vshaderTxt.append(";");
3312 vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
3313 vshaderTxt.append(";");
3314
3315 vshaderTxt.append(
3316 // Copy Program VS
3317 "void main() {"
3318 "v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);"
3319 "sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
3320 "sk_Position.zw = half2(0, 1);"
3321 "}"
3322 );
3323
3324 SkString fshaderTxt;
3325 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3326 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3327 fshaderTxt.appendf("#extension %s : require\n", extension);
3328 }
3329 }
3330 vTexCoord.setTypeModifier(GrShaderVar::TypeModifier::In);
3331 vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
3332 fshaderTxt.append(";");
3333 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3334 fshaderTxt.append(";");
3335 fshaderTxt.appendf(
3336 // Copy Program FS
3337 "void main() {"
3338 "sk_FragColor = sample(u_texture, v_texCoord);"
3339 "}"
3340 );
3341 std::string vertexSkSL{vshaderTxt.c_str(), vshaderTxt.size()};
3342 std::string fragmentSkSL{fshaderTxt.c_str(), fshaderTxt.size()};
3343
3344 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3345 std::string glsl[kGrShaderTypeCount];
3346 SkSL::ProgramSettings settings;
3347 SkSL::Program::Interface interface;
3348 skgpu::SkSLToGLSL(shaderCaps, vertexSkSL, SkSL::ProgramKind::kVertex, settings,
3349 &glsl[kVertex_GrShaderType], &interface, errorHandler);
3350 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext,
3351 fCopyPrograms[progIdx].fProgram,
3352 GR_GL_VERTEX_SHADER,
3353 glsl[kVertex_GrShaderType],
3354 /*shaderWasCached=*/false,
3355 fProgramCache->stats(),
3356 errorHandler);
3357 SkASSERT(interface == SkSL::Program::Interface());
3358 if (!vshader) {
3359 // Just delete the program, no shaders to delete
3360 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, nullptr, nullptr);
3361 return false;
3362 }
3363
3364 skgpu::SkSLToGLSL(shaderCaps, fragmentSkSL, SkSL::ProgramKind::kFragment, settings,
3365 &glsl[kFragment_GrShaderType], &interface, errorHandler);
3366 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext,
3367 fCopyPrograms[progIdx].fProgram,
3368 GR_GL_FRAGMENT_SHADER,
3369 glsl[kFragment_GrShaderType],
3370 /*shaderWasCached=*/false,
3371 fProgramCache->stats(),
3372 errorHandler);
3373 SkASSERT(interface == SkSL::Program::Interface());
3374 if (!fshader) {
3375 // Delete the program and previously compiled vertex shader
3376 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, &vshader, nullptr);
3377 return false;
3378 }
3379
3380 const std::string* sksl[kGrShaderTypeCount] = {&vertexSkSL, &fragmentSkSL};
3381 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
3382 if (!GrGLCheckLinkStatus(this,
3383 fCopyPrograms[progIdx].fProgram,
3384 /*shaderWasCached=*/false,
3385 errorHandler,
3386 sksl,
3387 glsl)) {
3388 // Failed to link, delete everything
3389 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, &vshader, &fshader);
3390 return false;
3391 }
3392
3393 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
3394 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
3395 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
3396 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
3397 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
3398 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
3399
3400 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3401
3402 // Cleanup the shaders, but not the program
3403 cleanup_program(this, nullptr, &vshader, &fshader);
3404
3405 return true;
3406 }
3407
3408 bool GrGLGpu::createMipmapProgram(int progIdx) {
3409 const bool oddWidth = SkToBool(progIdx & 0x2);
3410 const bool oddHeight = SkToBool(progIdx & 0x1);
3411 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
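// progIdx packs the source level's parity: bit 0x2 means odd width and bit 0x1 means odd
// height. Each odd dimension needs an extra tap to fold the leftover column/row into the
// downsample.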
3412
3413 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3414
3415 SkASSERT(!fMipmapPrograms[progIdx].fProgram);
3416 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
3417 if (!fMipmapPrograms[progIdx].fProgram) {
3418 return false;
3419 }
3420
3421 GrShaderVar aVertex("a_vertex", SkSLType::kHalf2, GrShaderVar::TypeModifier::In);
3422 GrShaderVar uTexCoordXform("u_texCoordXform", SkSLType::kHalf4,
3423 GrShaderVar::TypeModifier::Uniform);
3424 GrShaderVar uTexture("u_texture", SkSLType::kTexture2DSampler);
3425 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
3426 GrShaderVar vTexCoords[] = {
3427 GrShaderVar("v_texCoord0", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3428 GrShaderVar("v_texCoord1", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3429 GrShaderVar("v_texCoord2", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3430 GrShaderVar("v_texCoord3", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3431 };
3432 GrShaderVar oFragColor("o_FragColor", SkSLType::kHalf4,GrShaderVar::TypeModifier::Out);
3433
3434 SkString vshaderTxt;
3435 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3436 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3437 vshaderTxt.appendf("#extension %s : require\n", extension);
3438 }
3439 vTexCoords[0].addModifier("noperspective");
3440 vTexCoords[1].addModifier("noperspective");
3441 vTexCoords[2].addModifier("noperspective");
3442 vTexCoords[3].addModifier("noperspective");
3443 }
3444
3445 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3446 vshaderTxt.append(";");
3447 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3448 vshaderTxt.append(";");
3449 for (int i = 0; i < numTaps; ++i) {
3450 vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
3451 vshaderTxt.append(";");
3452 }
3453
3454 vshaderTxt.append(
3455 // Mipmap Program VS
3456 "void main() {"
3457 "sk_Position.xy = a_vertex * half2(2) - half2(1);"
3458 "sk_Position.zw = half2(0, 1);"
3459 );
3460
3461 // Insert texture coordinate computation:
3462 if (oddWidth && oddHeight) {
3463 vshaderTxt.append(
3464 "v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
3465 "v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
3466 "v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
3467 "v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
3468 );
3469 } else if (oddWidth) {
3470 vshaderTxt.append(
3471 "v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
3472 "v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
3473 );
3474 } else if (oddHeight) {
3475 vshaderTxt.append(
3476 "v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
3477 "v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
3478 );
3479 } else {
3480 vshaderTxt.append(
3481 "v_texCoord0 = a_vertex.xy;"
3482 );
3483 }
3484
3485 vshaderTxt.append("}");
3486
3487 SkString fshaderTxt;
3488 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3489 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3490 fshaderTxt.appendf("#extension %s : require\n", extension);
3491 }
3492 }
3493 for (int i = 0; i < numTaps; ++i) {
3494 vTexCoords[i].setTypeModifier(GrShaderVar::TypeModifier::In);
3495 vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
3496 fshaderTxt.append(";");
3497 }
3498 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3499 fshaderTxt.append(";");
3500 fshaderTxt.append(
3501 // Mipmap Program FS
3502 "void main() {"
3503 );
3504
3505 if (oddWidth && oddHeight) {
3506 fshaderTxt.append(
3507 "sk_FragColor = (sample(u_texture, v_texCoord0) + "
3508 "sample(u_texture, v_texCoord1) + "
3509 "sample(u_texture, v_texCoord2) + "
3510 "sample(u_texture, v_texCoord3)) * 0.25;"
3511 );
3512 } else if (oddWidth || oddHeight) {
3513 fshaderTxt.append(
3514 "sk_FragColor = (sample(u_texture, v_texCoord0) + "
3515 "sample(u_texture, v_texCoord1)) * 0.5;"
3516 );
3517 } else {
3518 fshaderTxt.append(
3519 "sk_FragColor = sample(u_texture, v_texCoord0);"
3520 );
3521 }
3522
3523 fshaderTxt.append("}");
3524
3525 std::string vertexSkSL{vshaderTxt.c_str(), vshaderTxt.size()};
3526 std::string fragmentSkSL{fshaderTxt.c_str(), fshaderTxt.size()};
3527
3528 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3529 std::string glsl[kGrShaderTypeCount];
3530 SkSL::ProgramSettings settings;
3531 SkSL::Program::Interface interface;
3532
3533 skgpu::SkSLToGLSL(shaderCaps, vertexSkSL, SkSL::ProgramKind::kVertex, settings,
3534 &glsl[kVertex_GrShaderType], &interface, errorHandler);
3535 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext,
3536 fMipmapPrograms[progIdx].fProgram,
3537 GR_GL_VERTEX_SHADER,
3538 glsl[kVertex_GrShaderType],
3539 /*shaderWasCached=*/false,
3540 fProgramCache->stats(),
3541 errorHandler);
3542 SkASSERT(interface == SkSL::Program::Interface());
3543 if (!vshader) {
3544 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, nullptr, nullptr);
3545 return false;
3546 }
3547
3548 skgpu::SkSLToGLSL(shaderCaps, fragmentSkSL, SkSL::ProgramKind::kFragment, settings,
3549 &glsl[kFragment_GrShaderType], &interface, errorHandler);
3550 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext,
3551 fMipmapPrograms[progIdx].fProgram,
3552 GR_GL_FRAGMENT_SHADER,
3553 glsl[kFragment_GrShaderType],
3554 /*shaderWasCached=*/false,
3555 fProgramCache->stats(),
3556 errorHandler);
3557 SkASSERT(interface == SkSL::Program::Interface());
3558 if (!fshader) {
3559 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, &vshader, nullptr);
3560 return false;
3561 }
3562
3563 const std::string* sksl[kGrShaderTypeCount] = {&vertexSkSL, &fragmentSkSL};
3564 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
3565 if (!GrGLCheckLinkStatus(this,
3566 fMipmapPrograms[progIdx].fProgram,
3567 /*shaderWasCached=*/false,
3568 errorHandler,
3569 sksl,
3570 glsl)) {
3571 // Program linking failed, clean up
3572 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, &vshader, &fshader);
3573 return false;
3574 }
3575
3576 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
3577 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
3578 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3579 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
3580
3581 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3582
3583 // Clean up the shaders
3584 cleanup_program(this, nullptr, &vshader, &fshader);
3585
3586 return true;
3587 }
3588
3589 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, bool drawToMultisampleFBO, GrSurface* src,
3590 const SkIRect& srcRect, const SkIRect& dstRect,
3591 GrSamplerState::Filter filter) {
3592 auto* srcTex = static_cast<GrGLTexture*>(src->asTexture());
3593 if (!srcTex) {
3594 return false;
3595 }
3596 // We don't swizzle at all in our copies.
3597 this->bindTexture(0, filter, skgpu::Swizzle::RGBA(), srcTex);
3598 if (auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget())) {
3599 this->flushRenderTarget(dstRT, drawToMultisampleFBO);
3600 } else {
3601 auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
3602 SkASSERT(dstTex);
3603 SkASSERT(!drawToMultisampleFBO);
3604 if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) {
3605 return false;
3606 }
3607 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
3608 fHWBoundRenderTargetUniqueID.makeInvalid();
3609 }
3610 int progIdx = TextureToCopyProgramIdx(srcTex);
3611 if (!fCopyPrograms[progIdx].fProgram) {
3612 if (!this->createCopyProgram(srcTex)) {
3613 SkDebugf("Failed to create copy program.\n");
3614 return false;
3615 }
3616 }
3617 this->flushViewport(SkIRect::MakeSize(dst->dimensions()),
3618 dst->height(),
3619 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
3620 this->flushProgram(fCopyPrograms[progIdx].fProgram);
3621 fHWVertexArrayState.setVertexArrayID(this, 0);
3622 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3623 attribs->enableVertexArrays(this, 1);
3624 attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3625 SkSLType::kFloat2, 2 * sizeof(GrGLfloat), 0);
3626 // dst rect edges in NDC (-1 to 1)
3627 int dw = dst->width();
3628 int dh = dst->height();
3629 GrGLfloat dx0 = 2.f * dstRect.fLeft / dw - 1.f;
3630 GrGLfloat dx1 = 2.f * dstRect.fRight / dw - 1.f;
3631 GrGLfloat dy0 = 2.f * dstRect.fTop / dh - 1.f;
3632 GrGLfloat dy1 = 2.f * dstRect.fBottom / dh - 1.f;
3633 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
3634 GrGLfloat sx1 = (GrGLfloat)(srcRect.fRight);
3635 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
3636 GrGLfloat sy1 = (GrGLfloat)(srcRect.fBottom);
3637 int sw = src->width();
3638 int sh = src->height();
3639 if (srcTex->textureType() != GrTextureType::kRectangle) {
3640 // src rect edges in normalized texture space (0 to 1)
3641 sx0 /= sw;
3642 sx1 /= sw;
3643 sy0 /= sh;
3644 sy1 /= sh;
3645 }
3646 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
3647 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
3648 sx1 - sx0, sy1 - sy0, sx0, sy0));
3649 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
3650 this->flushBlendAndColorWrite(skgpu::BlendInfo(), skgpu::Swizzle::RGBA());
3651 this->flushConservativeRasterState(false);
3652 this->flushWireframeState(false);
3653 this->flushScissorTest(GrScissorTest::kDisabled);
3654 this->disableWindowRectangles();
3655 this->disableStencil();
3656 if (this->glCaps().srgbWriteControl()) {
3657 this->flushFramebufferSRGB(true);
3658 }
3659 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3660 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER);
3661 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3662 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3663 return true;
3664 }
3665
3666 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3667 const SkIPoint& dstPoint) {
3668 SkASSERT(can_copy_texsubimage(dst, src, this->glCaps()));
3669 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
3670 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
3671 SkASSERT(dstTex);
3672 // We modified the bound FBO
3673 fHWBoundRenderTargetUniqueID.makeInvalid();
3674
3675 this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID());
3676 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
3677 dstPoint.fX, dstPoint.fY,
3678 srcRect.fLeft, srcRect.fTop,
3679 srcRect.width(), srcRect.height()));
3680 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER);
3681 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3682 srcRect.width(), srcRect.height());
3683 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3684 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3685 }
3686
3687 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3688 const SkIRect& dstRect, GrSamplerState::Filter filter) {
3689 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstRect, this->glCaps()));
3690 if (dst == src) {
3691 if (SkIRect::Intersects(dstRect, srcRect)) {
3692 return false;
3693 }
3694 }
3695
3696 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget);
3697 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget);
3698 // We modified the bound FBO
3699 fHWBoundRenderTargetUniqueID.makeInvalid();
3700
3701 // BlitFrameBuffer respects the scissor, so disable it.
3702 this->flushScissorTest(GrScissorTest::kDisabled);
3703 this->disableWindowRectangles();
3704
3705 GL_CALL(BlitFramebuffer(srcRect.fLeft,
3706 srcRect.fTop,
3707 srcRect.fRight,
3708 srcRect.fBottom,
3709 dstRect.fLeft,
3710 dstRect.fTop,
3711 dstRect.fRight,
3712 dstRect.fBottom,
3713 GR_GL_COLOR_BUFFER_BIT,
3714 filter_to_gl_mag_filter(filter)));
3715 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER);
3716 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER);
3717
3718 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3719 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3720 return true;
3721 }
3722
3723 bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
3724 using RegenerateMipmapType = GrGLCaps::RegenerateMipmapType;
3725
3726 auto glTex = static_cast<GrGLTexture*>(texture);
3727 // Mipmaps are only supported on 2D textures:
3728 if (GR_GL_TEXTURE_2D != glTex->target()) {
3729 return false;
3730 }
3731 GrGLFormat format = glTex->format();
3732 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
3733 // Uses draw calls to do a series of downsample operations to successive mips.
3734
3735 // The manual approach requires the ability to limit which level we're sampling and that the
3736 // destination can be bound to a FBO:
3737 if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) {
3738 GrGLenum target = glTex->target();
3739 this->bindTextureToScratchUnit(target, glTex->textureID());
3740 GL_CALL(GenerateMipmap(glTex->target()));
3741 return true;
3742 }
3743
3744 int width = texture->width();
3745 int height = texture->height();
3746 int levelCount = SkMipmap::ComputeLevelCount(width, height) + 1;
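// ComputeLevelCount() counts only the levels below the base, so add one for the base level.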
3747 SkASSERT(levelCount == texture->maxMipmapLevel() + 1);
3748
3749 // Create (if necessary), then bind temporary FBO:
3750 if (0 == fTempDstFBOID) {
3751 GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
3752 }
3753 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
3754 fHWBoundRenderTargetUniqueID.makeInvalid();
3755
3756 // Bind the texture, to get things configured for filtering.
3757 // We'll be changing our base level and max level further below:
3758 this->setTextureUnit(0);
3759 // The mipmap program does not do any swizzling.
3760 this->bindTexture(0, GrSamplerState::Filter::kLinear, skgpu::Swizzle::RGBA(), glTex);
3761
3762 // Vertex data:
3763 if (!fMipmapProgramArrayBuffer) {
3764 static const GrGLfloat vdata[] = {
3765 0, 0,
3766 0, 1,
3767 1, 0,
3768 1, 1
3769 };
3770 fMipmapProgramArrayBuffer = GrGLBuffer::Make(this,
3771 sizeof(vdata),
3772 GrGpuBufferType::kVertex,
3773 kStatic_GrAccessPattern);
3774 fMipmapProgramArrayBuffer->updateData(vdata, /*offset=*/0,
3775                                       sizeof(vdata),
3776                                       /*preserve=*/false);
3778 }
3779 if (!fMipmapProgramArrayBuffer) {
3780 return false;
3781 }
3782
3783 fHWVertexArrayState.setVertexArrayID(this, 0);
3784
3785 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3786 attribs->enableVertexArrays(this, 1);
3787 attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3788 SkSLType::kFloat2, 2 * sizeof(GrGLfloat), 0);
3789
3790 // Set "simple" state once:
3791 this->flushBlendAndColorWrite(skgpu::BlendInfo(), skgpu::Swizzle::RGBA());
3792 this->flushScissorTest(GrScissorTest::kDisabled);
3793 this->disableWindowRectangles();
3794 this->disableStencil();
3795
3796 // Do all the blits:
3797 width = texture->width();
3798 height = texture->height();
3799
3800 std::unique_ptr<GrSemaphore> semaphore;
3801 for (GrGLint level = 1; level < levelCount; ++level) {
3802 // Get and bind the program for this particular downsample (filter shape can vary):
3803 int progIdx = TextureSizeToMipmapProgramIdx(width, height);
3804 if (!fMipmapPrograms[progIdx].fProgram) {
3805 if (!this->createMipmapProgram(progIdx)) {
3806 SkDebugf("Failed to create mipmap program.\n");
3807 // Invalidate all params to cover base and max level change in a previous iteration.
3808 glTex->textureParamsModified();
3809 return false;
3810 }
3811 }
3812 this->flushProgram(fMipmapPrograms[progIdx].fProgram);
3813
3814 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusSync &&
3815 level > 1) {
3816 this->waitSemaphore(semaphore.get());
3817 semaphore.reset();
3818 }
3819
3820 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
3821 const float invWidth = 1.0f / width;
3822 const float invHeight = 1.0f / height;
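// .x/.z are one-texel offsets at the source level; .y/.w scale the quad so the shifted
// taps still sample inside the source mip.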
3823 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3824 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
3825 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
3826
3827 // Set the base level so that we only sample from the previous mip.
3828 SkASSERT(this->glCaps().mipmapLevelControlSupport());
3829 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
3830 // Setting the max level is technically unnecessary and can affect
3831 // validation for the framebuffer. However, by making it clear that a
3832 // rendering feedback loop is not occurring, we avoid hitting a slow
3833 // path on some drivers.
3834 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusMaxLevel) {
3835 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_MAX_LEVEL, level - 1));
3836 }
3837
3838 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
3839 glTex->textureID(), level));
3840
3841 width = std::max(1, width / 2);
3842 height = std::max(1, height / 2);
3843 this->flushViewport(SkIRect::MakeWH(width, height), height, kTopLeft_GrSurfaceOrigin);
3844
3845 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3846
3847 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusSync &&
3848 level < levelCount-1) {
3849 semaphore = this->makeSemaphore(true);
3850 this->insertSemaphore(semaphore.get());
3851 }
3852 }
3853
3854 // Unbind:
3855 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3856 GR_GL_TEXTURE_2D, 0, 0));
3857
3858 // We modified the base level and max level params.
3859 GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState();
3860 // We drew the 2nd to last level into the last level.
3861 nonsamplerState.fBaseMipMapLevel = levelCount - 2;
3862 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusMaxLevel) {
3863 nonsamplerState.fMaxMipmapLevel = levelCount - 2;
3864 }
3865 glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
3866
3867 return true;
3868 }
3869
3870 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
3871 SkASSERT(type);
3872 switch (type) {
3873 case kTexture_GrXferBarrierType: {
3874 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
3875 SkASSERT(glrt->asTexture());
3876 SkASSERT(!glrt->isFBO0(false/*multisample*/));
3877 if (glrt->requiresManualMSAAResolve()) {
3878 // The render target uses separate storage so no need for glTextureBarrier.
3879 // FIXME: The render target will resolve automatically when its texture is bound,
3880 // but we could resolve only the bounds that will be read if we do it here instead.
3881 return;
3882 }
3883 SkASSERT(this->caps()->textureBarrierSupport());
3884 GL_CALL(TextureBarrier());
3885 return;
3886 }
3887 case kBlend_GrXferBarrierType:
3888 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
3889 this->caps()->blendEquationSupport());
3890 GL_CALL(BlendBarrier());
3891 return;
3892 default: break; // placate compiler warnings that kNone not handled
3893 }
3894 }
3895
3896 GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions,
3897 const GrBackendFormat& format,
3898 GrRenderable renderable,
3899 skgpu::Mipmapped mipmapped,
3900 GrProtected isProtected,
3901 std::string_view label) {
3902 this->handleDirtyContext();
3903
3904 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
3905 if (glFormat == GrGLFormat::kUnknown) {
3906 return {};
3907 }
3908
3909 int numMipLevels = 1;
3910 if (mipmapped == skgpu::Mipmapped::kYes) {
3911 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
3912 }
3913
3914 // Compressed formats go through onCreateCompressedBackendTexture
3915 SkASSERT(!GrGLFormatIsCompressed(glFormat));
3916
3917 GrGLTextureInfo info;
3918 GrGLTextureParameters::SamplerOverriddenState initialState;
3919
3920 if (glFormat == GrGLFormat::kUnknown) {
3921 return {};
3922 }
3923 switch (format.textureType()) {
3924 case GrTextureType::kNone:
3925 case GrTextureType::kExternal:
3926 return {};
3927 case GrTextureType::k2D:
3928 info.fTarget = GR_GL_TEXTURE_2D;
3929 break;
3930 case GrTextureType::kRectangle:
3931 if (!this->glCaps().rectangleTextureSupport() || mipmapped == skgpu::Mipmapped::kYes) {
3932 return {};
3933 }
3934 info.fTarget = GR_GL_TEXTURE_RECTANGLE;
3935 break;
3936 }
3937 info.fFormat = GrGLFormatToEnum(glFormat);
3938 info.fID = this->createTexture(dimensions, glFormat, info.fTarget, renderable, &initialState,
3939 numMipLevels, isProtected, label);
3940 if (!info.fID) {
3941 return {};
3942 }
3943 info.fProtected = isProtected;
3944
3945 // Unbind this texture from the scratch texture unit.
3946 this->bindTextureToScratchUnit(info.fTarget, 0);
3947
3948 auto parameters = sk_make_sp<GrGLTextureParameters>();
3949 // The non-sampler params are still at their default values.
3950 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
3951 fResetTimestampForTextureParameters);
3952
3953 return GrBackendTextures::MakeGL(
3954 dimensions.width(), dimensions.height(), mipmapped, info, std::move(parameters), label);
3955 }
3956
3957 bool GrGLGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
3958 sk_sp<skgpu::RefCntedCallback> finishedCallback,
3959 std::array<float, 4> color) {
3960 this->handleDirtyContext();
3961
3962 GrGLTextureInfo info;
3963 SkAssertResult(GrBackendTextures::GetGLTextureInfo(backendTexture, &info));
3964
3965 int numMipLevels = 1;
3966 if (backendTexture.hasMipmaps()) {
3967 numMipLevels =
3968 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
3969 }
3970
3971 GrGLFormat glFormat = GrGLFormatFromGLEnum(info.fFormat);
3972
3973 this->bindTextureToScratchUnit(info.fTarget, info.fID);
3974
3975 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
3976 // so that the uploads go to the right levels.
3977 if (numMipLevels && this->glCaps().mipmapLevelControlSupport()) {
3978 auto params = get_gl_texture_params(backendTexture);
3979 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
3980 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
3981 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
3982 nonsamplerState.fBaseMipMapLevel = 0;
3983 }
3984 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
3985 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
3986 nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
3987 }
3988 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
3989 }
3990
3991 uint32_t levelMask = (1 << numMipLevels) - 1;
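// One bit per mip level; every level whose bit is set gets cleared to the requested color.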
3992 bool result = this->uploadColorToTex(glFormat,
3993 backendTexture.dimensions(),
3994 info.fTarget,
3995 color,
3996 levelMask);
3997
3998 // Unbind this texture from the scratch texture unit.
3999 this->bindTextureToScratchUnit(info.fTarget, 0);
4000 return result;
4001 }
4002
4003 void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) {
4004 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4005
4006 GrGLTextureInfo info;
4007 if (GrBackendTextures::GetGLTextureInfo(tex, &info)) {
4008 GL_CALL(DeleteTextures(1, &info.fID));
4009 }
4010 }
4011
4012 bool GrGLGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
4013 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
4014
4015 sk_sp<GrGLProgram> tmp = fProgramCache->findOrCreateProgram(this->getContext(),
4016 desc, programInfo, &stat);
4017 if (!tmp) {
4018 return false;
4019 }
4020
4021 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
4022 }
4023
4024 #if defined(GR_TEST_UTILS)
4025
4026 bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
4027 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4028
4029 GrGLTextureInfo info;
4030 if (!GrBackendTextures::GetGLTextureInfo(tex, &info)) {
4031 return false;
4032 }
4033
4034 GrGLboolean result;
4035 GL_CALL_RET(result, IsTexture(info.fID));
4036
4037 return (GR_GL_TRUE == result);
4038 }
4039
4040 GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
4041 GrColorType colorType,
4042 int sampleCnt,
4043 GrProtected isProtected) {
4044 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
4045 dimensions.height() > this->caps()->maxRenderTargetSize()) {
4046 return {};
4047 }
4048 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
4049 return {};
4050 }
4051
4052 this->handleDirtyContext();
4053 auto format = this->glCaps().getFormatFromColorType(colorType);
4054 sampleCnt = this->glCaps().getRenderTargetSampleCount(sampleCnt, format);
4055 if (!sampleCnt) {
4056 return {};
4057 }
4058 // We make a texture instead of a render target if we're using a
4059 // "multisampled_render_to_texture" style extension or have a BGRA format that
4060 // is allowed for textures but not render buffer internal formats.
4061 bool useTexture = false;
4062 if (sampleCnt > 1 && !this->glCaps().usesMSAARenderBuffers()) {
4063 useTexture = true;
4064 } else if (format == GrGLFormat::kBGRA8 &&
4065 this->glCaps().getRenderbufferInternalFormat(GrGLFormat::kBGRA8) != GR_GL_BGRA8) {
4066 // We have a BGRA extension that doesn't support BGRA render buffers. We can use a texture
4067 // unless we've been asked for MSAA. Note we already checked above for render-to-
4068 // multisampled-texture style extensions.
4069 if (sampleCnt > 1) {
4070 return {};
4071 }
4072 useTexture = true;
4073 }
4074 int sFormatIdx = this->getCompatibleStencilIndex(format);
4075 if (sFormatIdx < 0) {
4076 return {};
4077 }
4078 GrGLuint colorID = 0;
4079 GrGLuint stencilID = 0;
4080 GrGLFramebufferInfo info;
4081 info.fFBOID = 0;
4082 info.fFormat = GrGLFormatToEnum(format);
4083 info.fProtected = isProtected;
4084
4085 auto deleteIDs = [&](bool saveFBO = false) {
4086 if (colorID) {
4087 if (useTexture) {
4088 GL_CALL(DeleteTextures(1, &colorID));
4089 } else {
4090 GL_CALL(DeleteRenderbuffers(1, &colorID));
4091 }
4092 }
4093 if (stencilID) {
4094 GL_CALL(DeleteRenderbuffers(1, &stencilID));
4095 }
4096 if (!saveFBO && info.fFBOID) {
4097 this->deleteFramebuffer(info.fFBOID);
4098 }
4099 };
4100
4101 if (useTexture) {
4102 GL_CALL(GenTextures(1, &colorID));
4103 } else {
4104 GL_CALL(GenRenderbuffers(1, &colorID));
4105 }
4106 GL_CALL(GenRenderbuffers(1, &stencilID));
4107 if (!stencilID || !colorID) {
4108 deleteIDs();
4109 return {};
4110 }
4111
4112 GL_CALL(GenFramebuffers(1, &info.fFBOID));
4113 if (!info.fFBOID) {
4114 deleteIDs();
4115 return {};
4116 }
4117
4118 this->invalidateBoundRenderTarget();
4119
4120 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4121 if (useTexture) {
4122 GrGLTextureParameters::SamplerOverriddenState initialState;
4123 colorID = this->createTexture(dimensions, format, GR_GL_TEXTURE_2D, GrRenderable::kYes,
4124 &initialState,
4125 1,
4126 info.fProtected,
4127 /*label=*/"Skia");
4128 if (!colorID) {
4129 deleteIDs();
4130 return {};
4131 }
4132 if (sampleCnt == 1) {
4133 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4134 GR_GL_TEXTURE_2D, colorID, 0));
4135 } else {
4136 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4137 GR_GL_TEXTURE_2D, colorID, 0, sampleCnt));
4138 }
4139 } else {
4140 GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format);
4141 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
4142 if (sampleCnt == 1) {
4143 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, dimensions.width(),
4144 dimensions.height()));
4145 } else {
4146 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt, renderBufferFormat,
4147 dimensions.width(), dimensions.height())) {
4148 deleteIDs();
4149 return {};
4150 }
4151 }
4152 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4153 GR_GL_RENDERBUFFER, colorID));
4154 }
4155 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
4156 auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx];
4157 if (sampleCnt == 1) {
4158 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, GrGLFormatToEnum(stencilBufferFormat),
4159 dimensions.width(), dimensions.height()));
4160 } else {
4161 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt,
4162 GrGLFormatToEnum(stencilBufferFormat),
4163 dimensions.width(), dimensions.height())) {
4164 deleteIDs();
4165 return {};
4166 }
4167 }
4168 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER,
4169 stencilID));
4170 if (GrGLFormatIsPackedDepthStencil(this->glCaps().stencilFormats()[sFormatIdx])) {
4171 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
4172 GR_GL_RENDERBUFFER, stencilID));
4173 }
4174
4175 // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
4176 // has this rule that if a renderbuffer/texture is deleted and an FBO other than the current FBO
4177 // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the
4178 // renderbuffers/texture.
4179 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
4180 deleteIDs(/* saveFBO = */ true);
4181
4182 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4183 GrGLenum status;
4184 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
4185 if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
4186 this->deleteFramebuffer(info.fFBOID);
4187 return {};
4188 }
4189
4190 auto stencilBits = SkToInt(GrGLFormatStencilBits(this->glCaps().stencilFormats()[sFormatIdx]));
4191
4192 GrBackendRenderTarget beRT = GrBackendRenderTargets::MakeGL(
4193 dimensions.width(), dimensions.height(), sampleCnt, stencilBits, info);
4194 SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat()));
4195 return beRT;
4196 }
4197
4198 void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
4199 SkASSERT(GrBackendApi::kOpenGL == backendRT.backend());
4200 GrGLFramebufferInfo info;
4201 if (GrBackendRenderTargets::GetGLFramebufferInfo(backendRT, &info)) {
4202 if (info.fFBOID) {
4203 this->deleteFramebuffer(info.fFBOID);
4204 }
4205 }
4206 }
4207 #endif
4208
4209 ///////////////////////////////////////////////////////////////////////////////
4210
4211 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
4212 const GrBuffer* ibuf) {
4213 SkASSERT(!ibuf || ibuf->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(ibuf)->isMapped());
4214 GrGLAttribArrayState* attribState;
4215
4216 if (gpu->glCaps().isCoreProfile()) {
4217 if (!fCoreProfileVertexArray) {
4218 GrGLuint arrayID;
4219 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
4220 int attrCount = gpu->glCaps().maxVertexAttributes();
4221 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
4222 }
4223 if (ibuf) {
4224 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
4225 } else {
4226 attribState = fCoreProfileVertexArray->bind(gpu);
4227 }
4228 } else {
4229 if (ibuf) {
4230 // bindBuffer implicitly binds VAO 0 when binding an index buffer.
4231 gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
4232 } else {
4233 this->setVertexArrayID(gpu, 0);
4234 }
4235 int attrCount = gpu->glCaps().maxVertexAttributes();
4236 if (fDefaultVertexArrayAttribState.count() != attrCount) {
4237 fDefaultVertexArrayAttribState.resize(attrCount);
4238 }
4239 attribState = &fDefaultVertexArrayAttribState;
4240 }
4241 return attribState;
4242 }
4243
4244 void GrGLGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
4245 GrGpuFinishedContext finishedContext) {
4246 fFinishCallbacks.add(finishedProc, finishedContext);
4247 }
4248
4249 void GrGLGpu::flush(FlushType flushType) {
4250 if (fNeedsGLFlush || flushType == FlushType::kForce) {
4251 GL_CALL(Flush());
4252 fNeedsGLFlush = false;
4253 }
4254 }
4255
4256 bool GrGLGpu::onSubmitToGpu(GrSyncCpu sync) {
4257 if (sync == GrSyncCpu::kYes ||
4258 (!fFinishCallbacks.empty() && !this->glCaps().fenceSyncSupport())) {
4259 this->finishOutstandingGpuWork();
4260 fFinishCallbacks.callAll(true);
4261 } else {
4262 this->flush();
4263 // See if any previously inserted finish procs are good to go.
4264 fFinishCallbacks.check();
4265 }
4266 if (!this->glCaps().skipErrorChecks()) {
4267 this->clearErrorsAndCheckForOOM();
4268 }
4269 return true;
4270 }
4271
4272 void GrGLGpu::willExecute() {
4273 // Because our transfers will be submitted to GL to perform immediately (no command buffer to
4274 // submit), we must unmap any staging buffers.
4275 if (fStagingBufferManager) {
4276 fStagingBufferManager->detachBuffers();
4277 }
4278 }
4279
4280 void GrGLGpu::submit(GrOpsRenderPass* renderPass) {
4281 // The GrGLOpsRenderPass doesn't buffer ops so there is nothing to do here
4282 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
4283 fCachedOpsRenderPass->reset();
4284 }
4285
4286 [[nodiscard]] GrGLsync GrGLGpu::insertFence() {
4287 if (!this->glCaps().fenceSyncSupport()) {
4288 return nullptr;
4289 }
4290 GrGLsync sync;
4291 if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
4292 static_assert(sizeof(GrGLsync) >= sizeof(GrGLuint));
4293 GrGLuint fence = 0;
4294 GL_CALL(GenFences(1, &fence));
4295 GL_CALL(SetFence(fence, GR_GL_ALL_COMPLETED));
4296 sync = reinterpret_cast<GrGLsync>(static_cast<intptr_t>(fence));
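// GL_NV_fence hands back a GLuint name rather than a sync object, so the name is stashed
// in the GrGLsync pointer value and cast back the same way in waitSync()/deleteSync().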
4297 } else {
4298 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4299 }
4300 this->setNeedsFlush();
4301 return sync;
4302 }
4303
4304 bool GrGLGpu::waitSync(GrGLsync sync, uint64_t timeout, bool flush) {
4305 if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
4306 GrGLuint nvFence = static_cast<GrGLuint>(reinterpret_cast<intptr_t>(sync));
4307 if (!timeout) {
4308 if (flush) {
4309 this->flush(FlushType::kForce);
4310 }
4311 GrGLboolean result;
4312 GL_CALL_RET(result, TestFence(nvFence));
4313 return result == GR_GL_TRUE;
4314 }
4315 // Ignore non-zero timeouts. GL_NV_fence has no timeout functionality.
4316 // If this really becomes necessary we could poll TestFence().
4317 // FinishFence always flushes so no need to check flush param.
4318 GL_CALL(FinishFence(nvFence));
4319 return true;
4320 } else {
4321 GrGLbitfield flags = flush ? GR_GL_SYNC_FLUSH_COMMANDS_BIT : 0;
4322 GrGLenum result;
4323 GL_CALL_RET(result, ClientWaitSync(sync, flags, timeout));
4324 return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
4325 }
4326 }
4327
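// Polls whether the fence has signaled without flushing; trivially true when fence syncs are
// unsupported.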
4328 bool GrGLGpu::waitFence(GrGLsync fence) {
4329 if (!this->glCaps().fenceSyncSupport()) {
4330 return true;
4331 }
4332 return this->waitSync(fence, 0, false);
4333 }
4334
4335 void GrGLGpu::deleteFence(GrGLsync fence) {
4336 if (this->glCaps().fenceSyncSupport()) {
4337 this->deleteSync(fence);
4338 }
4339 }
4340
4341 [[nodiscard]] std::unique_ptr<GrSemaphore> GrGLGpu::makeSemaphore(bool isOwned) {
4342 SkASSERT(this->caps()->semaphoreSupport());
4343 return GrGLSemaphore::Make(this, isOwned);
4344 }
4345
4346 std::unique_ptr<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore&,
4347 GrSemaphoreWrapType,
4348 GrWrapOwnership) {
4349 SK_ABORT("Unsupported");
4350 }
4351
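// Signals the semaphore by attaching a newly created GL sync object to it.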
4352 void GrGLGpu::insertSemaphore(GrSemaphore* semaphore) {
4353 SkASSERT(semaphore);
4354 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4355
4356 GrGLsync sync;
4357 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4358 glSem->setSync(sync);
4359 this->setNeedsFlush();
4360 }
4361
4362 void GrGLGpu::waitSemaphore(GrSemaphore* semaphore) {
4363 SkASSERT(semaphore);
4364 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4365
4366 GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
4367 }
4368
4369 void GrGLGpu::checkFinishProcs() {
4370 fFinishCallbacks.check();
4371 }
4372
4373 void GrGLGpu::finishOutstandingGpuWork() {
4374 GL_CALL(Finish());
4375 }
4376
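// Drains the GL error queue, recording an out-of-memory condition if GR_GL_OUT_OF_MEMORY is seen.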
4377 void GrGLGpu::clearErrorsAndCheckForOOM() {
4378 while (this->getErrorAndCheckForOOM() != GR_GL_NO_ERROR) {}
4379 }
4380
4381 GrGLenum GrGLGpu::getErrorAndCheckForOOM() {
4382 #if GR_GL_CHECK_ERROR
4383 if (this->glInterface()->checkAndResetOOMed()) {
4384 this->setOOMed();
4385 }
4386 #endif
4387 GrGLenum error = this->fGLContext->glInterface()->fFunctions.fGetError();
4388 if (error == GR_GL_OUT_OF_MEMORY) {
4389 this->setOOMed();
4390 }
4391 return error;
4392 }
4393
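// Deletes a GL sync object. On the GL_NV_fence path the GLuint fence name packed into the handle
// is recovered and deleted instead.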
4394 void GrGLGpu::deleteSync(GrGLsync sync) {
4395 if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) {
4396 GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync));
4397 GL_CALL(DeleteFences(1, &nvFence));
4398 } else {
4399 GL_CALL(DeleteSync(sync));
4400 }
4401 }
4402
4403 std::unique_ptr<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
4404 // Set up a semaphore to be signaled once the data is ready, and flush GL
4405 std::unique_ptr<GrSemaphore> semaphore = this->makeSemaphore(true);
4406 SkASSERT(semaphore);
4407 this->insertSemaphore(semaphore.get());
4408     // We must call flush here to make sure the GrGLsync object gets created and sent to the GPU.
4409 this->flush(FlushType::kForce);
4410
4411 return semaphore;
4412 }
4413
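// Maps a texture's combined sampler type to an index into the copy-program array: 0 for 2D,
// 1 for rectangle, and 2 for external samplers.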
4414 int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) {
4415 switch (SkSLCombinedSamplerTypeForTextureType(texture->textureType())) {
4416 case SkSLType::kTexture2DSampler:
4417 return 0;
4418 case SkSLType::kTexture2DRectSampler:
4419 return 1;
4420 case SkSLType::kTextureExternalSampler:
4421 return 2;
4422 default:
4423             SK_ABORT("Unexpected sampler type");
4424 }
4425 }
4426
4427 #ifdef SK_ENABLE_DUMP_GPU
4428 #include "src/utils/SkJSONWriter.h"
4429 void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const {
4430 // We are called by the base class, which has already called beginObject(). We choose to nest
4431 // all of our caps information in a named sub-object.
4432 writer->beginObject("GL GPU");
4433
4434 const GrGLubyte* str;
4435 GL_CALL_RET(str, GetString(GR_GL_VERSION));
4436 writer->appendCString("GL_VERSION", (const char*)(str));
4437 GL_CALL_RET(str, GetString(GR_GL_RENDERER));
4438 writer->appendCString("GL_RENDERER", (const char*)(str));
4439 GL_CALL_RET(str, GetString(GR_GL_VENDOR));
4440 writer->appendCString("GL_VENDOR", (const char*)(str));
4441 GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
4442 writer->appendCString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));
4443
4444 writer->appendName("extensions");
4445 glInterface()->fExtensions.dumpJSON(writer);
4446
4447 writer->endObject();
4448 }
4449 #endif
4450