/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "src/gpu/ganesh/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/base/SkMathPriv.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/ganesh/GrAttachment.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDataUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrNativeRect.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrResourceCache.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrRingBuffer.h"
#include "src/gpu/ganesh/GrSemaphore.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/GrStencilSettings.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrTextureProxyPriv.h"
#include "src/gpu/ganesh/GrTracing.h"
#include "src/sksl/SkSLCompiler.h"

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}

GrGpu::~GrGpu() {
    this->callSubmittedProcs(false);
}

void GrGpu::initCapsAndCompiler(sk_sp<const GrCaps> caps) {
    fCaps = std::move(caps);
    fCompiler = std::make_unique<SkSL::Compiler>(fCaps->shaderCaps());
}

void GrGpu::disconnect(DisconnectType type) {}

////////////////////////////////////////////////////////////////////////////////

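// Verifies that the per-level pixel data in 'texels' is consistent with 'dimensions' and the
// caps. Each level that supplies pixels must have legal row bytes for that level's dimensions:
// at least width*bpp (and a multiple of bpp) when the backend supports arbitrary row bytes,
// exactly width*bpp otherwise. A level count > 1 must describe a full chain down to 1x1, and
// pixels may be provided for no levels, the base level only, or every level.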
static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
                                  const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    auto bpp = GrColorTypeBytesPerPixel(texelColorType);
    int w = dimensions.fWidth;
    int h = dimensions.fHeight;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
}

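// Shared path for uncompressed texture creation: rejects compressed formats, validates the
// surface parameters against the caps, normalizes the sample count for renderable textures,
// and then hands off to the backend-specific onCreateTexture().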
sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrTextureType textureType,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            skgpu::Budgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask,
                                            std::string_view label) {
    if (this->caps()->isFormatCompressed(format)) {
        // Call GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipmapped mipmapped = mipLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions,
                                             format,
                                             renderable,
                                             renderTargetSampleCnt,
                                             mipmapped,
                                             textureType)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask,
                                     label);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}

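// For a mipmapped texture the full level count is derived from the largest dimension:
// 32 - SkCLZ(maxDim) equals floor(log2(maxDim)) + 1. For example, a 100x50 texture gets
// 32 - SkCLZ(100) = 7 levels: 100x50, 50x25, 25x12, 12x6, 6x3, 3x1, 1x1.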
sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      GrMipmapped mipmapped,
                                      skgpu::Budgeted budgeted,
                                      GrProtected isProtected,
                                      std::string_view label) {
    int mipLevelCount = 1;
    if (mipmapped == GrMipmapped::kYes) {
        mipLevelCount =
                32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
    }
    uint32_t levelClearMask =
            this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         mipLevelCount,
                                         levelClearMask,
                                         label);
    if (tex && mipmapped == GrMipmapped::kYes && levelClearMask) {
        tex->markMipmapsClean();
    }

    return tex;
}

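// In levelClearMask, bit i being set asks the backend to clear mip level i at creation time.
// Levels that will be uploaded immediately (because the caller supplied pixel data for them)
// need no clear, so when texel data is present only the pixel-less levels contribute bits.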
sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      skgpu::Budgeted budgeted,
                                      GrProtected isProtected,
                                      GrColorType textureColorType,
                                      GrColorType srcColorType,
                                      const GrMipLevel texels[],
                                      int texelLevelCount,
                                      std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (texelLevelCount) {
        if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
                                   this->caps())) {
            return nullptr;
        }
    }

    int mipLevelCount = std::max(1, texelLevelCount);
    uint32_t levelClearMask = 0;
    if (this->caps()->shouldInitializeTextures()) {
        if (texelLevelCount) {
            for (int i = 0; i < mipLevelCount; ++i) {
                if (!texels[i].fPixels) {
                    levelClearMask |= static_cast<uint32_t>(1 << i);
                }
            }
        } else {
            levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
        }
    }

    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         texelLevelCount,
                                         levelClearMask,
                                         label);
    if (tex) {
        bool markMipLevelsClean = false;
        // Currently, if level 0 does not have pixels then no other level may, as enforced by
        // validate_texel_levels.
        if (texelLevelCount && texels[0].fPixels) {
            if (!this->writePixels(tex.get(),
                                   SkIRect::MakeSize(dimensions),
                                   textureColorType,
                                   srcColorType,
                                   texels,
                                   texelLevelCount)) {
                return nullptr;
            }
            // Currently, if level 1 of the mipmap has pixel data then so must all other levels,
            // as enforced by validate_texel_levels.
            markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
            fStats.incTextureUploads();
        } else if (levelClearMask && mipLevelCount > 1) {
            markMipLevelsClean = true;
        }
        if (markMipLevelsClean) {
            tex->markMipmapsClean();
        }
    }
    return tex;
}

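// Compressed texture creation validates the payload up front: data is required, and it must be
// at least as large as SkCompressedDataSize() computes for the format, dimensions, and mip
// status, so backends never receive a compressed texture without valid initial data through
// this path.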
sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                skgpu::Budgeted budgeted,
                                                GrMipmapped mipmapped,
                                                GrProtected isProtected,
                                                const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note: if we relax the requirement that data must be provided then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }

    // TODO: expand CompressedDataIsCorrect to work here too
    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        return nullptr;
    }

    if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
        return nullptr;
    }

    if (dataSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                        mipmapped == GrMipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipmapped, isProtected,
                                           data, dataSize);
}

sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership,
                                           GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
}

sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
        tex->asRenderTarget()->setRequiresManualMSAAResolve();
    }
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT);
    // Guard against a failed wrap before dereferencing the result.
    if (rt && backendRT.isFramebufferOnly()) {
        rt->setFramebufferOnly();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan so we default to returning nullptr here.
    return nullptr;
}

sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size,
                                       GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    if ((intendedType == GrGpuBufferType::kXferCpuToGpu ||
         intendedType == GrGpuBufferType::kXferGpuToCpu) &&
        accessPattern == kStatic_GrAccessPattern) {
        return nullptr;
    }
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern);
    // onCreateBuffer may fail, so check before touching the result.
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

bool GrGpu::copySurface(GrSurface* dst, const SkIRect& dstRect,
                        GrSurface* src, const SkIRect& srcRect,
                        GrSamplerState::Filter filter) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);
    SkASSERT(!src->framebufferOnly());

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, dstRect, src, srcRect, filter);
}

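// Row-bytes validation for readbacks mirrors the upload rules: the tight value is
// bytesPerPixel * width. Backends that support arbitrary row bytes accept anything at least
// that large (and a multiple of the pixel size); others require exactly the tight value.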
bool GrGpu::readPixels(GrSurface* surface,
                       SkIRect rect,
                       GrColorType surfaceColorType,
                       GrColorType dstColorType,
                       void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * rect.width());
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, rect, surfaceColorType, dstColorType, buffer, rowBytes);
}

bool GrGpu::writePixels(GrSurface* surface,
                        SkIRect rect,
                        GrColorType surfaceColorType,
                        GrColorType srcColorType,
                        const GrMipLevel texels[],
                        int mipLevelCount,
                        bool prepForTexSampling) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("Texture upload(%u) %ix%i",
                                    surface->uniqueID().asUInt(), rect.width(), rect.height());
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the surface.
        if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
            return false;
        }
    } else if (rect != SkIRect::MakeSize(surface->dimensions())) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    if (!validate_texel_levels(rect.size(), srcColorType, texels, mipLevelCount, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (!this->onWritePixels(surface,
                             rect,
                             surfaceColorType,
                             srcColorType,
                             texels,
                             mipLevelCount,
                             prepForTexSampling)) {
        return false;
    }

    this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
    fStats.incTextureUploads();

    return true;
}

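// Buffer-to-buffer transfers require the source to be a kXferCpuToGpu buffer, the destination
// to be any other buffer type, and the offsets and size to honor the backend's
// transferFromBufferToBufferAlignment(). These invariants are asserted below, so the caller
// must guarantee them.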
bool GrGpu::transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                       size_t srcOffset,
                                       sk_sp<GrGpuBuffer> dst,
                                       size_t dstOffset,
                                       size_t size) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(srcOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
    SkASSERT(dstOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
    SkASSERT(size % this->caps()->transferFromBufferToBufferAlignment() == 0);
    SkASSERT(srcOffset + size <= src->size());
    SkASSERT(dstOffset + size <= dst->size());
    SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);

    this->handleDirtyContext();
    if (!this->onTransferFromBufferToBuffer(std::move(src),
                                            srcOffset,
                                            std::move(dst),
                                            dstOffset,
                                            size)) {
        return false;
    }

    fStats.incBufferTransfers();

    return true;
}

bool GrGpu::transferPixelsTo(GrTexture* texture,
                             SkIRect rect,
                             GrColorType textureColorType,
                             GrColorType bufferColorType,
                             sk_sp<GrGpuBuffer> transferBuffer,
                             size_t offset,
                             size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    SkASSERT(transferBuffer->intendedType() == GrGpuBufferType::kXferCpuToGpu);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture.
    if (!SkIRect::MakeSize(texture->dimensions()).contains(rect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * rect.width())) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * rect.width())) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (!this->onTransferPixelsTo(texture,
                                  rect,
                                  textureColorType,
                                  bufferColorType,
                                  std::move(transferBuffer),
                                  offset,
                                  rowBytes)) {
        return false;
    }

    this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
    fStats.incTransfersToTexture();

    return true;
}

bool GrGpu::transferPixelsFrom(GrSurface* surface,
                               SkIRect rect,
                               GrColorType surfaceColorType,
                               GrColorType bufferColorType,
                               sk_sp<GrGpuBuffer> transferBuffer,
                               size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(transferBuffer->intendedType() == GrGpuBufferType::kXferGpuToCpu);
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface.
    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    this->handleDirtyContext();
    if (!this->onTransferPixelsFrom(surface,
                                    rect,
                                    surfaceColorType,
                                    bufferColorType,
                                    std::move(transferBuffer),
                                    offset)) {
        return false;
    }

    fStats.incTransfersFromSurface();

    return true;
}

bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipmapSupport());
    SkASSERT(texture->mipmapped() == GrMipmapped::kYes);
    if (!texture->mipmapsAreDirty()) {
        // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on
        // the actual target. This may be caused by things that the drawingManager could not
        // predict, i.e., ops that don't draw anything, aborting a draw for exceptional
        // circumstances, etc.
        // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
        return true;
    }
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->markMipmapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target, resolveRect);
}

void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // A non-empty bounds (or no bounds) means something was written. A write to just the base
    // level dirties the rest of the mip chain; a write that covered every level leaves it clean.
    if (nullptr == bounds || !bounds->isEmpty()) {
        GrTexture* texture = surface->asTexture();
        if (texture) {
            if (mipLevels == 1) {
                texture->markMipmapsDirty();
            } else {
                texture->markMipmapsClean();
            }
        }
    }
}

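// For each requested signal semaphore we either wrap the client-provided backend semaphore
// (kWillSignal) or create a fresh one and hand its backend object back through the flush info.
// A semaphore that fails to wrap was invalid to begin with, so it is silently skipped.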
void GrGpu::executeFlushInfo(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurface::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const skgpu::MutableTextureState* newState) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
            new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
    if (this->caps()->semaphoreSupport() && info.fNumSemaphores) {
        for (size_t i = 0; i < info.fNumSemaphores; ++i) {
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphores[i] = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrSemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
                // If we failed to wrap the semaphore it means the client didn't give us a valid
                // semaphore to begin with. Therefore, it is fine to not signal it.
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                }
            } else {
                semaphores[i] = resourceProvider->makeSemaphore(false);
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                    info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
                }
            }
        }
    }

    if (info.fFinishedProc) {
        this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
    }

    if (info.fSubmittedProc) {
        fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
    }

    // We currently don't support passing in new surface state for multiple proxies here. The only
    // time we have multiple proxies is if we are flushing a yuv SkImage, which won't have state
    // updates anyway.
    SkASSERT(!newState || proxies.size() == 1);
    SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
    this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, access, newState);
}

GrOpsRenderPass* GrGpu::getOpsRenderPass(
        GrRenderTarget* renderTarget,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
#if SK_HISTOGRAMS_ENABLED
    fCurrentSubmitRenderPassCount++;
#endif
    fStats.incRenderPasses();
    return this->onGetOpsRenderPass(renderTarget, useMSAASurface, stencil, origin, bounds,
                                    colorInfo, stencilInfo, sampledProxies,
                                    renderPassXferBarriers);
}

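// A submit detaches any staging buffers and kicks the uniforms ring buffer before the backend
// submit runs, and only afterwards fires the submitted procs with the success value, so clients
// hear about each submit exactly once.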
bool GrGpu::submitToGpu(bool syncCpu) {
    this->stats()->incNumSubmitToGpus();

    if (auto manager = this->stagingBufferManager()) {
        manager->detachBuffers();
    }

    if (auto uniformsBuffer = this->uniformsRingBuffer()) {
        uniformsBuffer->startSubmit(this);
    }

    bool submitted = this->onSubmitToGpu(syncCpu);

    this->callSubmittedProcs(submitted);

    this->reportSubmitHistograms();

    return submitted;
}

void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
    // values we can add SK_HISTOGRAM_CUSTOM_COUNTS, but that uses a bucket count smaller than
    // the number of actual values.
    static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif

    this->onReportSubmitHistograms();
}

bool GrGpu::checkAndResetOOMed() {
    if (fOOMed) {
        fOOMed = false;
        return true;
    }
    return false;
}

void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.size(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.clear();
}

#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS

void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
    out->appendf("Number of Scratch MSAA Attachments reused: %d\n",
                 fNumScratchMSAAAttachmentsReused);
    out->appendf("Number of Render Passes: %d\n", fRenderPasses);
    out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

    // Enable this block to output CSV-style stats for program pre-compilation.
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_passes"));
    values->push_back(fRenderPasses);
    keys->push_back(SkString("reordered_dags_over_budget"));
    values->push_back(fNumReorderedDAGsOverBudget);
}

#endif // GR_GPU_STATS
#endif // GR_TEST_UTILS

bool GrGpu::CompressedDataIsCorrect(SkISize dimensions,
                                    SkImage::CompressionType compressionType,
                                    GrMipmapped mipmapped,
                                    const void* data,
                                    size_t length) {
    size_t computedSize = SkCompressedDataSize(compressionType,
                                               dimensions,
                                               nullptr,
                                               mipmapped == GrMipmapped::kYes);
    return computedSize == length;
}

GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             GrRenderable renderable,
                                             GrMipmapped mipmapped,
                                             GrProtected isProtected,
                                             std::string_view label) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    if (caps->isFormatCompressed(format)) {
        // Compressed formats must go through the createCompressedBackendTexture API.
        return {};
    }

    if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipmapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateBackendTexture(
            dimensions, format, renderable, mipmapped, isProtected, label);
}

bool GrGpu::clearBackendTexture(const GrBackendTexture& backendTexture,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                std::array<float, 4> color) {
    if (!backendTexture.isValid()) {
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
}

GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       GrMipmapped mipmapped,
                                                       GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API.
        return {};
    }

    if (dimensions.isEmpty() ||
        dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipmapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateCompressedBackendTexture(dimensions, format, mipmapped, isProtected);
}

bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                           sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                           const void* data,
                                           size_t length) {
    SkASSERT(data);

    if (!backendTexture.isValid()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the updateBackendTexture API.
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    GrMipmapped mipmapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;

    if (!CompressedDataIsCorrect(backendTexture.dimensions(),
                                 compressionType,
                                 mipmapped,
                                 data,
                                 length)) {
        return false;
    }

    return this->onUpdateCompressedBackendTexture(backendTexture,
                                                  std::move(finishedCallback),
                                                  data,
                                                  length);
}
