/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "src/gpu/ganesh/GrGpu.h"

#include "include/core/SkSize.h"
#include "include/core/SkString.h"
#include "include/core/SkSurface.h"
#include "include/core/SkTextureCompressionType.h"
#include "include/gpu/ganesh/GrBackendSemaphore.h"
#include "include/gpu/ganesh/GrBackendSurface.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/private/base/SkTo.h"
#include "src/base/SkMathPriv.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrRingBuffer.h"
#include "src/gpu/ganesh/GrSemaphore.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/GrSurface.h"
#include "src/gpu/ganesh/GrTexture.h"

#include <algorithm>
#include <utility>

using namespace skia_private;

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}

GrGpu::~GrGpu() {
    this->callSubmittedProcs(false);
}

void GrGpu::initCaps(sk_sp<const GrCaps> caps) {
    fCaps = std::move(caps);
}

void GrGpu::disconnect(DisconnectType type) {}

////////////////////////////////////////////////////////////////////////////////

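// Checks that a set of mip levels passed to a texture create/write is self-consistent: each
// provided level's row bytes must be at least (and, without writePixelsRowBytesSupport, exactly)
// width * bytes-per-pixel and a multiple of the pixel size, and pixel data must be supplied for
// either no levels, just the base level, or every level down to 1x1.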
static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
                                  const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    auto bpp = GrColorTypeBytesPerPixel(texelColorType);
    int w = dimensions.fWidth;
    int h = dimensions.fHeight;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
}

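// Shared back end for the createTexture() overloads: rejects compressed formats, validates the
// surface parameters against the caps, normalizes the sample count for renderable textures, and
// dispatches to the backend's onCreateTexture().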
sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrTextureType textureType,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            skgpu::Budgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask,
                                            std::string_view label) {
    if (this->caps()->isFormatCompressed(format)) {
        // Call GrGpu::createCompressedTexture.
        return nullptr;
    }

    skgpu::Mipmapped mipmapped = mipLevelCount > 1 ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions,
                                             format,
                                             renderable,
                                             renderTargetSampleCnt,
                                             mipmapped,
                                             textureType)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask,
                                     label);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      skgpu::Mipmapped mipmapped,
                                      skgpu::Budgeted budgeted,
                                      GrProtected isProtected,
                                      std::string_view label) {
    int mipLevelCount = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
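        // Full mip chain count = floor(log2(max dimension)) + 1, computed via count-leading-
        // zeros. E.g. a 100x200 texture gives 32 - SkCLZ(200) = 32 - 24 = 8 levels
        // (200 -> 100 -> 50 -> 25 -> 12 -> 6 -> 3 -> 1).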
        mipLevelCount =
                32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
    }
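    // One bit per mip level, low bit = base level; when the caps require initialized textures we
    // request a clear of every level.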
    uint32_t levelClearMask =
            this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         mipLevelCount,
                                         levelClearMask,
                                         label);
    if (tex && mipmapped == skgpu::Mipmapped::kYes && levelClearMask) {
        tex->markMipmapsClean();
    }

    return tex;
}

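// createTexture() overload that uploads initial texel data. Data may be provided for no levels,
// just the base level, or the full mip chain (see validate_texel_levels); any level left without
// data is cleared when the caps demand initialized textures.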
sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      skgpu::Budgeted budgeted,
                                      GrProtected isProtected,
                                      GrColorType textureColorType,
                                      GrColorType srcColorType,
                                      const GrMipLevel texels[],
                                      int texelLevelCount,
                                      std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (texelLevelCount) {
        if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
                                   this->caps())) {
            return nullptr;
        }
    }

    int mipLevelCount = std::max(1, texelLevelCount);
    uint32_t levelClearMask = 0;
    if (this->caps()->shouldInitializeTextures()) {
        if (texelLevelCount) {
            for (int i = 0; i < mipLevelCount; ++i) {
                if (!texels[i].fPixels) {
                    levelClearMask |= static_cast<uint32_t>(1 << i);
                }
            }
        } else {
            levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
        }
    }

    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         texelLevelCount,
                                         levelClearMask,
                                         label);
    if (tex) {
        bool markMipLevelsClean = false;
        // Currently if level 0 does not have pixels then no other level may, as enforced by
        // validate_texel_levels.
        if (texelLevelCount && texels[0].fPixels) {
            if (!this->writePixels(tex.get(),
                                   SkIRect::MakeSize(dimensions),
                                   textureColorType,
                                   srcColorType,
                                   texels,
                                   texelLevelCount)) {
                return nullptr;
            }
            // Currently if level[1] of the mip map has pixel data then so must all other levels,
            // as enforced by validate_texel_levels.
            markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
            fStats.incTextureUploads();
        } else if (levelClearMask && mipLevelCount > 1) {
            markMipLevelsClean = true;
        }
        if (markMipLevelsClean) {
            tex->markMipmapsClean();
        }
    }
    return tex;
}

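// Creates a compressed texture from raw data. The data must be present and at least as large as
// SkCompressedDataSize() reports for the format, dimensions, and mip status.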
sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                skgpu::Budgeted budgeted,
                                                skgpu::Mipmapped mipmapped,
                                                GrProtected isProtected,
                                                const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note if we relax the requirement that data must be provided then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }

    // TODO: expand CompressedDataIsCorrect to work here too
    SkTextureCompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkTextureCompressionType::kNone) {
        return nullptr;
    }

    if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
        return nullptr;
    }

    if (dataSize <
        SkCompressedDataSize(
                compressionType, dimensions, nullptr, mipmapped == skgpu::Mipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipmapped, isProtected,
                                           data, dataSize);
}

sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                skgpu::Budgeted budgeted,
                                                skgpu::Mipmapped mipMapped,
                                                GrProtected isProtected,
                                                OH_NativeBuffer* nativeBuffer,
                                                size_t bufferSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    if (!nativeBuffer) {
        return nullptr;
    }

    SkTextureCompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkTextureCompressionType::kNone) {
        return nullptr;
    }

    if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
        return nullptr;
    }

    if (bufferSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                          mipMapped == skgpu::Mipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected,
                                           nativeBuffer, bufferSize);
}

sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership,
                                           GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
}

sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
        tex->asRenderTarget()->setRequiresManualMSAAResolve();
    }
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT);
    if (backendRT.isFramebufferOnly()) {
        rt->setFramebufferOnly();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan so we default to returning nullptr here
    return nullptr;
}

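// Illustrative call shape for createBuffer() (a hypothetical sketch, not a call site in this
// file; real callers live in GrResourceProvider and friends):
//     sk_sp<GrGpuBuffer> vb = gpu->createBuffer(vertexDataSize,
//                                               GrGpuBufferType::kVertex,
//                                               kDynamic_GrAccessPattern);
// Note below that transfer buffers may not be created with a static access pattern.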
sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size,
                                       GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    if ((intendedType == GrGpuBufferType::kXferCpuToGpu ||
         intendedType == GrGpuBufferType::kXferGpuToCpu) &&
        accessPattern == kStatic_GrAccessPattern) {
        return nullptr;
    }
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern);
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

bool GrGpu::copySurface(GrSurface* dst, const SkIRect& dstRect,
                        GrSurface* src, const SkIRect& srcRect,
                        GrSamplerState::Filter filter) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);
    SkASSERT(!src->framebufferOnly());

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, dstRect, src, srcRect, filter);
}

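// Reads a rectangle of pixels from the surface into `buffer`. Without readPixelsRowBytesSupport
// the caller must pass exactly tightly-packed row bytes; otherwise any pixel-size-aligned value
// at least as large as the tight row bytes is accepted.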
bool GrGpu::readPixels(GrSurface* surface,
                       SkIRect rect,
                       GrColorType surfaceColorType,
                       GrColorType dstColorType,
                       void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));
    SkASSERT(dstColorType != GrColorType::kUnknown);

    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * rect.width());
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, rect, surfaceColorType, dstColorType, buffer, rowBytes);
}

bool GrGpu::writePixels(GrSurface* surface,
                        SkIRect rect,
                        GrColorType surfaceColorType,
                        GrColorType srcColorType,
                        const GrMipLevel texels[],
                        int mipLevelCount,
                        bool prepForTexSampling) {
#ifdef SKIA_OHOS
    HITRACE_OHOS_NAME_FMT_LEVEL(DebugTraceLevel::DETAIL, "Texture upload(%u) %ix%i",
                                surface->uniqueID().asUInt(), rect.width(), rect.height());
#else
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("Texture upload(%u) %ix%i",
                                    surface->uniqueID().asUInt(), rect.width(), rect.height());
#endif
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the surface
        if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
            return false;
        }
    } else if (rect != SkIRect::MakeSize(surface->dimensions())) {
        // We require that if the texels are mipped, then the write region is the entire surface
        return false;
    }

    if (!validate_texel_levels(rect.size(), srcColorType, texels, mipLevelCount, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (!this->onWritePixels(surface,
                             rect,
                             surfaceColorType,
                             srcColorType,
                             texels,
                             mipLevelCount,
                             prepForTexSampling)) {
        return false;
    }

    this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
    fStats.incTextureUploads();

    return true;
}

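// Copies between GPU buffers. Offsets and size must honor the backend's
// transferFromBufferToBufferAlignment(), the source must be a CPU-to-GPU transfer buffer, and
// both ranges must lie within their buffers (all enforced by the asserts below).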
bool GrGpu::transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                       size_t srcOffset,
                                       sk_sp<GrGpuBuffer> dst,
                                       size_t dstOffset,
                                       size_t size) {
    SkASSERT(src);
    SkASSERT(dst);
    SkASSERT(srcOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
    SkASSERT(dstOffset % this->caps()->transferFromBufferToBufferAlignment() == 0);
    SkASSERT(size % this->caps()->transferFromBufferToBufferAlignment() == 0);
    SkASSERT(srcOffset + size <= src->size());
    SkASSERT(dstOffset + size <= dst->size());
    SkASSERT(src->intendedType() == GrGpuBufferType::kXferCpuToGpu);
    SkASSERT(dst->intendedType() != GrGpuBufferType::kXferCpuToGpu);

    this->handleDirtyContext();
    if (!this->onTransferFromBufferToBuffer(std::move(src),
                                            srcOffset,
                                            std::move(dst),
                                            dstOffset,
                                            size)) {
        return false;
    }

    fStats.incBufferTransfers();

    return true;
}

bool GrGpu::transferPixelsTo(GrTexture* texture,
                             SkIRect rect,
                             GrColorType textureColorType,
                             GrColorType bufferColorType,
                             sk_sp<GrGpuBuffer> transferBuffer,
                             size_t offset,
                             size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    SkASSERT(transferBuffer->intendedType() == GrGpuBufferType::kXferCpuToGpu);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture
    if (!SkIRect::MakeSize(texture->dimensions()).contains(rect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * rect.width())) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * rect.width())) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (!this->onTransferPixelsTo(texture,
                                  rect,
                                  textureColorType,
                                  bufferColorType,
                                  std::move(transferBuffer),
                                  offset,
                                  rowBytes)) {
        return false;
    }

    this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
    fStats.incTransfersToTexture();

    return true;
}

bool GrGpu::transferPixelsFrom(GrSurface* surface,
                               SkIRect rect,
                               GrColorType surfaceColorType,
                               GrColorType bufferColorType,
                               sk_sp<GrGpuBuffer> transferBuffer,
                               size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(transferBuffer->intendedType() == GrGpuBufferType::kXferGpuToCpu);
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface
    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    this->handleDirtyContext();
    if (!this->onTransferPixelsFrom(surface,
                                    rect,
                                    surfaceColorType,
                                    bufferColorType,
                                    std::move(transferBuffer),
                                    offset)) {
        return false;
    }

    fStats.incTransfersFromSurface();

    return true;
}

bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipmapSupport());
    SkASSERT(texture->mipmapped() == skgpu::Mipmapped::kYes);
    if (!texture->mipmapsAreDirty()) {
        // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on the
        // actual target. This may be caused by things that the drawingManager could not predict,
        // i.e., ops that don't draw anything, aborting a draw for exceptional circumstances, etc.
        // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
        return true;
    }
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->markMipmapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target, resolveRect);
}

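// Called after any surface write. `mipLevels` is the number of levels the write touched: 1 means
// only the base level changed (so any existing mip chain is now stale), while a write covering
// every level leaves the chain valid.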
void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain as dirty if and only if there is a non-empty bounds.
    if (nullptr == bounds || !bounds->isEmpty()) {
        GrTexture* texture = surface->asTexture();
        if (texture) {
            if (mipLevels == 1) {
                texture->markMipmapsDirty();
            } else {
                texture->markMipmapsClean();
            }
        }
    }
}

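// Handles the non-command-buffer parts of a flush: creates or wraps the requested signal
// semaphores, ends any timer query, registers finished/submitted callbacks, and forwards backend
// surface-state updates to the backend.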
void GrGpu::executeFlushInfo(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurfaces::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             std::optional<GrTimerQuery> timerQuery,
                             const skgpu::MutableTextureState* newState) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
            new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
    if (this->caps()->backendSemaphoreSupport() && info.fNumSemaphores) {
        for (size_t i = 0; i < info.fNumSemaphores; ++i) {
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphores[i] = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrSemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
                // If we failed to wrap the semaphore it means the client didn't give us a valid
                // semaphore to begin with. Therefore, it is fine to not signal it.
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                }
            } else {
                semaphores[i] = resourceProvider->makeSemaphore(false);
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                    info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
                }
            }
        }
    }

    if (timerQuery) {
        this->endTimerQuery(*timerQuery);
    }

    skgpu::AutoCallback callback;
    if (info.fFinishedWithStatsProc) {
        callback = skgpu::AutoCallback(info.fFinishedWithStatsProc, info.fFinishedContext);
    } else {
        callback = skgpu::AutoCallback(info.fFinishedProc, info.fFinishedContext);
    }
    if (callback) {
        this->addFinishedCallback(std::move(callback), std::move(timerQuery));
    }

    if (info.fSubmittedProc) {
        fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
    }

    // We currently don't support passing in new surface state for multiple proxies here. The only
    // time we have multiple proxies is if we are flushing a yuv SkImage which won't have state
    // updates anyways.
    SkASSERT(!newState || proxies.size() == 1);
    SkASSERT(!newState || access == SkSurfaces::BackendSurfaceAccess::kNoAccess);
    this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, access, newState);
}

GrOpsRenderPass* GrGpu::getOpsRenderPass(
        GrRenderTarget* renderTarget,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const TArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
#if SK_HISTOGRAMS_ENABLED
    fCurrentSubmitRenderPassCount++;
#endif
    fStats.incRenderPasses();
    return this->onGetOpsRenderPass(renderTarget, useMSAASurface, stencil, origin, bounds,
                                    colorInfo, stencilInfo, sampledProxies, renderPassXferBarriers);
}

bool GrGpu::submitToGpu(const GrSubmitInfo& info) {
    this->stats()->incNumSubmitToGpus();

    if (auto manager = this->stagingBufferManager()) {
        manager->detachBuffers();
    }

    if (auto uniformsBuffer = this->uniformsRingBuffer()) {
        uniformsBuffer->startSubmit(this);
    }

    bool submitted = this->onSubmitToGpu(info);

    this->callSubmittedProcs(submitted);

    this->reportSubmitHistograms();

    return submitted;
}

void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
    // values we can add SK_HISTOGRAM_CUSTOM_COUNTS, but that has a number of buckets that is
    // less than the number of actual values.
    [[maybe_unused]] static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif

    this->onReportSubmitHistograms();
}

bool GrGpu::checkAndResetOOMed() {
    if (fOOMed) {
        fOOMed = false;
        return true;
    }
    return false;
}

void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.size(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.clear();
}

#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if defined(GPU_TEST_UTILS)

#if GR_GPU_STATS

void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused %d\n", fNumScratchTexturesReused);
    out->appendf("Number of Scratch MSAA Attachments reused %d\n",
                 fNumScratchMSAAAttachmentsReused);
    out->appendf("Number of Render Passes: %d\n", fRenderPasses);
    out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

    // enable this block to output CSV-style stats for program pre-compilation
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}

void GrGpu::Stats::dumpKeyValuePairs(TArray<SkString>* keys, TArray<double>* values) {
    keys->push_back(SkString("render_passes"));
    values->push_back(fRenderPasses);
    keys->push_back(SkString("reordered_dags_over_budget"));
    values->push_back(fNumReorderedDAGsOverBudget);
}

#endif // GR_GPU_STATS
#endif // defined(GPU_TEST_UTILS)

bool GrGpu::CompressedDataIsCorrect(SkISize dimensions,
                                    SkTextureCompressionType compressionType,
                                    skgpu::Mipmapped mipmapped,
                                    const void* data,
                                    size_t length) {
    size_t computedSize = SkCompressedDataSize(
            compressionType, dimensions, nullptr, mipmapped == skgpu::Mipmapped::kYes);
    return computedSize == length;
}

GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             GrRenderable renderable,
                                             skgpu::Mipmapped mipmapped,
                                             GrProtected isProtected,
                                             std::string_view label) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    if (caps->isFormatCompressed(format)) {
        // Compressed formats must go through the createCompressedBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipmapped == skgpu::Mipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateBackendTexture(
            dimensions, format, renderable, mipmapped, isProtected, label);
}

bool GrGpu::clearBackendTexture(const GrBackendTexture& backendTexture,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                std::array<float, 4> color) {
    if (!backendTexture.isValid()) {
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
}

GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    SkTextureCompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkTextureCompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() ||
        dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipmapped == skgpu::Mipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateCompressedBackendTexture(dimensions, format, mipmapped, isProtected);
}

bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                           sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                           const void* data,
                                           size_t length) {
    SkASSERT(data);

    if (!backendTexture.isValid()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();

    SkTextureCompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkTextureCompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    skgpu::Mipmapped mipmapped =
            backendTexture.hasMipmaps() ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;

    if (!CompressedDataIsCorrect(backendTexture.dimensions(),
                                 compressionType,
                                 mipmapped,
                                 data,
                                 length)) {
        return false;
    }

    return this->onUpdateCompressedBackendTexture(backendTexture,
                                                  std::move(finishedCallback),
                                                  data,
                                                  length);
}