/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrAttachment.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrRingBuffer.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/sksl/SkSLCompiler.h"

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}

GrGpu::~GrGpu() {
    this->callSubmittedProcs(false);
}

void GrGpu::initCapsAndCompiler(sk_sp<const GrCaps> caps) {
    fCaps = std::move(caps);
    fCompiler = std::make_unique<SkSL::Compiler>(fCaps->shaderCaps());
}

void GrGpu::disconnect(DisconnectType type) {}

////////////////////////////////////////////////////////////////////////////////

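// Validates client-supplied texel data before an upload. Pixel data may be provided for just
// the base level, for every level, or for no level at all; when more than one level is given,
// the level dimensions must walk the full chain down to 1x1. Row bytes must be tight unless
// the backend supports caller-specified row bytes, in which case they must be at least the
// tight size and a whole multiple of the bytes per pixel.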
static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
                                  const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    auto bpp = GrColorTypeBytesPerPixel(texelColorType);
    int w = dimensions.fWidth;
    int h = dimensions.fHeight;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
}

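// Shared implementation behind the createTexture() overloads: validates the surface parameters
// against the caps, normalizes the render target sample count, and dispatches to the backend's
// onCreateTexture().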
sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrTextureType textureType,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            SkBudgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask) {
    if (this->caps()->isFormatCompressed(format)) {
        // Compressed textures must be created via GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipmapped mipMapped = mipLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions,
                                             format,
                                             renderable,
                                             renderTargetSampleCnt,
                                             mipMapped,
                                             textureType)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      GrMipmapped mipMapped,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected) {
    int mipLevelCount = 1;
    if (mipMapped == GrMipmapped::kYes) {
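        // A full mip chain has floor(log2(max(width, height))) + 1 levels; counting the leading
        // zeros of the larger dimension computes exactly that.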
        mipLevelCount =
                32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
    }
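    // If the backend requires new textures to be initialized, request a clear of every level.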
    uint32_t levelClearMask =
            this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         mipLevelCount,
                                         levelClearMask);
    if (tex && mipMapped == GrMipmapped::kYes && levelClearMask) {
        tex->markMipmapsClean();
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrTextureType textureType,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected,
                                      GrColorType textureColorType,
                                      GrColorType srcColorType,
                                      const GrMipLevel texels[],
                                      int texelLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (texelLevelCount) {
        if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
                                   this->caps())) {
            return nullptr;
        }
    }

    int mipLevelCount = std::max(1, texelLevelCount);
    uint32_t levelClearMask = 0;
    if (this->caps()->shouldInitializeTextures()) {
        if (texelLevelCount) {
            for (int i = 0; i < mipLevelCount; ++i) {
                if (!texels[i].fPixels) {
                    levelClearMask |= static_cast<uint32_t>(1 << i);
                }
            }
        } else {
            levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
        }
    }

    auto tex = this->createTextureCommon(dimensions,
                                         format,
                                         textureType,
                                         renderable,
                                         renderTargetSampleCnt,
                                         budgeted,
                                         isProtected,
                                         texelLevelCount,
                                         levelClearMask);
    if (tex) {
        bool markMipLevelsClean = false;
        // Currently if level 0 does not have pixels then no other level may, as enforced by
        // validate_texel_levels.
        if (texelLevelCount && texels[0].fPixels) {
            if (!this->writePixels(tex.get(),
                                   SkIRect::MakeSize(dimensions),
                                   textureColorType,
                                   srcColorType,
                                   texels,
                                   texelLevelCount)) {
                return nullptr;
            }
            // Currently if level[1] of the mip map has pixel data then so must all other
            // levels, as enforced by validate_texel_levels.
            markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
            fStats.incTextureUploads();
        } else if (levelClearMask && mipLevelCount > 1) {
            markMipLevelsClean = true;
        }
        if (markMipLevelsClean) {
            tex->markMipmapsClean();
        }
    }
    return tex;
}

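// Compressed textures are created from pre-compressed data only: the format must map to a
// compression type and be texturable, and the supplied data must be at least the computed size
// for the dimensions and mip state.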
sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                SkBudgeted budgeted,
                                                GrMipmapped mipMapped,
                                                GrProtected isProtected,
                                                const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note: if we relax the requirement that data must be provided, then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }

    // TODO: expand CompressedDataIsCorrect to work here too
    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        return nullptr;
    }

    if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
        return nullptr;
    }

    if (dataSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                        mipMapped == GrMipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected,
                                           data, dataSize);
}

sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership,
                                           GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
}

sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat(), backendTex.textureType()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
        tex->asRenderTarget()->setRequiresManualMSAAResolve();
    }
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT);
    if (backendRT.isFramebufferOnly()) {
        rt->setFramebufferOnly();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan so we default to returning nullptr here
    return nullptr;
}

sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern, const void* data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    if (!this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                        const SkIPoint& dstPoint) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);
    SkASSERT(!src->framebufferOnly());

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, src, srcRect, dstPoint);
}

bool GrGpu::readPixels(GrSurface* surface,
                       SkIRect rect,
                       GrColorType surfaceColorType,
                       GrColorType dstColorType,
                       void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

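    // Rows must be tightly packed unless the backend supports caller-specified row bytes, in
    // which case they may be padded but must stay pixel-aligned.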
    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * rect.width());
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, rect, surfaceColorType, dstColorType, buffer, rowBytes);
}

bool GrGpu::writePixels(GrSurface* surface,
                        SkIRect rect,
                        GrColorType surfaceColorType,
                        GrColorType srcColorType,
                        const GrMipLevel texels[],
                        int mipLevelCount,
                        bool prepForTexSampling) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("Texture upload(%u) %ix%i",
                                    surface->uniqueID().asUInt(), rect.width(), rect.height());
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the
        // surface.
        if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
            return false;
        }
    } else if (rect != SkIRect::MakeSize(surface->dimensions())) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    if (!validate_texel_levels(rect.size(), srcColorType, texels, mipLevelCount, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface,
                            rect,
                            surfaceColorType,
                            srcColorType,
                            texels,
                            mipLevelCount,
                            prepForTexSampling)) {
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}

bool GrGpu::transferPixelsTo(GrTexture* texture,
                             SkIRect rect,
                             GrColorType textureColorType,
                             GrColorType bufferColorType,
                             sk_sp<GrGpuBuffer> transferBuffer,
                             size_t offset,
                             size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture
    if (!SkIRect::MakeSize(texture->dimensions()).contains(rect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * rect.width())) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * rect.width())) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture,
                                 rect,
                                 textureColorType,
                                 bufferColorType,
                                 std::move(transferBuffer),
                                 offset,
                                 rowBytes)) {
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}

bool GrGpu::transferPixelsFrom(GrSurface* surface,
                               SkIRect rect,
                               GrColorType surfaceColorType,
                               GrColorType bufferColorType,
                               sk_sp<GrGpuBuffer> transferBuffer,
                               size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->areColorTypeAndFormatCompatible(surfaceColorType,
                                                           surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface.
    if (!SkIRect::MakeSize(surface->dimensions()).contains(rect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface,
                                   rect,
                                   surfaceColorType,
                                   bufferColorType,
                                   std::move(transferBuffer),
                                   offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}

bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipmapSupport());
    SkASSERT(texture->mipmapped() == GrMipmapped::kYes);
    if (!texture->mipmapsAreDirty()) {
        // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on
        // the actual target. This may be caused by things that the drawingManager could not
        // predict, i.e., ops that don't draw anything, aborting a draw for exceptional
        // circumstances, etc.
        // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
        return true;
    }
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->markMipmapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target, resolveRect);
}

void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain as dirty if and only if the written bounds are non-empty (a null
    // bounds means the entire surface was written).
    if (nullptr == bounds || !bounds->isEmpty()) {
        GrTexture* texture = surface->asTexture();
        if (texture) {
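            // A write that touched only the base level invalidates any derived mip levels,
            // whereas a write that covered every level leaves the chain valid.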
            if (mipLevels == 1) {
                texture->markMipmapsDirty();
            } else {
                texture->markMipmapsClean();
            }
        }
    }
}

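// Applies the client's flush state: wraps or creates the requested signal semaphores,
// registers finished/submitted callbacks, and prepares the flushed surfaces for backend access
// and any mutable-state transition.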
void GrGpu::executeFlushInfo(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurface::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const GrBackendSurfaceMutableState* newState) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
            new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
    if (this->caps()->semaphoreSupport() && info.fNumSemaphores) {
        for (size_t i = 0; i < info.fNumSemaphores; ++i) {
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphores[i] = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrSemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
                // If we failed to wrap the semaphore it means the client didn't give us a valid
                // semaphore to begin with. Therefore, it is fine to not signal it.
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                }
            } else {
                semaphores[i] = resourceProvider->makeSemaphore(false);
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                    info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
                }
            }
        }
    }

    if (info.fFinishedProc) {
        this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
    }

    if (info.fSubmittedProc) {
        fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
    }

    // We currently don't support passing in new surface state for multiple proxies here. The
    // only time we have multiple proxies is if we are flushing a yuv SkImage, which won't have
    // state updates anyway.
    SkASSERT(!newState || proxies.size() == 1);
    SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
    this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, access, newState);
}

GrOpsRenderPass* GrGpu::getOpsRenderPass(
        GrRenderTarget* renderTarget,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
#if SK_HISTOGRAMS_ENABLED
    fCurrentSubmitRenderPassCount++;
#endif
    fStats.incRenderPasses();
    return this->onGetOpsRenderPass(renderTarget, useMSAASurface, stencil, origin, bounds,
                                    colorInfo, stencilInfo, sampledProxies,
                                    renderPassXferBarriers);
}

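// Hands per-submit resources (staging buffers, the uniforms ring buffer) back before
// submission, submits the pending work to the backend, then notifies any submitted-proc
// callbacks with the result.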
bool GrGpu::submitToGpu(bool syncCpu) {
    this->stats()->incNumSubmitToGpus();

    if (auto manager = this->stagingBufferManager()) {
        manager->detachBuffers();
    }

    if (auto uniformsBuffer = this->uniformsRingBuffer()) {
        uniformsBuffer->startSubmit(this);
    }

    bool submitted = this->onSubmitToGpu(syncCpu);

    this->callSubmittedProcs(submitted);

    this->reportSubmitHistograms();

    return submitted;
}

void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
    // values, we can add SK_HISTOGRAM_CUSTOM_COUNTS, but that uses a number of buckets that is
    // less than the number of actual values.
    static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif

    this->onReportSubmitHistograms();
}

bool GrGpu::checkAndResetOOMed() {
    if (fOOMed) {
        fOOMed = false;
        return true;
    }
    return false;
}

void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.count(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.reset();
}

#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS

void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
    out->appendf("Number of Scratch MSAA Attachments reused: %d\n",
                 fNumScratchMSAAAttachmentsReused);
    out->appendf("Number of Render Passes: %d\n", fRenderPasses);
    out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

    // enable this block to output CSV-style stats for program pre-compilation
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_passes"));
    values->push_back(fRenderPasses);
    keys->push_back(SkString("reordered_dags_over_budget"));
    values->push_back(fNumReorderedDAGsOverBudget);
}

#endif // GR_GPU_STATS
#endif // GR_TEST_UTILS

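// A compressed upload is considered correct only if the payload is exactly the size computed
// for the compression type, dimensions, and mip state.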
bool GrGpu::CompressedDataIsCorrect(SkISize dimensions,
                                    SkImage::CompressionType compressionType,
                                    GrMipmapped mipMapped,
                                    const void* data,
                                    size_t length) {
    size_t computedSize = SkCompressedDataSize(compressionType,
                                               dimensions,
                                               nullptr,
                                               mipMapped == GrMipmapped::kYes);
    return computedSize == length;
}

GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    if (caps->isFormatCompressed(format)) {
        // Compressed formats must go through the createCompressedBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateBackendTexture(dimensions, format, renderable, mipMapped, isProtected);
}

bool GrGpu::clearBackendTexture(const GrBackendTexture& backendTexture,
                                sk_sp<GrRefCntedCallback> finishedCallback,
                                std::array<float, 4> color) {
    if (!backendTexture.isValid()) {
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
}

GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       GrMipmapped mipMapped,
                                                       GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() ||
        dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateCompressedBackendTexture(dimensions, format, mipMapped, isProtected);
}

bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                           sk_sp<GrRefCntedCallback> finishedCallback,
                                           const void* data,
                                           size_t length) {
    SkASSERT(data);

    if (!backendTexture.isValid()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Only compressed formats can be updated through this API.
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;

    if (!CompressedDataIsCorrect(backendTexture.dimensions(),
                                 compressionType,
                                 mipMapped,
                                 data,
                                 length)) {
        return false;
    }

    return this->onUpdateCompressedBackendTexture(backendTexture,
                                                  std::move(finishedCallback),
                                                  data,
                                                  length);
}