/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "src/gpu/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrAttachment.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrRingBuffer.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/sksl/SkSLCompiler.h"
#include "src/utils/SkJSONWriter.h"

////////////////////////////////////////////////////////////////////////////////
GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}

GrGpu::~GrGpu() {
    this->callSubmittedProcs(false);
}

void GrGpu::initCapsAndCompiler(sk_sp<const GrCaps> caps) {
    fCaps = std::move(caps);
    fCompiler = std::make_unique<SkSL::Compiler>(fCaps->shaderCaps());
}

void GrGpu::disconnect(DisconnectType type) {}

////////////////////////////////////////////////////////////////////////////////

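// Checks the row bytes and level layout of a proposed texel upload. Exactly three configurations
// are accepted: no level has pixel data, only the base level has pixel data, or every level of a
// complete mip chain (down to 1x1) has pixel data.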
static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
                                  const GrMipLevel* texels, int mipLevelCount,
                                  const GrCaps* caps) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    auto bpp = GrColorTypeBytesPerPixel(texelColorType);
    int w = dimensions.fWidth;
    int h = dimensions.fHeight;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
}

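// Shared implementation behind the createTexture() overloads: rejects compressed formats,
// validates the surface parameters against the caps, resolves the sample count for renderable
// textures, and hands the actual allocation to the backend's onCreateTexture().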
sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            SkBudgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask) {
    if (this->caps()->isFormatCompressed(format)) {
        // Compressed formats must go through GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipmapped mipMapped = mipLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                             mipMapped)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}

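// Creates a texture with no initial texel data. If the caps require initialized textures, every
// level is marked for clearing. A mipmapped texture gets a full chain of log2(max dimension) + 1
// levels, computed below with a count-leading-zeros trick.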
sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      GrMipmapped mipMapped,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected) {
    int mipLevelCount = 1;
    if (mipMapped == GrMipmapped::kYes) {
        mipLevelCount =
                32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
    }
    uint32_t levelClearMask =
            this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
    auto tex = this->createTextureCommon(dimensions, format, renderable, renderTargetSampleCnt,
                                         budgeted, isProtected, mipLevelCount, levelClearMask);
    if (tex && mipMapped == GrMipmapped::kYes && levelClearMask) {
        tex->markMipmapsClean();
    }
    return tex;
}

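// Creates a texture initialized from the given texel levels: the levels are validated up front,
// the texture is allocated, and the pixel data is then uploaded through writePixels().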
sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected,
                                      GrColorType textureColorType,
                                      GrColorType srcColorType,
                                      const GrMipLevel texels[],
                                      int texelLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (texelLevelCount) {
        if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
                                   this->caps())) {
            return nullptr;
        }
    }

    int mipLevelCount = std::max(1, texelLevelCount);
    uint32_t levelClearMask = 0;
    if (this->caps()->shouldInitializeTextures()) {
        if (texelLevelCount) {
            for (int i = 0; i < mipLevelCount; ++i) {
                if (!texels[i].fPixels) {
                    levelClearMask |= static_cast<uint32_t>(1 << i);
                }
            }
        } else {
            levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
        }
    }

    auto tex = this->createTextureCommon(dimensions, format, renderable, renderTargetSampleCnt,
                                         budgeted, isProtected, texelLevelCount, levelClearMask);
    if (tex) {
        bool markMipLevelsClean = false;
        // Currently if level 0 does not have pixels then no other level may, as enforced by
        // validate_texel_levels.
        if (texelLevelCount && texels[0].fPixels) {
            if (!this->writePixels(tex.get(), 0, 0, dimensions.fWidth, dimensions.fHeight,
                                   textureColorType, srcColorType, texels, texelLevelCount)) {
                return nullptr;
            }
            // Currently if level[1] of the mip map has pixel data then so must all other levels,
            // as enforced by validate_texel_levels.
            markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
            fStats.incTextureUploads();
        } else if (levelClearMask && mipLevelCount > 1) {
            markMipLevelsClean = true;
        }
        if (markMipLevelsClean) {
            tex->markMipmapsClean();
        }
    }
    return tex;
}

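// Compressed analogue of createTexture(). Data must be provided, and dataSize must be at least
// the computed size of the compressed payload (including mips when requested).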
sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                SkBudgeted budgeted,
                                                GrMipmapped mipMapped,
                                                GrProtected isProtected,
                                                const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note: if we relax the requirement that data must be provided then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }
    if (!this->caps()->isFormatTexturable(format)) {
        return nullptr;
    }

    // TODO: expand CompressedDataIsCorrect to work here too
    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);

    if (dataSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                        mipMapped == GrMipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected,
                                           data, dataSize);
}

sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership,
                                           GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
}

sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
        tex->asRenderTarget()->setRequiresManualMSAAResolve();
    }
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT);
    if (rt && backendRT.isFramebufferOnly()) {
        rt->setFramebufferOnly();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan, so we default to returning nullptr here.
    return nullptr;
}

sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern, const void* data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                        const SkIPoint& dstPoint) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);
    SkASSERT(!src->framebufferOnly());

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, src, srcRect, dstPoint);
}

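// Reads back a sub-rect of 'surface' into 'buffer'. If the backend lacks
// readPixelsRowBytesSupport, the row bytes must be tightly packed; otherwise they must be at
// least the tight row size and a multiple of the bytes per pixel.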
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
                       GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

    auto subRect = SkIRect::MakeXYWH(left, top, width, height);
    auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width);
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, left, top, width, height, surfaceColorType, dstColorType,
                              buffer, rowBytes);
}

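// Uploads texel data to 'surface'. A single-level write may target any sub-rect of the surface;
// a mipped write must cover the entire surface.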
bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
                        GrColorType surfaceColorType, GrColorType srcColorType,
                        const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("Texture upload(%u) %ix%i",
                                    surface->uniqueID().asUInt(), width, height);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the surface.
        auto subRect = SkIRect::MakeXYWH(left, top, width, height);
        auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
        if (!bounds.contains(subRect)) {
            return false;
        }
    } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    if (!validate_texel_levels({width, height}, srcColorType, texels, mipLevelCount,
                               this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
                            texels, mipLevelCount, prepForTexSampling)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}

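// Schedules a buffer-to-texture upload from 'transferBuffer', subject to the same row-bytes
// rules as writePixels().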
bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                             GrColorType textureColorType, GrColorType bufferColorType,
                             sk_sp<GrGpuBuffer> transferBuffer, size_t offset, size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * width)) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * width)) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture, left, top, width, height, textureColorType,
                                 bufferColorType, std::move(transferBuffer), offset, rowBytes)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}

bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                               GrColorType surfaceColorType, GrColorType bufferColorType,
                               sk_sp<GrGpuBuffer> transferBuffer, size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
                                   bufferColorType, std::move(transferBuffer), offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}

bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipmapSupport());
    SkASSERT(texture->mipmapped() == GrMipmapped::kYes);
    if (!texture->mipmapsAreDirty()) {
        // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on
        // the actual target. This may be caused by things that the drawingManager could not
        // predict, i.e., ops that don't draw anything, aborting a draw for exceptional
        // circumstances, etc.
        // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
        return true;
    }
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->markMipmapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target, resolveRect);
}

void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain as dirty if and only if there is a non-empty bounds.
    if (nullptr == bounds || !bounds->isEmpty()) {
        GrTexture* texture = surface->asTexture();
        if (texture) {
            if (mipLevels == 1) {
                texture->markMipmapsDirty();
            } else {
                texture->markMipmapsClean();
            }
        }
    }
}

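// Wraps or creates the signal semaphores requested in 'info', registers the finished/submitted
// procs, and gives the backend a chance to prepare the flushed surfaces for external access or a
// new surface state.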
void GrGpu::executeFlushInfo(SkSpan<GrSurfaceProxy*> proxies,
                             SkSurface::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const GrBackendSurfaceMutableState* newState) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
            new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
    if (this->caps()->semaphoreSupport() && info.fNumSemaphores) {
        for (int i = 0; i < info.fNumSemaphores; ++i) {
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphores[i] = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
                // If we failed to wrap the semaphore it means the client didn't give us a valid
                // semaphore to begin with. Therefore, it is fine to not signal it.
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                }
            } else {
                semaphores[i] = resourceProvider->makeSemaphore(false);
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                    info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
                }
            }
        }
    }

    if (info.fFinishedProc) {
        this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
    }

    if (info.fSubmittedProc) {
        fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
    }

    // We currently don't support passing in new surface state for multiple proxies here. The only
    // time we have multiple proxies is if we are flushing a yuv SkImage, which won't have state
    // updates anyway.
    SkASSERT(!newState || proxies.size() == 1);
    SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
    this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, access, newState);
}

GrOpsRenderPass* GrGpu::getOpsRenderPass(
        GrRenderTarget* renderTarget,
        bool useMSAASurface,
        GrAttachment* stencil,
        GrSurfaceOrigin origin,
        const SkIRect& bounds,
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
        GrXferBarrierFlags renderPassXferBarriers) {
#if SK_HISTOGRAMS_ENABLED
    fCurrentSubmitRenderPassCount++;
#endif
    fStats.incRenderPasses();
    return this->onGetOpsRenderPass(renderTarget, useMSAASurface, stencil, origin, bounds,
                                    colorInfo, stencilInfo, sampledProxies, renderPassXferBarriers);
}

bool GrGpu::submitToGpu(bool syncCpu) {
    this->stats()->incNumSubmitToGpus();

    if (auto manager = this->stagingBufferManager()) {
        manager->detachBuffers();
    }

    if (auto uniformsBuffer = this->uniformsRingBuffer()) {
        uniformsBuffer->startSubmit(this);
    }

    bool submitted = this->onSubmitToGpu(syncCpu);

    this->callSubmittedProcs(submitted);

    this->reportSubmitHistograms();

    return submitted;
}

void GrGpu::reportSubmitHistograms() {
#if SK_HISTOGRAMS_ENABLED
    // The max allowed value for SK_HISTOGRAM_EXACT_LINEAR is 100. If we want to support higher
    // values we can add SK_HISTOGRAM_CUSTOM_COUNTS, but that has a number of buckets that is less
    // than the number of actual values.
    static constexpr int kMaxRenderPassBucketValue = 100;
    SK_HISTOGRAM_EXACT_LINEAR("SubmitRenderPasses",
                              std::min(fCurrentSubmitRenderPassCount, kMaxRenderPassBucketValue),
                              kMaxRenderPassBucketValue);
    fCurrentSubmitRenderPassCount = 0;
#endif

    this->onReportSubmitHistograms();
}

bool GrGpu::checkAndResetOOMed() {
    if (fOOMed) {
        fOOMed = false;
        return true;
    }
    return false;
}

void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.count(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.reset();
}

#ifdef SK_ENABLE_DUMP_GPU
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS

void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("MSAA Attachment Creates: %d\n", fMSAAAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
    out->appendf("Number of Scratch MSAA Attachments reused: %d\n",
                 fNumScratchMSAAAttachmentsReused);
    out->appendf("Number of Render Passes: %d\n", fRenderPasses);
    out->appendf("Reordered DAGs Over Budget: %d\n", fNumReorderedDAGsOverBudget);

    // Enable this block to output CSV-style stats for program pre-compilation.
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_passes"));
    values->push_back(fRenderPasses);
    keys->push_back(SkString("reordered_dags_over_budget"));
    values->push_back(fNumReorderedDAGsOverBudget);
}

#endif // GR_GPU_STATS
#endif // GR_TEST_UTILS

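// Returns true if 'length' exactly matches the expected size of the compressed data for the
// given dimensions and mip status.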
bool GrGpu::CompressedDataIsCorrect(SkISize dimensions,
                                    SkImage::CompressionType compressionType,
                                    GrMipmapped mipMapped,
                                    const void* data,
                                    size_t length) {
    size_t computedSize = SkCompressedDataSize(compressionType,
                                               dimensions,
                                               nullptr,
                                               mipMapped == GrMipmapped::kYes);
    return computedSize == length;
}

GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    if (caps->isFormatCompressed(format)) {
        // Compressed formats must go through the createCompressedBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateBackendTexture(dimensions, format, renderable, mipMapped, isProtected);
}

bool GrGpu::clearBackendTexture(const GrBackendTexture& backendTexture,
                                sk_sp<GrRefCntedCallback> finishedCallback,
                                std::array<float, 4> color) {
    if (!backendTexture.isValid()) {
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    return this->onClearBackendTexture(backendTexture, std::move(finishedCallback), color);
}

GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       GrMipmapped mipMapped,
                                                       GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() ||
        dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateCompressedBackendTexture(dimensions, format, mipMapped, isProtected);
}

bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                           sk_sp<GrRefCntedCallback> finishedCallback,
                                           const void* data,
                                           size_t length) {
    SkASSERT(data);

    if (!backendTexture.isValid()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the updateBackendTexture API.
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;

    if (!CompressedDataIsCorrect(backendTexture.dimensions(),
                                 compressionType,
                                 mipMapped,
                                 data,
                                 length)) {
        return false;
    }

    return this->onUpdateCompressedBackendTexture(backendTexture,
                                                  std::move(finishedCallback),
                                                  data,
                                                  length);
}