/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContext.h"
#include "src/core/SkMathPriv.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStencilAttachment.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrSurfacePriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/utils/SkJSONWriter.h"

////////////////////////////////////////////////////////////////////////////////
GrGpu::GrGpu(GrContext* context) : fResetBits(kAll_GrBackendState), fContext(context) {}

GrGpu::~GrGpu() {}

void GrGpu::disconnect(DisconnectType) {}

////////////////////////////////////////////////////////////////////////////////

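// Returns true if |texProxy| must be copied before it can be sampled with repeat wrap mode,
// filling in |copyParams| with the copy's dimensions and filter. When the copy rounds a
// non-power-of-two texture up to the next power of two, |scaleAdjust| receives the resulting
// UV scale factors.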
bool GrGpu::IsACopyNeededForRepeatWrapMode(const GrCaps* caps, GrTextureProxy* texProxy,
                                           int width, int height,
                                           GrSamplerState::Filter filter,
                                           GrTextureProducer::CopyParams* copyParams,
                                           SkScalar scaleAdjust[2]) {
    if (!caps->npotTextureTileSupport() &&
        (!SkIsPow2(width) || !SkIsPow2(height))) {
        SkASSERT(scaleAdjust);
        copyParams->fWidth = GrNextPow2(width);
        copyParams->fHeight = GrNextPow2(height);
        scaleAdjust[0] = ((SkScalar)copyParams->fWidth) / width;
        scaleAdjust[1] = ((SkScalar)copyParams->fHeight) / height;
        switch (filter) {
            case GrSamplerState::Filter::kNearest:
                copyParams->fFilter = GrSamplerState::Filter::kNearest;
                break;
            case GrSamplerState::Filter::kBilerp:
            case GrSamplerState::Filter::kMipMap:
                // We are only ever scaling up, so there is no reason to ever indicate kMipMap.
                copyParams->fFilter = GrSamplerState::Filter::kBilerp;
                break;
        }
        return true;
    }

    if (texProxy) {
        // If the texture format itself doesn't support repeat wrap mode or mipmapping (and
        // those capabilities are required) force a copy.
        if (texProxy->hasRestrictedSampling()) {
            copyParams->fFilter = GrSamplerState::Filter::kNearest;
            copyParams->fWidth = texProxy->width();
            copyParams->fHeight = texProxy->height();
            return true;
        }
    }

    return false;
}

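// Returns true if mipmapped filtering was requested (and is supported by the caps) but
// |texProxy| has no mip levels, meaning it must be copied into a mipmapped texture first.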
bool GrGpu::IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
                                 GrSamplerState::Filter filter,
                                 GrTextureProducer::CopyParams* copyParams) {
    SkASSERT(texProxy);
    bool willNeedMips = GrSamplerState::Filter::kMipMap == filter && caps->mipMapSupport();
    // If the texture itself doesn't have mip levels (and mipmapping is required), force a
    // copy.
    if (willNeedMips && texProxy->mipMapped() == GrMipMapped::kNo) {
        copyParams->fFilter = GrSamplerState::Filter::kNearest;
        copyParams->fWidth = texProxy->width();
        copyParams->fHeight = texProxy->height();
        return true;
    }

    return false;
}

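// Validates CPU-side mip level data: row bytes must satisfy the caps' row-byte rules at every
// level, dimensions must halve down the chain, and either no levels, just the base level, or
// all levels must have pixel data (all levels when mustHaveDataForAllLevels is true).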
static bool validate_levels(int w, int h, const GrMipLevel texels[], int mipLevelCount, int bpp,
                            const GrCaps* caps, bool mustHaveDataForAllLevels = false) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    if (levelsWithPixelsCnt == 1 && !mustHaveDataForAllLevels) {
        return true;
    }
    return levelsWithPixelsCnt == mipLevelCount;
}

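// Creates an uncompressed texture, optionally initialized with mip level data. Validates the
// surface parameters and any level data against the caps before calling the backend's
// onCreateTexture().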
sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc,
                                      const GrBackendFormat& format,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected,
                                      const GrMipLevel texels[],
                                      int mipLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->caps()->isFormatCompressed(format)) {
        // Compressed formats must go through GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!this->caps()->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig,
                                             renderable, renderTargetSampleCnt, mipMapped)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);

    bool mustHaveDataForAllLevels = this->caps()->createTextureMustSpecifyAllLevels();
    if (mipLevelCount) {
        int bpp = GrBytesPerPixel(desc.fConfig);
        if (!validate_levels(desc.fWidth, desc.fHeight, texels, mipLevelCount, bpp, this->caps(),
                             mustHaveDataForAllLevels)) {
            return nullptr;
        }
    } else if (mustHaveDataForAllLevels) {
        return nullptr;
    }

    this->handleDirtyContext();
    sk_sp<GrTexture> tex = this->onCreateTexture(desc,
                                                 format,
                                                 renderable,
                                                 renderTargetSampleCnt,
                                                 budgeted,
                                                 isProtected,
                                                 texels,
                                                 mipLevelCount);
    if (tex) {
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (mipLevelCount) {
            if (texels[0].fPixels) {
                fStats.incTextureUploads();
            }
        }
        SkASSERT(tex->backendFormat() == format);
        if (desc.isGrTagValid()) {
            // Indices 0, 1, 2, 3 of the fGrTag array.
            GrGpuResourceTag tag(desc.fGrTag[0], desc.fGrTag[1], desc.fGrTag[2], desc.fGrTag[3]);
            tex->setResourceTag(tag);
        }
    }
    return tex;
}

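// Convenience overload that creates a texture with no initial pixel data.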
sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc, const GrBackendFormat& format,
                                      GrRenderable renderable, int renderTargetSampleCnt,
                                      SkBudgeted budgeted, GrProtected isProtected) {
    return this->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
                               isProtected, nullptr, 0);
}

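// Creates a compressed texture. Compressed textures must be created fully initialized, so
// |data| is required and must hold at least GrCompressedDataSize() bytes.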
sk_sp<GrTexture> GrGpu::createCompressedTexture(int width, int height,
                                                const GrBackendFormat& format,
                                                SkImage::CompressionType compressionType,
                                                SkBudgeted budgeted, const void* data,
                                                size_t dataSize) {
    // If we ever add a new CompressionType, we should add a check here to make sure the
    // GrBackendFormat and CompressionType are compatible with each other.
    SkASSERT(compressionType == SkImage::kETC1_CompressionType ||
             compressionType == SkImage::kASTC_CompressionType);

    this->handleDirtyContext();
    if (width < 1 || width > this->caps()->maxTextureSize() ||
        height < 1 || height > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note: if we relax the requirement that data must be provided, then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }
    if (!this->caps()->isFormatTexturable(format)) {
        return nullptr;
    }
    if (dataSize < GrCompressedDataSize(compressionType, width, height)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(width, height, format, compressionType, budgeted, data);
}

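// Wraps a client-created backend texture in a GrTexture. The texture may not be wrapped as
// write-only (|ioType| must be kRead or kRW), and it must be texturable and within the max
// texture size.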
sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrColorType colorType,
                                           GrWrapOwnership ownership, GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, colorType, ownership, cacheable, ioType);
}

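// Like wrapBackendTexture(), but the returned texture is also usable as a render target. The
// format must be both texturable and renderable at |sampleCnt|.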
sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt, GrColorType colorType,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex = this->onWrapRenderableBackendTexture(backendTex, sampleCnt, colorType,
                                                                ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
                                                     GrColorType colorType) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    return this->onWrapBackendRenderTarget(backendRT, colorType);
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& backendTex,
                                                              int sampleCnt,
                                                              GrColorType colorType) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    int maxSize = caps->maxTextureSize();
    if (backendTex.width() > maxSize || backendTex.height() > maxSize) {
        return nullptr;
    }

    if (!caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    return this->onWrapBackendTextureAsRenderTarget(backendTex, sampleCnt, colorType);
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan, so we default to returning nullptr here.
    return nullptr;
}

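// Creates a GPU buffer for the given intended use, optionally initialized with |data|.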
sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern, const void* data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    // onCreateBuffer() may fail; guard against a null buffer before touching it.
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

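// Copies a rect of pixels from |src| into |dst| at |dstPoint|. Fails if |dst| is read-only;
// any remaining restrictions are enforced by the backend's onCopySurface().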
bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                        const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, src, srcRect, dstPoint, canDiscardOutsideDstRect);
}

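// Reads back a rect of pixels from |surface| into |buffer|. The rect must lie within the
// surface, |rowBytes| must satisfy the caps' row-byte rules, and compressed formats cannot be
// read back.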
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
                       GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

    auto subRect = SkIRect::MakeXYWH(left, top, width, height);
    auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width);
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    if (this->caps()->isFormatCompressed(surface->backendFormat())) {
        return false;
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, left, top, width, height, surfaceColorType, dstColorType,
                              buffer, rowBytes);
}

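// Writes CPU pixel data into |surface|. A single-level write must lie within the surface; a
// multi-level (mipped) write must cover the entire surface.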
bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
                        GrColorType surfaceColorType, GrColorType srcColorType,
                        const GrMipLevel texels[], int mipLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(this->caps()->isFormatTexturableAndUploadable(surfaceColorType,
                                                           surface->backendFormat()));

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the
        // surface.
        auto subRect = SkIRect::MakeXYWH(left, top, width, height);
        auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
        if (!bounds.contains(subRect)) {
            return false;
        }
    } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(srcColorType);
    if (!validate_levels(width, height, texels, mipLevelCount, bpp, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
                            texels, mipLevelCount)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}

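// Uploads pixels into |texture| from a GPU transfer buffer starting at |offset|, rather than
// from CPU memory. The write region must lie within the texture.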
bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                             GrColorType textureColorType, GrColorType bufferColorType,
                             GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->isFormatTexturableAndUploadable(textureColorType,
                                                           texture->backendFormat()));

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * width)) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * width)) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture, left, top, width, height, textureColorType,
                                 bufferColorType, transferBuffer, offset, rowBytes)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}

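// Reads pixels from |surface| into a GPU transfer buffer at |offset|, which must meet the
// alignment the caps report for transfer-buffer reads.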
bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                               GrColorType surfaceColorType, GrColorType bufferColorType,
                               GrGpuBuffer* transferBuffer, size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
                                   bufferColorType, transferBuffer, offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}

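// Regenerates the mip chain of a texture whose mip levels are dirty, e.g. after its base
// level has been modified, and marks the levels clean on success.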
bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipMapSupport());
    SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
    SkASSERT(texture->texturePriv().mipMapsAreDirty());
    SkASSERT(!texture->asRenderTarget() || !texture->asRenderTarget()->needsResolve());
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->texturePriv().markMipMapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}

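// Bookkeeping after a write to |surface|: flags an associated render target as needing a
// resolve (flipping |bounds| for bottom-left origins) and, when only the base level was
// written, marks the texture's mip chain dirty.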
void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain and resolve buffer as dirty if and only if there is a non-empty bounds.
    if (nullptr == bounds || !bounds->isEmpty()) {
        if (GrRenderTarget* target = surface->asRenderTarget()) {
            SkIRect flippedBounds;
            if (kBottomLeft_GrSurfaceOrigin == origin && bounds) {
                flippedBounds = {bounds->fLeft, surface->height() - bounds->fBottom,
                                 bounds->fRight, surface->height() - bounds->fTop};
                bounds = &flippedBounds;
            }
            target->flagAsNeedingResolve(bounds);
        }
        GrTexture* texture = surface->asTexture();
        if (texture && 1 == mipLevels) {
            texture->texturePriv().markMipMapsDirty();
        }
    }
}

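// Queries the render target's sample locations and maps the pattern to a stable key via the
// sample pattern dictionary, assigning a new key the first time a pattern is seen.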
int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
    SkASSERT(this->caps()->sampleLocationsSupport());
    SkASSERT(renderTarget->numSamples() > 1 ||
             (renderTarget->renderTargetPriv().getStencilAttachment() &&
              renderTarget->renderTargetPriv().getStencilAttachment()->numSamples() > 1));

    SkSTArray<16, SkPoint> sampleLocations;
    this->querySampleLocations(renderTarget, &sampleLocations);
    return fSamplePatternDictionary.findOrAssignSamplePatternKey(sampleLocations);
}

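// Completes a flush: creates or wraps the semaphores requested in |info|, inserts them into
// the command stream, then calls the backend's onFinishFlush(). Reports whether signal
// semaphores were actually submitted, which requires semaphore support in the caps.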
GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
                                         int n,
                                         SkSurface::BackendSurfaceAccess access,
                                         const GrFlushInfo& info,
                                         const GrPrepareForExternalIORequests& externalRequests) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->stats()->incNumFinishFlushes();
    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    if (this->caps()->semaphoreSupport()) {
        for (int i = 0; i < info.fNumSemaphores; ++i) {
            sk_sp<GrSemaphore> semaphore;
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphore = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
            } else {
                semaphore = resourceProvider->makeSemaphore(false);
            }
            this->insertSemaphore(semaphore);

            if (!info.fSignalSemaphores[i].isInitialized()) {
                info.fSignalSemaphores[i] = semaphore->backendSemaphore();
            }
        }
    }
    this->onFinishFlush(proxies, n, access, info, externalRequests);
    return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
                                            : GrSemaphoresSubmitted::kNo;
}

#ifdef SK_ENABLE_DUMP_GPU
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS
void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Render Target Binds: %d\n", fRenderTargetBinds);
    out->appendf("Shader Compilations: %d\n", fShaderCompilations);
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_target_binds")); values->push_back(fRenderTargetBinds);
    keys->push_back(SkString("shader_compilations")); values->push_back(fShaderCompilations);
}

#endif

#endif