1 /*
2 * Copyright 2010 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8
9 #include "src/gpu/GrGpu.h"
10
11 #include "include/gpu/GrBackendSemaphore.h"
12 #include "include/gpu/GrBackendSurface.h"
13 #include "include/gpu/GrContext.h"
14 #include "src/core/SkMathPriv.h"
15 #include "src/gpu/GrAuditTrail.h"
16 #include "src/gpu/GrCaps.h"
17 #include "src/gpu/GrContextPriv.h"
18 #include "src/gpu/GrDataUtils.h"
19 #include "src/gpu/GrGpuResourcePriv.h"
20 #include "src/gpu/GrMesh.h"
21 #include "src/gpu/GrPathRendering.h"
22 #include "src/gpu/GrPipeline.h"
23 #include "src/gpu/GrRenderTargetPriv.h"
24 #include "src/gpu/GrResourceCache.h"
25 #include "src/gpu/GrResourceProvider.h"
26 #include "src/gpu/GrSemaphore.h"
27 #include "src/gpu/GrStencilAttachment.h"
28 #include "src/gpu/GrStencilSettings.h"
29 #include "src/gpu/GrSurfacePriv.h"
30 #include "src/gpu/GrTexturePriv.h"
31 #include "src/gpu/GrTextureProxyPriv.h"
32 #include "src/gpu/GrTracing.h"
33 #include "src/utils/SkJSONWriter.h"
34
35 ////////////////////////////////////////////////////////////////////////////////
36
// All GPU state is considered dirty at construction; the context pointer is not owned.
GrGpu::GrGpu(GrContext* context) : fResetBits(kAll_GrBackendState), fContext(context) {}
38
// Subclasses release backend resources; nothing to do at this level.
GrGpu::~GrGpu() {}
40
// Called when the owning context is abandoned/released; backends override to drop resources.
void GrGpu::disconnect(DisconnectType) {}
42
43 ////////////////////////////////////////////////////////////////////////////////
44
IsACopyNeededForRepeatWrapMode(const GrCaps * caps,GrTextureProxy * texProxy,int width,int height,GrSamplerState::Filter filter,GrTextureProducer::CopyParams * copyParams,SkScalar scaleAdjust[2])45 bool GrGpu::IsACopyNeededForRepeatWrapMode(const GrCaps* caps, GrTextureProxy* texProxy,
46 int width, int height,
47 GrSamplerState::Filter filter,
48 GrTextureProducer::CopyParams* copyParams,
49 SkScalar scaleAdjust[2]) {
50 if (!caps->npotTextureTileSupport() &&
51 (!SkIsPow2(width) || !SkIsPow2(height))) {
52 SkASSERT(scaleAdjust);
53 copyParams->fWidth = GrNextPow2(width);
54 copyParams->fHeight = GrNextPow2(height);
55 SkASSERT(scaleAdjust);
56 scaleAdjust[0] = ((SkScalar)copyParams->fWidth) / width;
57 scaleAdjust[1] = ((SkScalar)copyParams->fHeight) / height;
58 switch (filter) {
59 case GrSamplerState::Filter::kNearest:
60 copyParams->fFilter = GrSamplerState::Filter::kNearest;
61 break;
62 case GrSamplerState::Filter::kBilerp:
63 case GrSamplerState::Filter::kMipMap:
64 // We are only ever scaling up so no reason to ever indicate kMipMap.
65 copyParams->fFilter = GrSamplerState::Filter::kBilerp;
66 break;
67 }
68 return true;
69 }
70
71 if (texProxy) {
72 // If the texture format itself doesn't support repeat wrap mode or mipmapping (and
73 // those capabilities are required) force a copy.
74 if (texProxy->hasRestrictedSampling()) {
75 copyParams->fFilter = GrSamplerState::Filter::kNearest;
76 copyParams->fWidth = texProxy->width();
77 copyParams->fHeight = texProxy->height();
78 return true;
79 }
80 }
81
82 return false;
83 }
84
IsACopyNeededForMips(const GrCaps * caps,const GrTextureProxy * texProxy,GrSamplerState::Filter filter,GrTextureProducer::CopyParams * copyParams)85 bool GrGpu::IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
86 GrSamplerState::Filter filter,
87 GrTextureProducer::CopyParams* copyParams) {
88 SkASSERT(texProxy);
89 bool willNeedMips = GrSamplerState::Filter::kMipMap == filter && caps->mipMapSupport();
90 // If the texture format itself doesn't support mipmapping (and those capabilities are required)
91 // force a copy.
92 if (willNeedMips && texProxy->mipMapped() == GrMipMapped::kNo) {
93 copyParams->fFilter = GrSamplerState::Filter::kNearest;
94 copyParams->fWidth = texProxy->width();
95 copyParams->fHeight = texProxy->height();
96 return true;
97 }
98
99 return false;
100 }
101
// Validates CPU-side mip level data supplied to texture creation / writePixels.
// w/h are the base-level dimensions; texels holds mipLevelCount entries whose fPixels
// may individually be null; bpp is bytes per pixel of the source data. Returns false
// when any supplied level has invalid row bytes for this backend, when the level count
// doesn't reduce the dimensions to 1x1 (if more than one level is given), or when the
// set of levels with data is neither empty, base-only, nor complete (a complete set is
// mandatory when mustHaveDataForAllLevels is true).
static bool validate_levels(int w, int h, const GrMipLevel texels[], int mipLevelCount, int bpp,
                            const GrCaps* caps, bool mustHaveDataForAllLevels = false) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                // Backend accepts padded rows: row bytes must be at least the tight size
                // and a whole multiple of the pixel size.
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                // Backend requires tightly packed rows.
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            // Once 1x1 is reached this must be the last level.
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            // Step to the next level's dimensions (each at least 1).
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    if (levelsWithPixelsCnt == 1 && !mustHaveDataForAllLevels) {
        return true;
    }
    return levelsWithPixelsCnt == mipLevelCount;
}
146
// Creates an uncompressed texture, optionally uploading initial texel data for one or
// more mip levels. Returns null on any validation failure or backend failure. Compressed
// formats must go through createCompressedTexture instead.
sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc,
                                      const GrBackendFormat& format,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected,
                                      const GrMipLevel texels[],
                                      int mipLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->caps()->isFormatCompressed(format)) {
        // Call GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!this->caps()->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig,
                                             renderable, renderTargetSampleCnt, mipMapped)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        // Snap the requested sample count to one the backend actually supports for this format.
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);

    bool mustHaveDataForAllLevels = this->caps()->createTextureMustSpecifyAllLevels();
    if (mipLevelCount) {
        int bpp = GrBytesPerPixel(desc.fConfig);
        if (!validate_levels(desc.fWidth, desc.fHeight, texels, mipLevelCount, bpp, this->caps(),
                             mustHaveDataForAllLevels)) {
            return nullptr;
        }
    } else if (mustHaveDataForAllLevels) {
        // Backend demands full data but none was provided.
        return nullptr;
    }

    this->handleDirtyContext();
    sk_sp<GrTexture> tex = this->onCreateTexture(desc,
                                                 format,
                                                 renderable,
                                                 renderTargetSampleCnt,
                                                 budgeted,
                                                 isProtected,
                                                 texels,
                                                 mipLevelCount);
    if (tex) {
        // Non-renderable scratch textures aren't worth keying if the backend can't reuse them.
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (mipLevelCount) {
            if (texels[0].fPixels) {
                fStats.incTextureUploads();
            }
        }
        SkASSERT(tex->backendFormat() == format);
    }
    return tex;
}
208
createTexture(const GrSurfaceDesc & desc,const GrBackendFormat & format,GrRenderable renderable,int renderTargetSampleCnt,SkBudgeted budgeted,GrProtected isProtected)209 sk_sp<GrTexture> GrGpu::createTexture(const GrSurfaceDesc& desc, const GrBackendFormat& format,
210 GrRenderable renderable, int renderTargetSampleCnt,
211 SkBudgeted budgeted, GrProtected isProtected) {
212 return this->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
213 isProtected, nullptr, 0);
214 }
215
createCompressedTexture(int width,int height,const GrBackendFormat & format,SkImage::CompressionType compressionType,SkBudgeted budgeted,const void * data,size_t dataSize)216 sk_sp<GrTexture> GrGpu::createCompressedTexture(int width, int height,
217 const GrBackendFormat& format,
218 SkImage::CompressionType compressionType,
219 SkBudgeted budgeted, const void* data,
220 size_t dataSize) {
221 // If we ever add a new CompressionType, we should add a check here to make sure the
222 // GrBackendFormat and CompressionType are compatible with eachother.
223 SkASSERT(compressionType == SkImage::kETC1_CompressionType || compressionType == SkImage::kASTC_CompressionType);
224
225 this->handleDirtyContext();
226 if (width < 1 || width > this->caps()->maxTextureSize() ||
227 height < 1 || height > this->caps()->maxTextureSize()) {
228 return nullptr;
229 }
230 // Note if we relax the requirement that data must be provided then we must check
231 // caps()->shouldInitializeTextures() here.
232 if (!data) {
233 return nullptr;
234 }
235 if (!this->caps()->isFormatTexturable(format)) {
236 return nullptr;
237 }
238 if (dataSize < GrCompressedDataSize(compressionType, width, height)) {
239 return nullptr;
240 }
241 return this->onCreateCompressedTexture(width, height, format, compressionType, budgeted, data);
242 }
243
wrapBackendTexture(const GrBackendTexture & backendTex,GrColorType colorType,GrWrapOwnership ownership,GrWrapCacheable cacheable,GrIOType ioType)244 sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
245 GrColorType colorType,
246 GrWrapOwnership ownership, GrWrapCacheable cacheable,
247 GrIOType ioType) {
248 SkASSERT(ioType != kWrite_GrIOType);
249 this->handleDirtyContext();
250
251 const GrCaps* caps = this->caps();
252 SkASSERT(caps);
253
254 if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
255 return nullptr;
256 }
257 if (backendTex.width() > caps->maxTextureSize() ||
258 backendTex.height() > caps->maxTextureSize()) {
259 return nullptr;
260 }
261
262 return this->onWrapBackendTexture(backendTex, colorType, ownership, cacheable, ioType);
263 }
264
wrapRenderableBackendTexture(const GrBackendTexture & backendTex,int sampleCnt,GrColorType colorType,GrWrapOwnership ownership,GrWrapCacheable cacheable)265 sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
266 int sampleCnt, GrColorType colorType,
267 GrWrapOwnership ownership,
268 GrWrapCacheable cacheable) {
269 this->handleDirtyContext();
270 if (sampleCnt < 1) {
271 return nullptr;
272 }
273
274 const GrCaps* caps = this->caps();
275
276 if (!caps->isFormatTexturable(backendTex.getBackendFormat()) ||
277 !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
278 return nullptr;
279 }
280
281 if (backendTex.width() > caps->maxRenderTargetSize() ||
282 backendTex.height() > caps->maxRenderTargetSize()) {
283 return nullptr;
284 }
285 sk_sp<GrTexture> tex = this->onWrapRenderableBackendTexture(backendTex, sampleCnt, colorType,
286 ownership, cacheable);
287 SkASSERT(!tex || tex->asRenderTarget());
288 return tex;
289 }
290
wrapBackendRenderTarget(const GrBackendRenderTarget & backendRT,GrColorType colorType)291 sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
292 GrColorType colorType) {
293 this->handleDirtyContext();
294
295 const GrCaps* caps = this->caps();
296
297 if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
298 return nullptr;
299 }
300
301 return this->onWrapBackendRenderTarget(backendRT, colorType);
302 }
303
wrapBackendTextureAsRenderTarget(const GrBackendTexture & backendTex,int sampleCnt,GrColorType colorType)304 sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& backendTex,
305 int sampleCnt,
306 GrColorType colorType) {
307 this->handleDirtyContext();
308
309 const GrCaps* caps = this->caps();
310
311 int maxSize = caps->maxTextureSize();
312 if (backendTex.width() > maxSize || backendTex.height() > maxSize) {
313 return nullptr;
314 }
315
316 if (!caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
317 return nullptr;
318 }
319
320 return this->onWrapBackendTextureAsRenderTarget(backendTex, sampleCnt, colorType);
321 }
322
wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo & imageInfo,const GrVkDrawableInfo & vkInfo)323 sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
324 const GrVkDrawableInfo& vkInfo) {
325 return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
326 }
327
// Default implementation for non-Vulkan backends; the Vulkan backend overrides this.
sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan so we default to returning nullptr here
    return nullptr;
}
333
createBuffer(size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern,const void * data)334 sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
335 GrAccessPattern accessPattern, const void* data) {
336 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
337 this->handleDirtyContext();
338 sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
339 if (!this->caps()->reuseScratchBuffers()) {
340 buffer->resourcePriv().removeScratchKey();
341 }
342 return buffer;
343 }
344
copySurface(GrSurface * dst,GrSurface * src,const SkIRect & srcRect,const SkIPoint & dstPoint,bool canDiscardOutsideDstRect)345 bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
346 const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) {
347 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
348 SkASSERT(dst && src);
349
350 if (dst->readOnly()) {
351 return false;
352 }
353
354 this->handleDirtyContext();
355
356 return this->onCopySurface(dst, src, srcRect, dstPoint, canDiscardOutsideDstRect);
357 }
358
readPixels(GrSurface * surface,int left,int top,int width,int height,GrColorType surfaceColorType,GrColorType dstColorType,void * buffer,size_t rowBytes)359 bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
360 GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
361 size_t rowBytes) {
362 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
363 SkASSERT(surface);
364 SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));
365
366 auto subRect = SkIRect::MakeXYWH(left, top, width, height);
367 auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
368 if (!bounds.contains(subRect)) {
369 return false;
370 }
371
372 size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width);
373 if (!this->caps()->readPixelsRowBytesSupport()) {
374 if (rowBytes != minRowBytes) {
375 return false;
376 }
377 } else {
378 if (rowBytes < minRowBytes) {
379 return false;
380 }
381 if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
382 return false;
383 }
384 }
385
386 if (this->caps()->isFormatCompressed(surface->backendFormat())) {
387 return false;
388 }
389
390 this->handleDirtyContext();
391
392 return this->onReadPixels(surface, left, top, width, height, surfaceColorType, dstColorType,
393 buffer, rowBytes);
394 }
395
// Uploads texel data (srcColorType layout) to surface. With a single level, the write
// rect may be any sub-rect of the surface; with multiple mip levels, the write must
// cover the entire surface. Returns false on validation or backend failure.
bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
                        GrColorType surfaceColorType, GrColorType srcColorType,
                        const GrMipLevel texels[], int mipLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(this->caps()->isFormatTexturableAndUploadable(surfaceColorType,
                                                           surface->backendFormat()));

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the surface
        auto subRect = SkIRect::MakeXYWH(left, top, width, height);
        auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
        if (!bounds.contains(subRect)) {
            return false;
        }
    } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
        // We require that if the texels are mipped, than the write region is the entire surface
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(srcColorType);
    if (!validate_levels(width, height, texels, mipLevelCount, bpp, this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
                            texels, mipLevelCount)) {
        // Mark dependent state (mips, resolve) dirty and record the upload in stats.
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}
437
// Uploads pixels to texture from a transfer buffer starting at offset, with the given
// row stride. Returns false when the texture is read-only, the rect is out of bounds,
// rowBytes is invalid for this backend, or the backend transfer fails.
bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                             GrColorType textureColorType, GrColorType bufferColorType,
                             GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->isFormatTexturableAndUploadable(textureColorType,
                                                           texture->backendFormat()));

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        // Padded rows allowed: must be at least tight and a multiple of the pixel size.
        if (rowBytes < SkToSizeT(bpp * width)) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        // Backend requires tightly packed rows.
        if (rowBytes != SkToSizeT(bpp * width)) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture, left, top, width, height, textureColorType,
                                 bufferColorType, transferBuffer, offset, rowBytes)) {
        // Mark dependent state dirty and record the transfer in stats.
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}
483
// Copies pixels from surface into a transfer buffer at offset (bufferColorType layout).
// offset must respect the backend's transfer-buffer alignment (checked in debug builds).
// Returns false when the rect is out of bounds or the backend transfer fails.
bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                               GrColorType surfaceColorType, GrColorType bufferColorType,
                               GrGpuBuffer* transferBuffer, size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
                                   bufferColorType, transferBuffer, offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}
514
regenerateMipMapLevels(GrTexture * texture)515 bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
516 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
517 SkASSERT(texture);
518 SkASSERT(this->caps()->mipMapSupport());
519 SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
520 SkASSERT(texture->texturePriv().mipMapsAreDirty());
521 SkASSERT(!texture->asRenderTarget() || !texture->asRenderTarget()->needsResolve());
522 if (texture->readOnly()) {
523 return false;
524 }
525 if (this->onRegenerateMipMapLevels(texture)) {
526 texture->texturePriv().markMipMapsClean();
527 return true;
528 }
529 return false;
530 }
531
// Drops any backend texture-unit bindings so stale bindings can't alias new textures.
void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}
536
// Resolves a multisampled render target into its resolve texture via the backend hook.
void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}
542
// Bookkeeping after any write to a surface: flags the render target as needing a
// resolve within the written bounds, and dirties the mip chain when only the base
// level (mipLevels == 1) was written. A null bounds means "the whole surface".
void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark any MIP chain and resolve buffer as dirty if and only if there is a non-empty bounds.
    if (nullptr == bounds || !bounds->isEmpty()) {
        if (GrRenderTarget* target = surface->asRenderTarget()) {
            SkIRect flippedBounds;
            // Bottom-left-origin bounds are given in flipped space; convert to top-left.
            if (kBottomLeft_GrSurfaceOrigin == origin && bounds) {
                flippedBounds = {bounds->fLeft, surface->height() - bounds->fBottom,
                                 bounds->fRight, surface->height() - bounds->fTop};
                bounds = &flippedBounds;
            }
            target->flagAsNeedingResolve(bounds);
        }
        GrTexture* texture = surface->asTexture();
        // Writing only the base level invalidates the derived mip levels.
        if (texture && 1 == mipLevels) {
            texture->texturePriv().markMipMapsDirty();
        }
    }
}
564
// Queries the render target's sample locations and returns a stable dictionary key for
// that pattern, assigning a new key the first time a pattern is seen. Requires MSAA on
// the color buffer or its stencil attachment.
int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
    SkASSERT(this->caps()->sampleLocationsSupport());
    SkASSERT(renderTarget->numSamples() > 1 ||
             (renderTarget->renderTargetPriv().getStencilAttachment() &&
              renderTarget->renderTargetPriv().getStencilAttachment()->numSamples() > 1));

    SkSTArray<16, SkPoint> sampleLocations;
    this->querySampleLocations(renderTarget, &sampleLocations);
    return fSamplePatternDictionary.findOrAssignSamplePatternKey(sampleLocations);
}
575
// Finishes a flush of the given proxies. When the backend supports semaphores, inserts
// one signal semaphore per requested entry in info: client-initialized semaphores are
// wrapped and signaled; uninitialized slots get a newly created semaphore whose backend
// handle is written back into info for the client. Returns whether semaphores were
// actually submitted.
GrSemaphoresSubmitted GrGpu::finishFlush(GrSurfaceProxy* proxies[],
                                         int n,
                                         SkSurface::BackendSurfaceAccess access,
                                         const GrFlushInfo& info,
                                         const GrPrepareForExternalIORequests& externalRequests) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->stats()->incNumFinishFlushes();
    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    if (this->caps()->semaphoreSupport()) {
        for (int i = 0; i < info.fNumSemaphores; ++i) {
            sk_sp<GrSemaphore> semaphore;
            if (info.fSignalSemaphores[i].isInitialized()) {
                // Client supplied a backend semaphore: borrow it for signaling.
                semaphore = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
            } else {
                semaphore = resourceProvider->makeSemaphore(false);
            }
            this->insertSemaphore(semaphore);

            // Hand newly created semaphores back to the client through the info struct.
            if (!info.fSignalSemaphores[i].isInitialized()) {
                info.fSignalSemaphores[i] = semaphore->backendSemaphore();
            }
        }
    }
    this->onFinishFlush(proxies, n, access, info, externalRequests);
    return this->caps()->semaphoreSupport() ? GrSemaphoresSubmitted::kYes
                                            : GrSemaphoresSubmitted::kNo;
}
607
608 #ifdef SK_ENABLE_DUMP_GPU
// Emits a JSON object describing this GPU; backend-specific fields come from onDumpJSON.
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
618 #else
// Dump support compiled out (SK_ENABLE_DUMP_GPU undefined): intentional no-op.
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
620 #endif
621
622 #if GR_TEST_UTILS
623
624 #if GR_GPU_STATS
// Appends a human-readable summary of the tracked GPU counters to out (test builds only).
void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Render Target Binds: %d\n", fRenderTargetBinds);
    out->appendf("Shader Compilations: %d\n", fShaderCompilations);
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused %d\n", fNumScratchTexturesReused);
}
636
dumpKeyValuePairs(SkTArray<SkString> * keys,SkTArray<double> * values)637 void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
638 keys->push_back(SkString("render_target_binds")); values->push_back(fRenderTargetBinds);
639 keys->push_back(SkString("shader_compilations")); values->push_back(fShaderCompilations);
640 }
641
642 #endif
643
644 #endif
645