/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceProvider.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrContext.h"
#include "include/private/GrResourceKey.h"
#include "include/private/GrSingleOwner.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMathPriv.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrPath.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStencilAttachment.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/SkGr.h"

const uint32_t GrResourceProvider::kMinScratchTextureSize = 16;

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
        : fCache(cache)
        , fGpu(gpu)
#ifdef SK_DEBUG
        , fSingleOwner(owner)
#endif
{
    fCaps = sk_ref_sp(fGpu->caps());
}

// Ensures the row bytes are populated (not 0) and makes a copy to a temporary
// to make the row bytes tight if necessary. Returns false if the input row bytes are invalid.
static bool prepare_level(const GrMipLevel& inLevel, size_t bpp, int w, int h, bool rowBytesSupport,
                          bool mustInitializeAllLevels, GrMipLevel* outLevel,
                          std::unique_ptr<char[]>* data) {
    size_t minRB = w * bpp;
    if (!inLevel.fPixels) {
        if (mustInitializeAllLevels) {
            data->reset(new char[minRB * h]());
            outLevel->fPixels = data->get();
            outLevel->fRowBytes = minRB;
        } else {
            outLevel->fPixels = nullptr;
            outLevel->fRowBytes = 0;
        }
        return true;
    }
    size_t actualRB = inLevel.fRowBytes ? inLevel.fRowBytes : minRB;
    if (actualRB < minRB) {
        return false;
    }
    if (actualRB == minRB || rowBytesSupport) {
        outLevel->fRowBytes = actualRB;
        outLevel->fPixels = inLevel.fPixels;
    } else {
        data->reset(new char[minRB * h]);
        outLevel->fPixels = data->get();
        outLevel->fRowBytes = minRB;
        SkRectMemcpy(data->get(), outLevel->fRowBytes, inLevel.fPixels, inLevel.fRowBytes, minRB,
                     h);
    }
    return true;
}

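// Creates a texture with optional initial data for each mip level. The parameters are validated
// against the caps before reaching the backend, and each provided level is copied to tight row
// bytes when the backend cannot accept the caller's row bytes directly.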
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
                                                   const GrBackendFormat& format,
                                                   GrRenderable renderable,
                                                   int renderTargetSampleCnt,
                                                   SkBudgeted budgeted,
                                                   GrProtected isProtected,
                                                   const GrMipLevel texels[],
                                                   int mipLevelCount) {
    ASSERT_SINGLE_OWNER

    SkASSERT(mipLevelCount > 0);

    if (this->isAbandoned()) {
        return nullptr;
    }

    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
                                      renderTargetSampleCnt, mipMapped)) {
        return nullptr;
    }
    bool mustInitializeAllLevels = this->caps()->createTextureMustSpecifyAllLevels();
    bool rowBytesSupport = this->caps()->writePixelsRowBytesSupport();
    SkAutoSTMalloc<14, GrMipLevel> tmpTexels;
    SkAutoSTArray<14, std::unique_ptr<char[]>> tmpDatas;
    if (mipLevelCount > 0 && texels) {
        tmpTexels.reset(mipLevelCount);
        tmpDatas.reset(mipLevelCount);
        int w = desc.fWidth;
        int h = desc.fHeight;
        size_t bpp = GrBytesPerPixel(desc.fConfig);
        for (int i = 0; i < mipLevelCount; ++i) {
            if (!prepare_level(texels[i], bpp, w, h, rowBytesSupport, mustInitializeAllLevels,
                               &tmpTexels[i], &tmpDatas[i])) {
                return nullptr;
            }
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    return fGpu->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
                               isProtected, tmpTexels.get(), mipLevelCount);
}

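// Looks for an exactly matching scratch texture in the cache. If the request is unbudgeted, the
// recycled texture is moved out of the budget before being returned.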
sk_sp<GrTexture> GrResourceProvider::getExactScratch(const GrSurfaceDesc& desc,
                                                     const GrBackendFormat& format,
                                                     GrRenderable renderable,
                                                     int renderTargetSampleCnt,
                                                     SkBudgeted budgeted,
                                                     GrProtected isProtected,
                                                     Flags flags) {
    sk_sp<GrTexture> tex(this->refScratchTexture(desc, format, renderable, renderTargetSampleCnt,
                                                 isProtected, flags));
    if (tex && SkBudgeted::kNo == budgeted) {
        tex->resourcePriv().makeUnbudgeted();
    }

    return tex;
}

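// Creates a texture from a single level of pixel data. The texture itself is created exact or
// approx-fit as requested; the data is then uploaded via writePixels() on a wrapped surface
// context rather than passed to the backend at creation time.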
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
                                                   const GrBackendFormat& format,
                                                   GrRenderable renderable,
                                                   int renderTargetSampleCnt,
                                                   SkBudgeted budgeted,
                                                   SkBackingFit fit,
                                                   GrProtected isProtected,
                                                   GrColorType srcColorType,
                                                   const GrMipLevel& mipLevel,
                                                   Flags flags) {
    ASSERT_SINGLE_OWNER

    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!mipLevel.fPixels) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
                                      renderTargetSampleCnt, GrMipMapped::kNo)) {
        return nullptr;
    }

    GrContext* context = fGpu->getContext();
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();

    bool mustInitialize = this->caps()->createTextureMustSpecifyAllLevels();
    bool rowBytesSupport = this->caps()->writePixelsRowBytesSupport();

    size_t bpp = GrBytesPerPixel(desc.fConfig);
    std::unique_ptr<char[]> tmpData;
    GrMipLevel tmpLevel;
    if (!prepare_level(mipLevel, bpp, desc.fWidth, desc.fHeight, rowBytesSupport, mustInitialize,
                       &tmpLevel, &tmpData)) {
        return nullptr;
    }

    sk_sp<GrTexture> tex =
            (SkBackingFit::kApprox == fit)
                    ? this->createApproxTexture(desc, format, renderable, renderTargetSampleCnt,
                                                isProtected, flags)
                    : this->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
                                          isProtected, flags);
    if (!tex) {
        return nullptr;
    }

    sk_sp<GrTextureProxy> proxy =
            proxyProvider->createWrapped(tex, srcColorType, kTopLeft_GrSurfaceOrigin);
    if (!proxy) {
        return nullptr;
    }
    // Here we don't really know the alpha type of the data we want to upload. All we really
    // care about is that it is not converted. So we use the same alpha type for the data
    // and the surface context.
    static constexpr auto kAlphaType = kUnpremul_SkAlphaType;
    sk_sp<GrSurfaceContext> sContext =
            context->priv().makeWrappedSurfaceContext(std::move(proxy), srcColorType, kAlphaType);
    if (!sContext) {
        return nullptr;
    }
    GrPixelInfo srcInfo(srcColorType, kAlphaType, nullptr, desc.fWidth, desc.fHeight);
    SkAssertResult(sContext->writePixels(srcInfo, tmpLevel.fPixels, tmpLevel.fRowBytes, {0, 0}));
    return tex;
}

sk_sp<GrTexture> GrResourceProvider::createCompressedTexture(int width, int height,
                                                             const GrBackendFormat& format,
                                                             SkImage::CompressionType compression,
                                                             SkBudgeted budgeted, SkData* data) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->createCompressedTexture(width, height, format, compression, budgeted,
                                         data->data(), data->size());
}

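// Creates an uninitialized texture. Non-compressed configs are first looked up as exact-fit
// scratch textures; if the backend requires all levels to be specified at creation time, the
// texture is created with a zero-filled base level instead.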
sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc,
                                                   const GrBackendFormat& format,
                                                   GrRenderable renderable,
                                                   int renderTargetSampleCnt,
                                                   SkBudgeted budgeted,
                                                   GrProtected isProtected,
                                                   Flags flags) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
                                      renderTargetSampleCnt, GrMipMapped::kNo)) {
        return nullptr;
    }

    // Compressed textures are read-only so they don't support re-use for scratch.
    if (!GrPixelConfigIsCompressed(desc.fConfig)) {
        sk_sp<GrTexture> tex = this->getExactScratch(
                desc, format, renderable, renderTargetSampleCnt, budgeted, isProtected, flags);
        if (tex) {
            return tex;
        }
    }

    if (fCaps->createTextureMustSpecifyAllLevels()) {
        size_t rowBytes = GrBytesPerPixel(desc.fConfig) * desc.fWidth;
        size_t size = rowBytes * desc.fHeight;
        std::unique_ptr<char[]> zeros(new char[size]());
        GrMipLevel level;
        level.fRowBytes = rowBytes;
        level.fPixels = zeros.get();
        return fGpu->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
                                   isProtected, &level, 1);
    }

    return fGpu->createTexture(desc, format, renderable, renderTargetSampleCnt, budgeted,
                               isProtected);
}

// Map 'value' to a larger multiple of 2. Values <= 'kMagicTol' will pop up to
// the next power of 2. Those above 'kMagicTol' will only go up half the floor power of 2.
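// For example: MakeApprox(10) -> 16 (clamped to kMinScratchTextureSize), MakeApprox(1000) -> 1024,
// MakeApprox(1500) -> 1536 (the midpoint 1024 + 512), and MakeApprox(1600) -> 2048.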
uint32_t GrResourceProvider::MakeApprox(uint32_t value) {
    static const int kMagicTol = 1024;

    value = SkTMax(kMinScratchTextureSize, value);

    if (SkIsPow2(value)) {
        return value;
    }

    uint32_t ceilPow2 = GrNextPow2(value);
    if (value <= kMagicTol) {
        return ceilPow2;
    }

    uint32_t floorPow2 = ceilPow2 >> 1;
    uint32_t mid = floorPow2 + (floorPow2 >> 1);

    if (value <= mid) {
        return mid;
    }

    return ceilPow2;
}

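// Creates a scratch texture whose dimensions may be rounded up by MakeApprox() so that textures
// of similar sizes share cache entries. An exact-size scratch hit is tried first, then the
// binned size, before finally creating a new (always budgeted) texture.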
sk_sp<GrTexture> GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc,
                                                         const GrBackendFormat& format,
                                                         GrRenderable renderable,
                                                         int renderTargetSampleCnt,
                                                         GrProtected isProtected,
                                                         Flags flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(Flags::kNone == flags || Flags::kNoPendingIO == flags);

    if (this->isAbandoned()) {
        return nullptr;
    }

    // Currently we don't recycle compressed textures as scratch.
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig, renderable,
                                      renderTargetSampleCnt, GrMipMapped::kNo)) {
        return nullptr;
    }

    if (auto tex = this->refScratchTexture(desc, format, renderable, renderTargetSampleCnt,
                                           isProtected, flags)) {
        return tex;
    }

    SkTCopyOnFirstWrite<GrSurfaceDesc> copyDesc(desc);

    // bin by some multiple or power of 2 with a reasonable min
    if (fGpu->caps()->reuseScratchTextures() || renderable == GrRenderable::kYes) {
        GrSurfaceDesc* wdesc = copyDesc.writable();
        wdesc->fWidth = MakeApprox(wdesc->fWidth);
        wdesc->fHeight = MakeApprox(wdesc->fHeight);
    }

    if (auto tex = this->refScratchTexture(*copyDesc, format, renderable, renderTargetSampleCnt,
                                           isProtected, flags)) {
        return tex;
    }

    if (this->caps()->createTextureMustSpecifyAllLevels()) {
        size_t rowBytes = GrBytesPerPixel(copyDesc->fConfig) * copyDesc->fWidth;
        size_t size = rowBytes * copyDesc->fHeight;
        std::unique_ptr<char[]> zeros(new char[size]());
        GrMipLevel level;
        level.fRowBytes = rowBytes;
        level.fPixels = zeros.get();
        return fGpu->createTexture(*copyDesc, format, renderable, renderTargetSampleCnt,
                                   SkBudgeted::kYes, isProtected, &level, 1);
    }
    return fGpu->createTexture(*copyDesc, format, renderable, renderTargetSampleCnt,
                               SkBudgeted::kYes, isProtected);
}

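// Attempts to find and ref a cached texture matching the scratch key computed from the desc.
// Scratch re-use only applies when the backend recycles scratch textures or the texture is
// renderable; otherwise nullptr is returned and the caller creates a new texture.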
sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& desc,
                                                       const GrBackendFormat& format,
                                                       GrRenderable renderable,
                                                       int renderTargetSampleCnt,
                                                       GrProtected isProtected,
                                                       Flags flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
    SkASSERT(fCaps->validateSurfaceParams({desc.fWidth, desc.fHeight}, format, desc.fConfig,
                                          renderable, renderTargetSampleCnt, GrMipMapped::kNo));

    // We could make initial clears work with scratch textures but it is a rare case so we just opt
    // to fall back to making a new texture.
    if (fGpu->caps()->reuseScratchTextures() || renderable == GrRenderable::kYes) {
        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(desc, renderable, renderTargetSampleCnt, &key);
        auto scratchFlags = GrResourceCache::ScratchFlags::kNone;
        if (Flags::kNoPendingIO & flags) {
            scratchFlags |= GrResourceCache::ScratchFlags::kRequireNoPendingIO;
        } else if (renderable == GrRenderable::kNo) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags |= GrResourceCache::ScratchFlags::kPreferNoPendingIO;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(
                key, GrSurface::WorstCaseSize(desc, renderable, renderTargetSampleCnt),
                scratchFlags);
        if (resource) {
            fGpu->stats()->incNumScratchTexturesReused();
            GrSurface* surface = static_cast<GrSurface*>(resource);
            return sk_sp<GrTexture>(surface->asTexture());
        }
    }

    return nullptr;
}

sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTexture& tex,
                                                        GrColorType colorType,
                                                        GrWrapOwnership ownership,
                                                        GrWrapCacheable cacheable,
                                                        GrIOType ioType) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTexture(tex, colorType, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrResourceProvider::wrapRenderableBackendTexture(const GrBackendTexture& tex,
                                                                  int sampleCnt,
                                                                  GrColorType colorType,
                                                                  GrWrapOwnership ownership,
                                                                  GrWrapCacheable cacheable) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapRenderableBackendTexture(tex, sampleCnt, colorType, ownership, cacheable);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
        const GrBackendRenderTarget& backendRT, GrColorType colorType) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(backendRT, colorType);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr
                               : fGpu->wrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
                                                   GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned() || !resource) {
        return;
    }
    resource->resourcePriv().setUniqueKey(key);
}

sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr
                               : sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
}

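// Returns the uniquely-keyed static buffer if it already exists in the cache; otherwise creates
// it, initializes it with 'data', and assigns it the key so subsequent callers share the buffer.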
sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
                                                                    size_t size,
                                                                    const void* data,
                                                                    const GrUniqueKey& key) {
    if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
        return buffer;
    }
    if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, data)) {
        // We shouldn't bin and/or cache static buffers.
        SkASSERT(buffer->size() == size);
        SkASSERT(!buffer->resourcePriv().getScratchKey().isValid());
        SkASSERT(!buffer->resourcePriv().hasPendingIO_debugOnly());
        buffer->resourcePriv().setUniqueKey(key);
        return sk_sp<const GrGpuBuffer>(buffer);
    }
    return nullptr;
}

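// Builds an index buffer by repeating 'pattern' (patternSize indices) 'reps' times, offsetting
// the pattern by 'vertCount' vertices on each repetition. The indices are written through a
// mapped pointer when the buffer is mappable, otherwise staged in a temporary array and uploaded
// with updateData().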
sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
                                                                        int patternSize,
                                                                        int reps,
                                                                        int vertCount,
                                                                        const GrUniqueKey* key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

    // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
    sk_sp<GrGpuBuffer> buffer(
            this->createBuffer(bufferSize, GrGpuBufferType::kIndex, kStatic_GrAccessPattern));
    if (!buffer) {
        return nullptr;
    }
    uint16_t* data = (uint16_t*) buffer->map();
    SkAutoTArray<uint16_t> temp;
    if (!data) {
        temp.reset(reps * patternSize);
        data = temp.get();
    }
    for (int i = 0; i < reps; ++i) {
        int baseIdx = i * patternSize;
        uint16_t baseVert = (uint16_t)(i * vertCount);
        for (int j = 0; j < patternSize; ++j) {
            data[baseIdx + j] = baseVert + pattern[j];
        }
    }
    if (temp.get()) {
        if (!buffer->updateData(data, bufferSize)) {
            return nullptr;
        }
    } else {
        buffer->unmap();
    }
    if (key) {
        SkASSERT(key->isValid());
        this->assignUniqueKeyToResource(*key, buffer.get());
    }
    return buffer;
}

static constexpr int kMaxQuads = 1 << 12;  // max possible: (1 << 14) - 1;

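// Creates the shared quad index buffer: each quad uses 4 vertices and 6 indices forming the two
// triangles (0, 1, 2) and (2, 1, 3). With kMaxQuads quads, the largest index referenced is
// 4 * kMaxQuads - 1, which must fit in a uint16_t.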
sk_sp<const GrGpuBuffer> GrResourceProvider::createQuadIndexBuffer() {
    GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
    static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 };
    return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, nullptr);
}

int GrResourceProvider::QuadCountOfQuadBuffer() { return kMaxQuads; }

sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
    if (this->isAbandoned()) {
        return nullptr;
    }

    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPath(path, style);
}

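// Creates a GPU buffer. Static buffers are created directly. Dynamic buffers are binned by
// rounding the requested size up to a power of two (with a 4KB minimum) so they can be recycled
// through the scratch resource cache; 'data', if provided, is uploaded after the buffer is found
// or created.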
sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
                                                    GrAccessPattern accessPattern,
                                                    const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }
    // bin by pow2 with a reasonable min
    static const size_t MIN_SIZE = 1 << 12;
    size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));

    GrScratchKey key;
    GrGpuBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
    auto buffer =
            sk_sp<GrGpuBuffer>(static_cast<GrGpuBuffer*>(this->cache()->findAndRefScratchResource(
                    key, allocSize, GrResourceCache::ScratchFlags::kNone)));
    if (!buffer) {
        buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
        if (!buffer) {
            return nullptr;
        }
    }
    if (data) {
        buffer->updateData(data, size);
    }
    return buffer;
}

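// Ensures 'rt' has a stencil attachment with at least 'minStencilSampleCount' samples. Stencil
// attachments are shared between render targets of the same dimensions and sample count via a
// unique key, so an existing attachment is reused when possible before creating a new one.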
bool GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt, int minStencilSampleCount) {
    SkASSERT(rt);
    GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
    if (stencil && stencil->numSamples() >= minStencilSampleCount) {
        return true;
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(
                width, height, minStencilSampleCount, &sbKey);
        auto stencil = this->findByUniqueKey<GrStencilAttachment>(sbKey);
        if (!stencil) {
            // Need to try and create a new stencil
            stencil.reset(this->gpu()->createStencilAttachmentForRenderTarget(
                    rt, width, height, minStencilSampleCount));
            if (!stencil) {
                return false;
            }
            this->assignUniqueKeyToResource(sbKey, stencil.get());
        }
        rt->renderTargetPriv().attachStencilAttachment(std::move(stencil));
    }

    if (GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment()) {
        return stencil->numSamples() >= minStencilSampleCount;
    }
    return false;
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
        const GrBackendTexture& tex, int sampleCnt, GrColorType colorType) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTextureAsRenderTarget(tex, sampleCnt, colorType);
}

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore(bool isOwned) {
    return fGpu->makeSemaphore(isOwned);
}

sk_sp<GrSemaphore> GrResourceProvider::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                            SemaphoreWrapType wrapType,
                                                            GrWrapOwnership ownership) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr
                               : fGpu->wrapBackendSemaphore(semaphore, wrapType, ownership);
}