/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrResourceProvider.h"

#include "GrBackendSemaphore.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrPath.h"
#include "GrPathRendering.h"
#include "GrRenderTarget.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceKey.h"
#include "GrSemaphore.h"
#include "GrStencilAttachment.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTexturePriv.h"
#include "../private/GrSingleOwner.h"
#include "SkGr.h"
#include "SkMathPriv.h"

GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);

const uint32_t GrResourceProvider::kMinScratchTextureSize = 16;

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
        : fCache(cache)
        , fGpu(gpu)
#ifdef SK_DEBUG
        , fSingleOwner(owner)
#endif
{
    fCaps = sk_ref_sp(fGpu->caps());

    GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
    fQuadIndexBufferKey = gQuadIndexBufferKey;
}

bool GrResourceProvider::IsFunctionallyExact(GrSurfaceProxy* proxy) {
    return proxy->priv().isExact() || (SkIsPow2(proxy->width()) && SkIsPow2(proxy->height()));
}

bool validate_desc(const GrSurfaceDesc& desc, const GrCaps& caps, int levelCount = 0) {
    if (desc.fWidth <= 0 || desc.fHeight <= 0) {
        return false;
    }
    if (!caps.isConfigTexturable(desc.fConfig)) {
        return false;
    }
    if (desc.fFlags & kRenderTarget_GrSurfaceFlag) {
        if (!caps.isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
            return false;
        }
    } else {
        if (desc.fSampleCnt) {
            return false;
        }
    }
    if (levelCount > 1 && (GrPixelConfigIsSint(desc.fConfig) || !caps.mipMapSupport())) {
        return false;
    }
    return true;
}

sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                                   const GrMipLevel texels[], int mipLevelCount,
                                                   SkDestinationSurfaceColorMode mipColorMode) {
    ASSERT_SINGLE_OWNER

    SkASSERT(mipLevelCount >= 1);

    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!validate_desc(desc, *fCaps, mipLevelCount)) {
        return nullptr;
    }

    sk_sp<GrTexture> tex(fGpu->createTexture(desc, budgeted, texels, mipLevelCount));
    if (tex) {
        tex->texturePriv().setMipColorMode(mipColorMode);
    }

    return tex;
}

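// Looks for an exactly sized scratch texture in the cache (never creating one). If the caller
// asked for an unbudgeted texture, a cache hit is converted to an unbudgeted resource before it
// is returned.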
sk_sp<GrTexture> GrResourceProvider::getExactScratch(const GrSurfaceDesc& desc,
                                                     SkBudgeted budgeted, uint32_t flags) {
    flags |= kExact_Flag | kNoCreate_Flag;
    sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
    if (tex && SkBudgeted::kNo == budgeted) {
        tex->resourcePriv().makeUnbudgeted();
    }

    return tex;
}

static bool make_info(int w, int h, GrPixelConfig config, SkImageInfo* ii) {
    SkColorType colorType;
    if (!GrPixelConfigToColorType(config, &colorType)) {
        return false;
    }

    *ii = SkImageInfo::Make(w, h, colorType, kUnknown_SkAlphaType, nullptr);
    return true;
}

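// Creates a texture proxy initialized with the provided base-level data. When the config maps to
// an SkColorType we first try to recycle an exact-size scratch texture and upload the data via
// writePixels(); otherwise (or if any step fails) the data is handed directly to the GPU backend
// at texture creation time.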
sk_sp<GrTextureProxy> GrResourceProvider::createTextureProxy(const GrSurfaceDesc& desc,
                                                             SkBudgeted budgeted,
                                                             const GrMipLevel& mipLevel) {
    ASSERT_SINGLE_OWNER

    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!mipLevel.fPixels) {
        return nullptr;
    }

    if (!validate_desc(desc, *fCaps)) {
        return nullptr;
    }

    GrContext* context = fGpu->getContext();

    SkImageInfo srcInfo;

    if (make_info(desc.fWidth, desc.fHeight, desc.fConfig, &srcInfo)) {
        sk_sp<GrTexture> tex = this->getExactScratch(desc, budgeted, 0);
        sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(tex));
        if (proxy) {
            sk_sp<GrSurfaceContext> sContext =
                    context->contextPriv().makeWrappedSurfaceContext(std::move(proxy), nullptr);
            if (sContext) {
                if (sContext->writePixels(srcInfo, mipLevel.fPixels, mipLevel.fRowBytes, 0, 0)) {
                    return sContext->asTextureProxyRef();
                }
            }
        }
    }

    sk_sp<GrTexture> tex(fGpu->createTexture(desc, budgeted, &mipLevel, 1));
    return GrSurfaceProxy::MakeWrapped(std::move(tex));
}

sk_sp<GrTexture> GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                                   uint32_t flags) {
    ASSERT_SINGLE_OWNER

    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!validate_desc(desc, *fCaps)) {
        return nullptr;
    }

    sk_sp<GrTexture> tex = this->getExactScratch(desc, budgeted, flags);
    if (tex) {
        return tex;
    }

    return fGpu->createTexture(desc, budgeted);
}

sk_sp<GrTexture> GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc,
                                                         uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(0 == flags || kNoPendingIO_Flag == flags);

    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!validate_desc(desc, *fCaps)) {
        return nullptr;
    }

    return this->refScratchTexture(desc, flags);
}

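// Finds or creates a scratch texture. Unless kExact_Flag is set, the requested dimensions are
// rounded up to the next power of two (with a minimum of kMinScratchTextureSize) so that
// differently sized requests can share cache entries. If no cached texture matches and
// kNoCreate_Flag is not set, a new budgeted texture is created.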
sk_sp<GrTexture> GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
                                                       uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!this->isAbandoned());
    SkASSERT(validate_desc(inDesc, *fCaps));

    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    // We could make initial clears work with scratch textures but it is a rare case so we just opt
    // to fall back to making a new texture.
    if (!SkToBool(inDesc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
        (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag))) {
        if (!(kExact_Flag & flags)) {
            // bin by pow2 with a reasonable min
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth = SkTMax(kMinScratchTextureSize, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(kMinScratchTextureSize, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_Flag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(key,
                                                                    GrSurface::WorstCaseSize(*desc),
                                                                    scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            return sk_sp<GrTexture>(surface->asTexture());
        }
    }

    if (!(kNoCreate_Flag & flags)) {
        return fGpu->createTexture(*desc, SkBudgeted::kYes);
    }

    return nullptr;
}

sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTexture& tex,
                                                        GrSurfaceOrigin origin,
                                                        GrBackendTextureFlags flags,
                                                        int sampleCnt,
                                                        GrWrapOwnership ownership) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTexture(tex, origin, flags, sampleCnt, ownership);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
        const GrBackendRenderTarget& backendRT, GrSurfaceOrigin origin)
{
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(backendRT, origin);
}

void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
                                                   GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned() || !resource) {
        return;
    }
    resource->resourcePriv().setUniqueKey(key);
}

GrGpuResource* GrResourceProvider::findAndRefResourceByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fCache->findAndRefUniqueResource(key);
}

GrTexture* GrResourceProvider::findAndRefTextureByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    GrGpuResource* resource = this->findAndRefResourceByUniqueKey(key);
    if (resource) {
        GrTexture* texture = static_cast<GrSurface*>(resource)->asTexture();
        SkASSERT(texture);
        return texture;
    }
    return nullptr;
}

void GrResourceProvider::assignUniqueKeyToTexture(const GrUniqueKey& key, GrTexture* texture) {
    SkASSERT(key.isValid());
    this->assignUniqueKeyToResource(key, texture);
}

// MDB TODO (caching): this side-steps the issue of texture proxies with unique IDs
void GrResourceProvider::assignUniqueKeyToProxy(const GrUniqueKey& key, GrTextureProxy* proxy) {
    ASSERT_SINGLE_OWNER
    SkASSERT(key.isValid());
    if (this->isAbandoned() || !proxy) {
        return;
    }

    if (!proxy->instantiate(this)) {
        return;
    }
    GrTexture* texture = proxy->priv().peekTexture();

    this->assignUniqueKeyToResource(key, texture);
}

// MDB TODO (caching): this side-steps the issue of texture proxies with unique IDs
sk_sp<GrTextureProxy> GrResourceProvider::findProxyByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER

    sk_sp<GrTexture> texture(this->findAndRefTextureByUniqueKey(key));
    if (!texture) {
        return nullptr;
    }

    return GrSurfaceProxy::MakeWrapped(std::move(texture));
}

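// Builds an index buffer by repeating 'pattern' (patternSize indices describing vertCount
// vertices) 'reps' times, offsetting the indices of each repetition by vertCount. The data is
// written through map()/unmap() when possible, otherwise through a temporary array and
// updateData(), and the resulting buffer is registered in the cache under 'key'.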
const GrBuffer* GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
                                                               int patternSize,
                                                               int reps,
                                                               int vertCount,
                                                               const GrUniqueKey& key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

    // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
    GrBuffer* buffer = this->createBuffer(bufferSize, kIndex_GrBufferType, kStatic_GrAccessPattern,
                                          kNoPendingIO_Flag);
    if (!buffer) {
        return nullptr;
    }
    uint16_t* data = (uint16_t*) buffer->map();
    bool useTempData = (nullptr == data);
    if (useTempData) {
        data = new uint16_t[reps * patternSize];
    }
    for (int i = 0; i < reps; ++i) {
        int baseIdx = i * patternSize;
        uint16_t baseVert = (uint16_t)(i * vertCount);
        for (int j = 0; j < patternSize; ++j) {
            data[baseIdx+j] = baseVert + pattern[j];
        }
    }
    if (useTempData) {
        // Free the temporary array regardless of whether the upload succeeds.
        bool updated = buffer->updateData(data, bufferSize);
        delete[] data;
        if (!updated) {
            buffer->unref();
            return nullptr;
        }
    } else {
        buffer->unmap();
    }
    this->assignUniqueKeyToResource(key, buffer);
    return buffer;
}

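// Creates the shared index buffer used for drawing quads as pairs of triangles: each quad's four
// vertices are referenced by the six-index pattern {0, 1, 2, 0, 2, 3}.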
const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
    static const int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
    GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
    static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };

    return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}

sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPath(path, style);
}

sk_sp<GrPathRange> GrResourceProvider::createPathRange(GrPathRange::PathGenerator* gen,
                                                       const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPathRange(gen, style);
}

sk_sp<GrPathRange> GrResourceProvider::createGlyphs(const SkTypeface* tf,
                                                    const SkScalerContextEffects& effects,
                                                    const SkDescriptor* desc,
                                                    const GrStyle& style) {

    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createGlyphs(tf, effects, desc, style);
}

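// Allocates a GrBuffer. Static buffers go straight to the GPU backend. Dynamic vertex/index
// buffers may instead be CPU-backed when the backend prefers client-side data, and are otherwise
// binned by power-of-two size and recycled through the scratch resource cache.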
GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
                                           GrAccessPattern accessPattern, uint32_t flags,
                                           const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }
    if (!(flags & kRequireGpuMemory_Flag) &&
        this->gpu()->caps()->preferClientSideDynamicBuffers() &&
        GrBufferTypeIsVertexOrIndex(intendedType) &&
        kDynamic_GrAccessPattern == accessPattern) {
        return GrBuffer::CreateCPUBacked(this->gpu(), size, intendedType, data);
    }

    // bin by pow2 with a reasonable min
    static const size_t MIN_SIZE = 1 << 12;
    size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));

    GrScratchKey key;
    GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
    uint32_t scratchFlags = 0;
    if (flags & kNoPendingIO_Flag) {
        scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
    } else {
        scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
    }
    GrBuffer* buffer = static_cast<GrBuffer*>(
            this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags));
    if (!buffer) {
        buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
        if (!buffer) {
            return nullptr;
        }
    }
    if (data) {
        buffer->updateData(data, size);
    }
    SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
    return buffer;
}

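// Returns the render target's stencil attachment, creating and attaching one if needed. Stencil
// attachments are shared between render targets of the same dimensions and sample count via a
// unique key, and a newly created attachment is cleared once when it is first attached.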
GrStencilAttachment* GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt) {
    SkASSERT(rt);
    if (rt->renderTargetPriv().getStencilAttachment()) {
        return rt->renderTargetPriv().getStencilAttachment();
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        bool newStencil = false;
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(width, height,
                                                               rt->numStencilSamples(), &sbKey);
        GrStencilAttachment* stencil = static_cast<GrStencilAttachment*>(
                this->findAndRefResourceByUniqueKey(sbKey));
        if (!stencil) {
            // Need to try and create a new stencil
            stencil = this->gpu()->createStencilAttachmentForRenderTarget(rt, width, height);
            if (stencil) {
                this->assignUniqueKeyToResource(sbKey, stencil);
                newStencil = true;
            }
        }
        if (rt->renderTargetPriv().attachStencilAttachment(stencil)) {
            if (newStencil) {
                // Right now we're clearing the stencil attachment here after it is
                // attached to a RT for the first time. When we start matching
                // stencil buffers with smaller color targets this will no longer
                // be correct because it won't be guaranteed to clear the entire
                // sb.
                // We used to clear down in the GL subclass using a special purpose
                // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
                // FBO status.
                this->gpu()->clearStencil(rt);
            }
        }
    }
    return rt->renderTargetPriv().getStencilAttachment();
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
        const GrBackendTexture& tex, GrSurfaceOrigin origin, int sampleCnt)
{
    if (this->isAbandoned()) {
        return nullptr;
    }
    return this->gpu()->wrapBackendTextureAsRenderTarget(tex, origin, sampleCnt);
}

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore(bool isOwned) {
    return fGpu->makeSemaphore(isOwned);
}

sk_sp<GrSemaphore> GrResourceProvider::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                            GrWrapOwnership ownership) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendSemaphore(semaphore, ownership);
}

void GrResourceProvider::takeOwnershipOfSemaphore(sk_sp<GrSemaphore> semaphore) {
    semaphore->resetGpu(fGpu);
}

void GrResourceProvider::releaseOwnershipOfSemaphore(sk_sp<GrSemaphore> semaphore) {
    semaphore->resetGpu(nullptr);
}