/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/image/SkImage_Lazy.h"

#include "include/core/SkBitmap.h"
#include "include/core/SkData.h"
#include "include/core/SkImageGenerator.h"
#include "src/core/SkBitmapCache.h"
#include "src/core/SkCachedData.h"
#include "src/core/SkImagePriv.h"
#include "src/core/SkNextID.h"

#if SK_SUPPORT_GPU
#include "include/private/GrRecordingContext.h"
#include "include/private/GrResourceKey.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrImageTextureMaker.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrYUVProvider.h"
#include "src/gpu/SkGr.h"
#endif

// Ref-counted tuple (SkImageGenerator, SkMutex) which allows sharing one generator among N images.
class SharedGenerator final : public SkNVRefCnt<SharedGenerator> {
public:
    static sk_sp<SharedGenerator> Make(std::unique_ptr<SkImageGenerator> gen) {
        return gen ? sk_sp<SharedGenerator>(new SharedGenerator(std::move(gen))) : nullptr;
    }

    // This is thread safe. It is a const field set in the constructor.
    const SkImageInfo& getInfo() { return fGenerator->getInfo(); }

private:
    explicit SharedGenerator(std::unique_ptr<SkImageGenerator> gen)
            : fGenerator(std::move(gen)) {
        SkASSERT(fGenerator);
    }

    friend class ScopedGenerator;
    friend class SkImage_Lazy;

    std::unique_ptr<SkImageGenerator> fGenerator;
    SkMutex                           fMutex;
};

///////////////////////////////////////////////////////////////////////////////

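// Checks that the generator, optional subset, and optional colorType/colorSpace overrides form a
// usable combination. On failure, fSharedGenerator is reset so the Validator converts to false;
// on success, fInfo, fOrigin, and fUniqueID describe the resulting lazy image.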
SkImage_Lazy::Validator::Validator(sk_sp<SharedGenerator> gen, const SkIRect* subset,
                                   const SkColorType* colorType, sk_sp<SkColorSpace> colorSpace)
        : fSharedGenerator(std::move(gen)) {
    if (!fSharedGenerator) {
        return;
    }

    // The following generator accessors are safe without acquiring the mutex (const getters).
    // TODO: refactor to use a ScopedGenerator instead, for clarity.
    const SkImageInfo& info = fSharedGenerator->fGenerator->getInfo();
    if (info.isEmpty()) {
        fSharedGenerator.reset();
        return;
    }

    fUniqueID = fSharedGenerator->fGenerator->uniqueID();
    const SkIRect bounds = SkIRect::MakeWH(info.width(), info.height());
    if (subset) {
        if (!bounds.contains(*subset)) {
            fSharedGenerator.reset();
            return;
        }
        if (*subset != bounds) {
            // We need a different uniqueID since we really are a subset of the raw generator.
            fUniqueID = SkNextID::ImageID();
        }
    } else {
        subset = &bounds;
    }

    fInfo   = info.makeWH(subset->width(), subset->height());
    fOrigin = SkIPoint::Make(subset->x(), subset->y());
    if (colorType || colorSpace) {
        if (colorType) {
            fInfo = fInfo.makeColorType(*colorType);
        }
        if (colorSpace) {
            fInfo = fInfo.makeColorSpace(colorSpace);
        }
        fUniqueID = SkNextID::ImageID();
    }
}

///////////////////////////////////////////////////////////////////////////////

// Helper for exclusive access to a shared generator.
class SkImage_Lazy::ScopedGenerator {
public:
    ScopedGenerator(const sk_sp<SharedGenerator>& gen)
            : fSharedGenerator(gen)
            , fAutoAquire(gen->fMutex) {}

    SkImageGenerator* operator->() const {
        fSharedGenerator->fMutex.assertHeld();
        return fSharedGenerator->fGenerator.get();
    }

    operator SkImageGenerator*() const {
        fSharedGenerator->fMutex.assertHeld();
        return fSharedGenerator->fGenerator.get();
    }

private:
    const sk_sp<SharedGenerator>& fSharedGenerator;
    SkAutoMutexExclusive          fAutoAquire;
};

///////////////////////////////////////////////////////////////////////////////

SkImage_Lazy::SkImage_Lazy(Validator* validator)
        : INHERITED(validator->fInfo, validator->fUniqueID)
        , fSharedGenerator(std::move(validator->fSharedGenerator))
        , fOrigin(validator->fOrigin) {
    SkASSERT(fSharedGenerator);
    fUniqueID = validator->fUniqueID;
}

SkImage_Lazy::~SkImage_Lazy() {
#if SK_SUPPORT_GPU
    for (int i = 0; i < fUniqueKeyInvalidatedMessages.count(); ++i) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(*fUniqueKeyInvalidatedMessages[i]);
    }
    fUniqueKeyInvalidatedMessages.deleteAll();
#endif
}

//////////////////////////////////////////////////////////////////////////////////////////////////

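// Asks the generator for pixels covering the (originX, originY)-offset rect described by pmap.
// When only a subset of the generator's output is wanted, the full image is generated into a
// temporary bitmap first and the requested subset is then copied into the caller's pixmap.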
static bool generate_pixels(SkImageGenerator* gen, const SkPixmap& pmap, int originX, int originY) {
    const int genW = gen->getInfo().width();
    const int genH = gen->getInfo().height();
    const SkIRect srcR = SkIRect::MakeWH(genW, genH);
    const SkIRect dstR = SkIRect::MakeXYWH(originX, originY, pmap.width(), pmap.height());
    if (!srcR.contains(dstR)) {
        return false;
    }

    // If the caller is requesting a subset, we have to make a temporary allocation for the full
    // image, and then copy the subset into the caller's allocation.
    SkBitmap full;
    SkPixmap fullPM;
    const SkPixmap* dstPM = &pmap;
    if (srcR != dstR) {
        if (!full.tryAllocPixels(pmap.info().makeWH(genW, genH))) {
            return false;
        }
        if (!full.peekPixels(&fullPM)) {
            return false;
        }
        dstPM = &fullPM;
    }

    if (!gen->getPixels(dstPM->info(), dstPM->writable_addr(), dstPM->rowBytes())) {
        return false;
    }

    if (srcR != dstR) {
        if (!full.readPixels(pmap, originX, originY)) {
            return false;
        }
    }
    return true;
}

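// Produces raster pixels for this image. With kAllow_CachingHint the result is stored in (and,
// when possible, served from) the shared SkBitmapCache; otherwise the pixels are generated into
// a caller-owned, uncached bitmap.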
bool SkImage_Lazy::getROPixels(SkBitmap* bitmap, SkImage::CachingHint chint) const {
    auto check_output_bitmap = [bitmap]() {
        SkASSERT(bitmap->isImmutable());
        SkASSERT(bitmap->getPixels());
        (void)bitmap;
    };

    auto desc = SkBitmapCacheDesc::Make(this);
    if (SkBitmapCache::Find(desc, bitmap)) {
        check_output_bitmap();
        return true;
    }

    if (SkImage::kAllow_CachingHint == chint) {
        SkPixmap pmap;
        SkBitmapCache::RecPtr cacheRec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
        if (!cacheRec ||
            !generate_pixels(ScopedGenerator(fSharedGenerator), pmap,
                             fOrigin.x(), fOrigin.y())) {
            return false;
        }
        SkBitmapCache::Add(std::move(cacheRec), bitmap);
        this->notifyAddedToRasterCache();
    } else {
        if (!bitmap->tryAllocPixels(this->imageInfo()) ||
            !generate_pixels(ScopedGenerator(fSharedGenerator), bitmap->pixmap(), fOrigin.x(),
                             fOrigin.y())) {
            return false;
        }
        bitmap->setImmutable();
    }

    check_output_bitmap();
    return true;
}

//////////////////////////////////////////////////////////////////////////////////////////////////

bool SkImage_Lazy::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
                                int srcX, int srcY, CachingHint chint) const {
    SkBitmap bm;
    if (this->getROPixels(&bm, chint)) {
        return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
    }
    return false;
}

sk_sp<SkData> SkImage_Lazy::onRefEncoded() const {
    ScopedGenerator generator(fSharedGenerator);
    return generator->refEncodedData();
}

bool SkImage_Lazy::onIsValid(GrContext* context) const {
    ScopedGenerator generator(fSharedGenerator);
    return generator->isValid(context);
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if SK_SUPPORT_GPU
sk_sp<GrTextureProxy> SkImage_Lazy::asTextureProxyRef(GrRecordingContext* context,
                                                      const GrSamplerState& params,
                                                      SkScalar scaleAdjust[2]) const {
    if (!context) {
        return nullptr;
    }

    GrImageTextureMaker textureMaker(context, this, kAllow_CachingHint);
    return textureMaker.refTextureProxyForParams(params, scaleAdjust);
}
#endif

sk_sp<SkImage> SkImage_Lazy::onMakeSubset(GrRecordingContext* context,
                                          const SkIRect& subset) const {
    SkASSERT(this->bounds().contains(subset));
    SkASSERT(this->bounds() != subset);

    const SkIRect generatorSubset = subset.makeOffset(fOrigin.x(), fOrigin.y());
    const SkColorType colorType = this->colorType();
    Validator validator(fSharedGenerator, &generatorSubset, &colorType, this->refColorSpace());
    return validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
}

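// The most recent color type/space conversion result is memoized, so repeated requests for the
// same target return the cached image instead of building a new lazy image each time.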
sk_sp<SkImage> SkImage_Lazy::onMakeColorTypeAndColorSpace(GrRecordingContext*,
                                                          SkColorType targetCT,
                                                          sk_sp<SkColorSpace> targetCS) const {
    SkAutoMutexExclusive autoAquire(fOnMakeColorTypeAndSpaceMutex);
    if (fOnMakeColorTypeAndSpaceResult &&
        targetCT == fOnMakeColorTypeAndSpaceResult->colorType() &&
        SkColorSpace::Equals(targetCS.get(), fOnMakeColorTypeAndSpaceResult->colorSpace())) {
        return fOnMakeColorTypeAndSpaceResult;
    }
    const SkIRect generatorSubset =
            SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), this->width(), this->height());
    Validator validator(fSharedGenerator, &generatorSubset, &targetCT, targetCS);
    sk_sp<SkImage> result = validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
    if (result) {
        fOnMakeColorTypeAndSpaceResult = result;
    }
    return result;
}

sk_sp<SkImage> SkImage_Lazy::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
    // TODO: The correct thing is to clone the generator and modify its color space. That's hard,
    // because we don't have a clone method, and the generator is public (and derived-from by
    // clients). So do the simple/inefficient thing here, and fall back to raster when this is
    // called.

    // We allocate the bitmap with the new color space, then generate the image using the original.
    SkBitmap bitmap;
    if (bitmap.tryAllocPixels(this->imageInfo().makeColorSpace(std::move(newCS)))) {
        SkPixmap pixmap = bitmap.pixmap();
        pixmap.setColorSpace(this->refColorSpace());
        if (generate_pixels(ScopedGenerator(fSharedGenerator), pixmap, fOrigin.x(), fOrigin.y())) {
            bitmap.setImmutable();
            return SkImage::MakeFromBitmap(bitmap);
        }
    }
    return nullptr;
}

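// A minimal usage sketch (assuming encoded bytes already wrapped in an sk_sp<SkData> named `data`):
//     auto gen = SkImageGenerator::MakeFromEncoded(data);
//     sk_sp<SkImage> img = SkImage::MakeFromGenerator(std::move(gen));
// The resulting image decodes lazily; no pixels are produced until they are first requested.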
sk_sp<SkImage> SkImage::MakeFromGenerator(std::unique_ptr<SkImageGenerator> generator,
                                          const SkIRect* subset) {
    SkImage_Lazy::Validator
            validator(SharedGenerator::Make(std::move(generator)), subset, nullptr, nullptr);

    return validator ? sk_make_sp<SkImage_Lazy>(&validator) : nullptr;
}

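// Decodes the encoded data directly into a raster-backed image (optionally cropped to `subset`)
// rather than creating a lazy image; the encoded bytes only need to live for the call.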
sk_sp<SkImage> SkImage::DecodeToRaster(const void* encoded, size_t length, const SkIRect* subset) {
    // The generator will not outlive this function, so we can wrap the encoded data without
    // copying it.
    auto gen = SkImageGenerator::MakeFromEncoded(SkData::MakeWithoutCopy(encoded, length));
    if (!gen) {
        return nullptr;
    }
    SkImageInfo info = gen->getInfo();
    if (info.isEmpty()) {
        return nullptr;
    }

    SkIPoint origin = {0, 0};
    if (subset) {
        if (!SkIRect::MakeWH(info.width(), info.height()).contains(*subset)) {
            return nullptr;
        }
        info = info.makeWH(subset->width(), subset->height());
        origin = {subset->x(), subset->y()};
    }

    size_t rb = info.minRowBytes();
    if (rb == 0) {
        return nullptr; // rb was too big
    }
    size_t size = info.computeByteSize(rb);
    if (size == SIZE_MAX) {
        return nullptr;
    }
    auto data = SkData::MakeUninitialized(size);

    SkPixmap pmap(info, data->writable_data(), rb);
    if (!generate_pixels(gen.get(), pmap, origin.x(), origin.y())) {
        return nullptr;
    }

    return SkImage::MakeRasterData(info, data, rb);
}

//////////////////////////////////////////////////////////////////////////////////////////////////

#if SK_SUPPORT_GPU

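// Derives the texture-cache key for this image from the caller-supplied key; if origKey is
// invalid, cacheKey is left invalid and the resulting texture will not be cached by unique key.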
void SkImage_Lazy::makeCacheKeyFromOrigKey(const GrUniqueKey& origKey,
                                           GrUniqueKey* cacheKey) const {
    SkASSERT(!cacheKey->isValid());
    if (origKey.isValid()) {
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
        GrUniqueKey::Builder builder(cacheKey, origKey, kDomain, 0, "Image");
    }
}

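// Adapts an SkImageGenerator to the GrYUVProvider interface so the GPU backend can consume the
// generator's YUVA planes directly.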
class Generator_GrYUVProvider : public GrYUVProvider {
public:
    Generator_GrYUVProvider(SkImageGenerator* gen) : fGen(gen) {}

private:
    uint32_t onGetID() const override { return fGen->uniqueID(); }
    bool onQueryYUVA8(SkYUVASizeInfo* sizeInfo,
                      SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
                      SkYUVColorSpace* colorSpace) const override {
        return fGen->queryYUVA8(sizeInfo, yuvaIndices, colorSpace);
    }
    bool onGetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
                          const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
                          void* planes[]) override {
        return fGen->getYUVA8Planes(sizeInfo, yuvaIndices, planes);
    }

    SkImageGenerator* fGen;

    typedef GrYUVProvider INHERITED;
};

static void set_key_on_proxy(GrProxyProvider* proxyProvider,
                             GrTextureProxy* proxy, GrTextureProxy* originalProxy,
                             const GrUniqueKey& key) {
    if (key.isValid()) {
        if (originalProxy && originalProxy->getUniqueKey().isValid()) {
            SkASSERT(originalProxy->getUniqueKey() == key);
            SkASSERT(GrMipMapped::kYes == proxy->mipMapped() &&
                     GrMipMapped::kNo == originalProxy->mipMapped());
            // If we had an originalProxy with a valid key, that means there already is a proxy in
            // the cache which matches the key, but it does not have mip levels and we require
            // them. Thus we must remove the unique key from that proxy.
            proxyProvider->removeUniqueKeyFromProxy(originalProxy);
        }
        proxyProvider->assignUniqueKeyToProxy(key, proxy);
    }
}

sk_sp<SkCachedData> SkImage_Lazy::getPlanes(SkYUVASizeInfo* yuvaSizeInfo,
                                            SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
                                            SkYUVColorSpace* yuvColorSpace,
                                            const void* planes[SkYUVASizeInfo::kMaxCount]) {
    ScopedGenerator generator(fSharedGenerator);
    Generator_GrYUVProvider provider(generator);

    // May return nullptr if the generator cannot supply YUVA planes.
    return provider.getPlanes(yuvaSizeInfo, yuvaIndices, yuvColorSpace, planes);
}

/*
 *  We have 4 ways to try to return a texture (in order of preference):
 *
 *  1. Check the cache for a pre-existing one
 *  2. Ask the generator to natively create one
 *  3. Ask the generator to return YUV planes, which the GPU can convert
 *  4. Ask the generator to return RGB(A) data, which the GPU can convert
 */
sk_sp<GrTextureProxy> SkImage_Lazy::lockTextureProxy(
        GrRecordingContext* ctx,
        const GrUniqueKey& origKey,
        SkImage::CachingHint chint,
        bool willBeMipped,
        GrTextureMaker::AllowedTexGenType genType) const {
    // Values representing the various texture lock paths we can take. Used for logging the path
    // taken to a histogram.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath, // Deprecated
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    // Build our texture key.
    // Even though some proxies created here may have a specific origin and use that origin, we do
    // not include that in the key. Since SkImages are meant to be immutable, a given SkImage will
    // always have an associated proxy that is always one origin or the other. It never can change
    // origins. Thus we don't need to include that info in the key itself.
    GrUniqueKey key;
    this->makeCacheKeyFromOrigKey(origKey, &key);

    GrProxyProvider* proxyProvider = ctx->priv().proxyProvider();
    sk_sp<GrTextureProxy> proxy;

    // 1. Check the cache for a pre-existing one
    if (key.isValid()) {
        auto ct = SkColorTypeToGrColorType(this->colorType());
        proxy = proxyProvider->findOrCreateProxyByUniqueKey(key, ct, kTopLeft_GrSurfaceOrigin);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
                return proxy;
            }
        }
    }

    // 2. Ask the generator to natively create one
    if (!proxy) {
        ScopedGenerator generator(fSharedGenerator);
        if (GrTextureMaker::AllowedTexGenType::kCheap == genType &&
            SkImageGenerator::TexGenType::kCheap != generator->onCanGenerateTexture()) {
            return nullptr;
        }
        if ((proxy = generator->generateTexture(ctx, this->imageInfo(), fOrigin, willBeMipped))) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
                // Delete any message in the queue with the same key to avoid a memory leak.
                for (size_t i = 0; i < fUniqueKeyInvalidatedMessages.size(); i++) {
                    auto message = fUniqueKeyInvalidatedMessages[i];
                    if (key == message->key()) {
                        delete message;
                        fUniqueKeyInvalidatedMessages[i] =
                                new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
                        return proxy;
                    }
                }
                *fUniqueKeyInvalidatedMessages.append() =
                        new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
                return proxy;
            }
        }
    }

    // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be mipping
    //    the texture we fall through here and have the CPU generate the mip maps for us.
    if (!proxy && !willBeMipped && !ctx->priv().options().fDisableGpuYUVConversion) {
        const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(this->imageInfo());

        SkColorType colorType = this->colorType();

        ScopedGenerator generator(fSharedGenerator);
        Generator_GrYUVProvider provider(generator);

        // The pixels in the texture will be in the generator's color space.
        // If onMakeColorTypeAndColorSpace has been called then this will not match this image's
        // color space. To correct this, apply a color space conversion from the generator's color
        // space to this image's color space.
        SkColorSpace* generatorColorSpace = fSharedGenerator->fGenerator->getInfo().colorSpace();
        SkColorSpace* thisColorSpace = this->colorSpace();

        // TODO: Update to create the mipped surface in the YUV generator and draw the base
        //       layer directly into the mipped surface.
        proxy = provider.refAsTextureProxy(ctx, desc, SkColorTypeToGrColorType(colorType),
                                           generatorColorSpace, thisColorSpace);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            *fUniqueKeyInvalidatedMessages.append() =
                    new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
            return proxy;
        }
    }

    // 4. Ask the generator to return RGB(A) data, which the GPU can convert
    SkBitmap bitmap;
    if (!proxy && this->getROPixels(&bitmap, chint)) {
        proxy = proxyProvider->createProxyFromBitmap(bitmap, willBeMipped ? GrMipMapped::kYes
                                                                          : GrMipMapped::kNo);
        if (proxy && (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped())) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            *fUniqueKeyInvalidatedMessages.append() =
                    new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
            return proxy;
        }
    }

    if (proxy) {
        // We need a mipped proxy, but we either found a proxy earlier that wasn't mipped,
        // generated a native non-mipped proxy, or generated a non-mipped YUV proxy. Thus we
        // generate a new mipped surface and copy the original proxy into the base layer. We will
        // then let the GPU generate the rest of the mips.
        SkASSERT(willBeMipped);
        SkASSERT(GrMipMapped::kNo == proxy->mipMapped());
        *fUniqueKeyInvalidatedMessages.append() =
                new GrUniqueKeyInvalidatedMessage(key, ctx->priv().contextID());
        if (auto mippedProxy = GrCopyBaseMipMapToTextureProxy(ctx, proxy.get())) {
            set_key_on_proxy(proxyProvider, mippedProxy.get(), proxy.get(), key);
            return mippedProxy;
        }
        // We failed to make a mipped proxy with the base copied into it. This could have
        // been from failure to make the proxy or failure to do the copy. Thus we will fall
        // back to just using the non-mipped proxy; see skbug.com/7094.
        return proxy;
    }

    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
                             kLockTexturePathCount);
    return nullptr;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

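// Decodes the encoded data and uploads it to a GPU texture by wrapping it in a temporary lazy
// image and calling makeTextureImage(); the encoded bytes only need to live for the call.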
sk_sp<SkImage> SkImage::DecodeToTexture(GrContext* ctx, const void* encoded, size_t length,
                                        const SkIRect* subset) {
    // img will not survive this function, so we don't need to copy/own the encoded data.
    auto img = MakeFromEncoded(SkData::MakeWithoutCopy(encoded, length), subset);
    if (!img) {
        return nullptr;
    }
    return img->makeTextureImage(ctx);
}

#endif