/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "SkGrPixelRef.h"

#include "GrContext.h"
#include "GrTexture.h"
#include "SkBitmapCache.h"
#include "SkGr.h"
#include "SkRect.h"

// since we call lockPixels recursively on fBitmap, we need a distinct mutex,
// to avoid deadlock with the default one provided by SkPixelRef.
SK_DECLARE_STATIC_MUTEX(gROLockPixelsPixelRefMutex);

SkROLockPixelsPixelRef::SkROLockPixelsPixelRef(const SkImageInfo& info)
    : INHERITED(info, &gROLockPixelsPixelRefMutex) {}

SkROLockPixelsPixelRef::~SkROLockPixelsPixelRef() {}

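// Reads the surface's pixels back to the CPU (via onReadPixels) into fBitmap and
// exposes them to the caller; the lock is read-only (see onLockPixelsAreWritable).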
bool SkROLockPixelsPixelRef::onNewLockPixels(LockRec* rec) {
    fBitmap.reset();
//    SkDebugf("---------- calling readpixels in support of lockpixels\n");
    if (!this->onReadPixels(&fBitmap, NULL)) {
        SkDebugf("SkROLockPixelsPixelRef::onLockPixels failed!\n");
        return false;
    }
    fBitmap.lockPixels();
    if (NULL == fBitmap.getPixels()) {
        return false;
    }

    rec->fPixels = fBitmap.getPixels();
    rec->fColorTable = NULL;
    rec->fRowBytes = fBitmap.rowBytes();
    return true;
}

void SkROLockPixelsPixelRef::onUnlockPixels() {
    fBitmap.unlockPixels();
}

bool SkROLockPixelsPixelRef::onLockPixelsAreWritable() const {
    return false;
}

///////////////////////////////////////////////////////////////////////////////

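// Copies (a subset of) 'texture' into a newly created render-target texture with the
// requested color type/profile and wraps the result in a texture-backed SkGrPixelRef.
// Returns NULL on failure.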
static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorType dstCT,
                                                  SkColorProfileType dstPT, const SkIRect* subset) {
    if (NULL == texture || kUnknown_SkColorType == dstCT) {
        return NULL;
    }
    GrContext* context = texture->getContext();
    if (NULL == context) {
        return NULL;
    }
    GrSurfaceDesc desc;

    SkIRect srcRect;

    if (!subset) {
        desc.fWidth  = texture->width();
        desc.fHeight = texture->height();
        srcRect = SkIRect::MakeWH(texture->width(), texture->height());
    } else {
        SkASSERT(SkIRect::MakeWH(texture->width(), texture->height()).contains(*subset));
        // Create a new texture that is the size of subset.
        desc.fWidth = subset->width();
        desc.fHeight = subset->height();
        srcRect = *subset;
    }
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fConfig = SkImageInfo2GrPixelConfig(dstCT, kPremul_SkAlphaType, dstPT);

    GrTexture* dst = context->textureProvider()->createTexture(desc, false, NULL, 0);
    if (NULL == dst) {
        return NULL;
    }

    // Blink is relying on the above copy being sent to GL immediately in the case when the source
    // is a WebGL canvas backing store. We could have a TODO to remove this flush flag, but we have
    // a larger TODO to remove SkGrPixelRef entirely.
    context->copySurface(dst->asRenderTarget(), texture, srcRect, SkIPoint::Make(0,0),
                         GrContext::kFlushWrites_PixelOp);

    SkImageInfo info = SkImageInfo::Make(desc.fWidth, desc.fHeight, dstCT, kPremul_SkAlphaType,
                                         dstPT);
    SkGrPixelRef* pixelRef = SkNEW_ARGS(SkGrPixelRef, (info, dst));
    SkSafeUnref(dst);
    return pixelRef;
}

///////////////////////////////////////////////////////////////////////////////

SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface) : INHERITED(info) {
    // For surfaces that are both textures and render targets, the texture owns the
    // render target but not vice versa. So we ref the texture to keep both alive for
    // the lifetime of this pixel ref.
    fSurface = SkSafeRef(surface->asTexture());
    if (NULL == fSurface) {
        fSurface = SkSafeRef(surface);
    }

    if (fSurface) {
        SkASSERT(info.width() <= fSurface->width());
        SkASSERT(info.height() <= fSurface->height());
    }
}

SkGrPixelRef::~SkGrPixelRef() {
    SkSafeUnref(fSurface);
}

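// Returns the backing texture, or NULL if this pixel ref wraps a non-texture surface.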
GrTexture* SkGrPixelRef::getTexture() {
    if (fSurface) {
        return fSurface->asTexture();
    }
    return NULL;
}

SkPixelRef* SkGrPixelRef::deepCopy(SkColorType dstCT, SkColorProfileType dstPT,
                                   const SkIRect* subset) {
    if (NULL == fSurface) {
        return NULL;
    }

    // Note that when copying a render-target-backed pixel ref, we
    // return a texture-backed pixel ref instead.  This is because
    // render-target pixel refs are usually created in conjunction with
    // a GrTexture owned elsewhere (e.g., SkGpuDevice), and cannot live
    // independently of that texture.  Texture-backed pixel refs, on the other
    // hand, own their GrTextures, and are thus self-contained.
    return copy_to_new_texture_pixelref(fSurface->asTexture(), dstCT, dstPT, subset);
}

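// Allocates pixel memory for 'bitmap', using SkBitmapCache's discardable-memory
// allocator when it is available, otherwise the default allocator.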
static bool tryAllocBitmapPixels(SkBitmap* bitmap) {
    SkBitmap::Allocator* allocator = SkBitmapCache::GetAllocator();
    if (NULL != allocator) {
        return allocator->allocPixelRef(bitmap, 0);
    } else {
        // DiscardableMemory is not available, so fall back to the default allocator.
        return bitmap->tryAllocPixels();
    }
}

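// Reads the surface's pixels into 'dst' (optionally restricted to 'subset'), consulting
// SkBitmapCache first so repeated reads of the same generation/bounds avoid a GPU readback.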
bool SkGrPixelRef::onReadPixels(SkBitmap* dst, const SkIRect* subset) {
    if (NULL == fSurface || fSurface->wasDestroyed()) {
        return false;
    }

    SkIRect bounds;
    if (subset) {
        bounds = *subset;
    } else {
        bounds = SkIRect::MakeWH(this->info().width(), this->info().height());
    }

    // Check the cache
    if (!SkBitmapCache::Find(this->getGenerationID(), bounds, dst)) {
        // Cache miss

        SkBitmap cachedBitmap;
        cachedBitmap.setInfo(this->info().makeWH(bounds.width(), bounds.height()));

        // If we can't alloc the pixels, then fail
        if (!tryAllocBitmapPixels(&cachedBitmap)) {
            return false;
        }

        // Try to read the pixels from the surface
        void* buffer = cachedBitmap.getPixels();
        bool readPixelsOk = fSurface->readPixels(bounds.fLeft, bounds.fTop,
                                bounds.width(), bounds.height(),
                                kSkia8888_GrPixelConfig,
                                buffer, cachedBitmap.rowBytes());

        if (!readPixelsOk) {
            return false;
        }

        // If we are here, pixels were read correctly from the surface.
        cachedBitmap.setImmutable();
        // Add to the cache
        SkBitmapCache::Add(this, bounds, cachedBitmap);

        dst->swap(cachedBitmap);
    }

    return true;
}