/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCPathCache.h"

#include "GrOnFlushResourceProvider.h"
#include "GrProxyProvider.h"
#include "SkNx.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

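// Returns a process-unique, non-invalid ID. Each GrCCPathCache uses one of these as the unique ID
// for its invalidated-keys inbox on the message bus.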
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

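// SkMessageBus filter: only post an invalidated key to the inbox of the path cache that owns it.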
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;


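// Captures the 2x2 scale/skew portion of a view matrix, plus (outside the Android framework) its
// subpixel translation. The integer portion of the translation is returned via 'shift'.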
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

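// Returns true if a mask generated with transform 'a' can be reused with transform 'b': the 2x2
// matrices must match exactly and, outside the Android framework, the subpixel translations must
// agree to within 1/256 of a pixel.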
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

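// Allocates a Key with storage for 'dataCountU32' trailing uint32_t's of key data, optionally
// copying initial contents from 'data'.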
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

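// The scratch key is allocated once at the maximum size this cache accepts (kMaxKeyDataCountU32),
// so find() can reuse it for lookups rather than building a new key per query.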
GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
        : fContextUniqueID(contextUniqueID)
        , fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    while (!fLRU.isEmpty()) {
        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
    }
    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    // We just purge via message bus since we don't have any access to the resource cache right now.
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
    }
    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
    }
}

namespace {

// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}

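// Finds (or creates) the cache entry for 'shape' drawn with 'viewMatrix'. Returns a null ref if
// the shape has no unstyled key, or if its key would exceed kMaxKeyDataCountU32. A hit whose mask
// transform is incompatible is either recycled (when we hold the only reference) or evicted and
// replaced; when the table is full, the least-recently-used entry is evicted first.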
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
        GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
        const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
    if (!shape.hasUnstyledKey()) {
        return OnFlushEntryRef();
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return OnFlushEntryRef();
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    MaskTransform m(viewMatrix, maskShift);
    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));

        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->fHitRect = SkIRect::MakeEmpty();
                entry->releaseCachedAtlas(this);
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    if (0 == entry->fOnFlushRefCnt) {
        // Only update the time stamp and hit count if we haven't seen this entry yet during the
        // current flush.
        entry->fTimestamp = this->quickPerFlushTimestamp();
        ++entry->fHitCount;

        if (entry->fCachedAtlas) {
            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt())
                             == SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                entry->fCachedAtlas->setOnFlushProxy(
                    onFlushRP->findOrCreateProxyByUniqueKey(entry->fCachedAtlas->textureKey(),
                                                            GrCCAtlas::kTextureOrigin));
            }
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                // Our atlas's backing texture got purged from the GrResourceCache. Release the
                // cached atlas.
                entry->releaseCachedAtlas(this);
            }
        }
    }
    entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
    return OnFlushEntryRef::OnFlushRef(entry);
}

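// Removes an entry from the hash table and LRU list, unregisters its path listener, and releases
// any cached atlas. Callers that already hold the entry pointer can pass it in to skip the hash
// lookup.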
void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
    if (!entry) {
        HashNode* node = fHashTable.find(key);
        SkASSERT(node);
        entry = node->entry();
    }
    SkASSERT(*entry->fCacheKey == key);
    SkASSERT(!entry->hasBeenEvicted());
    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
    entry->releaseCachedAtlas(this);
    fLRU.remove(entry);
    fHashTable.remove(key);
}

void GrCCPathCache::doPreFlushProcessing() {
    this->evictInvalidatedCacheKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}

void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
                                          const GrStdSteadyClock::time_point& purgeTime) {
    this->evictInvalidatedCacheKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    this->purgeInvalidatedAtlasTextures(proxyProvider);
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        onFlushRP->processInvalidUniqueKey(key);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        proxyProvider->processInvalidUniqueKey(key, nullptr,
                                               GrProxyProvider::InvalidateGPUResource::kYes);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::evictInvalidatedCacheKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}

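// An OnFlushEntryRef holds a regular ref plus a per-flush ref on the entry (and on its cached
// atlas, if any); the destructor below drops both.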
GrCCPathCache::OnFlushEntryRef
GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
    entry->ref();
    ++entry->fOnFlushRefCnt;
    if (entry->fCachedAtlas) {
        entry->fCachedAtlas->incrOnFlushRefCnt();
    }
    return OnFlushEntryRef(entry);
}

GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
    if (!fEntry) {
        return;
    }
    --fEntry->fOnFlushRefCnt;
    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
    if (fEntry->fCachedAtlas) {
        fEntry->fCachedAtlas->decrOnFlushRefCnt();
    }
    fEntry->unref();
}


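// Records the coverage-count atlas this entry's mask was rendered into, along with its atlas
// offset and device-space bounds (translated to be relative to 'maskShift'). Does nothing if the
// entry has already been evicted from the cache.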
void GrCCPathCacheEntry::setCoverageCountAtlas(
        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
        const SkRect& devBounds, const SkRect& devBounds45, const SkIRect& devIBounds,
        const SkIVector& maskShift) {
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().

    if (this->hasBeenEvicted()) {
        // This entry will never be found in the path cache again. Don't bother trying to save an
        // atlas texture for it in the GrResourceCache.
        return;
    }

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());

    fAtlasOffset = atlasOffset + maskShift;

    float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
    fDevBounds = devBounds.makeOffset(-dx, -dy);
    fDevBounds45 = GrCCPathProcessor::MakeOffset45(devBounds45, -dx, -dy);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
        const SkIVector& newAtlasOffset) {
    SkASSERT(!this->hasBeenEvicted());
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(fCachedAtlas);
    SkASSERT(GrCCAtlas::CoverageType::kFP16_CoverageCount == fCachedAtlas->coverageType());

    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(this->height() * this->width());

    fAtlasOffset = newAtlasOffset;
    return releaseAtlasResult;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
        GrCCPathCache* pathCache) {
    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
    if (fCachedAtlas) {
        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
        if (fOnFlushRefCnt) {
            SkASSERT(fOnFlushRefCnt > 0);
            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
        }
        fCachedAtlas = nullptr;
    }
    return result;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
        GrCCPathCache* pathCache, int numPixels) {
    // Mark the pixels invalid in the cached atlas texture.
    fNumInvalidatedPathPixels += numPixels;
    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
        // Too many invalidated pixels: purge the atlas texture from the resource cache.
        if (fOnFlushProxy) {
            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
            // reference on this atlas and expect to use our proxy during the current flush.
            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
        } else {
            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
        }
        fIsInvalidatedFromResourceCache = true;
        return ReleaseAtlasResult::kDidInvalidateFromCache;
    }
    return ReleaseAtlasResult::kNone;
}

void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
    SkASSERT(count > 0);
    fOnFlushRefCnt -= count;
    SkASSERT(fOnFlushRefCnt >= 0);
    if (0 == fOnFlushRefCnt) {
        // Don't hold the actual proxy past the end of the current flush.
        SkASSERT(fOnFlushProxy);
        fOnFlushProxy = nullptr;
    }
}