/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPathCache.h"

#include "include/private/SkNx.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrProxyProvider.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

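// Returns a unique, non-zero ID that identifies a GrCCPathCache's inbox on the thread-safe
// eviction message bus. Skips over SK_InvalidUniqueID if the 32-bit counter ever wraps around.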
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

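// SkMessageBus filter: a cache's inbox should only receive invalidation messages for keys that
// were created with that cache's own unique ID.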
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;


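// Captures the 2x2 linear portion of the view matrix exactly and reduces the translation to an
// integer "shift" (plus, outside the Android framework build, a stored subpixel remainder), so a
// cached mask can be reused when the path is drawn again under a compatible transform.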
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

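// Returns true if two mask transforms are close enough to share the same cached mask: identical
// 2x2 matrices and, where subpixel translation is tracked, translations within 1/256 of a pixel.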
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

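// Allocates a Key and its variable-length key data in a single allocation: "dataCountU32"
// uint32_t's of storage immediately follow the Key object. If "data" is non-null it is copied
// into that trailing storage.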
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
        : fContextUniqueID(contextUniqueID)
        , fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    while (!fLRU.isEmpty()) {
        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
    }
    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    // We just purge via message bus since we don't have any access to the resource cache right now.
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
    }
    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
    }
}

namespace {

// Produces a key that accounts for both a shape's path geometry and any stroke/style.
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}  // namespace

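// Looks up (or creates) the cache entry for "shape" drawn under "viewMatrix". Returns a null ref
// if the shape has no unstyled key or if its key would exceed kMaxKeyDataCountU32. On a hit with
// an incompatible mask transform, the existing entry is recycled if uniquely owned, or evicted
// otherwise. The returned entry is moved to the head of the LRU list.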
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
        GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
        const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
    if (!shape.hasUnstyledKey()) {
        return OnFlushEntryRef();
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return OnFlushEntryRef();
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    MaskTransform m(viewMatrix, maskShift);
    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));

        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->fHitRect = SkIRect::MakeEmpty();
                entry->releaseCachedAtlas(this);
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    if (0 == entry->fOnFlushRefCnt) {
        // Only update the time stamp and hit count if we haven't seen this entry yet during the
        // current flush.
        entry->fTimestamp = this->quickPerFlushTimestamp();
        ++entry->fHitCount;

        if (entry->fCachedAtlas) {
            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
                     SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                auto ct = GrCCAtlas::CoverageTypeToColorType(entry->fCachedAtlas->coverageType());
                if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
                            entry->fCachedAtlas->textureKey(), ct, GrCCAtlas::kTextureOrigin)) {
                    onFlushProxy->priv().setIgnoredByResourceAllocator();
                    entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
                }
            }
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                // Our atlas's backing texture got purged from the GrResourceCache. Release the
                // cached atlas.
                entry->releaseCachedAtlas(this);
            }
        }
    }
    entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
    return OnFlushEntryRef::OnFlushRef(entry);
}

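// Removes an entry from both the hash table and the LRU list, unregisters its path listener, and
// releases any cached atlas it holds. If "entry" is null it is looked up from "key".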
void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
    if (!entry) {
        HashNode* node = fHashTable.find(key);
        SkASSERT(node);
        entry = node->entry();
    }
    SkASSERT(*entry->fCacheKey == key);
    SkASSERT(!entry->hasBeenEvicted());
    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
    entry->releaseCachedAtlas(this);
    fLRU.remove(entry);
    fHashTable.remove(key);
}

void GrCCPathCache::doPreFlushProcessing() {
    this->evictInvalidatedCacheKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}

void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
                                          const GrStdSteadyClock::time_point& purgeTime) {
    this->evictInvalidatedCacheKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    this->purgeInvalidatedAtlasTextures(proxyProvider);
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        onFlushRP->processInvalidUniqueKey(key);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        proxyProvider->processInvalidUniqueKey(key, nullptr,
                                               GrProxyProvider::InvalidateGPUResource::kYes);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::evictInvalidatedCacheKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}

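// RAII wrapper: takes a regular ref on the entry plus a per-flush ref on both the entry and its
// cached atlas (if any), and releases them when the OnFlushEntryRef goes out of scope.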
GrCCPathCache::OnFlushEntryRef
GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
    entry->ref();
    ++entry->fOnFlushRefCnt;
    if (entry->fCachedAtlas) {
        entry->fCachedAtlas->incrOnFlushRefCnt();
    }
    return OnFlushEntryRef(entry);
}

GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
    if (!fEntry) {
        return;
    }
    --fEntry->fOnFlushRefCnt;
    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
    if (fEntry->fCachedAtlas) {
        fEntry->fCachedAtlas->decrOnFlushRefCnt();
    }
    fEntry->unref();
}


void GrCCPathCacheEntry::setCoverageCountAtlas(
        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
        const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().

    if (this->hasBeenEvicted()) {
        // This entry will never be found in the path cache again. Don't bother trying to save an
        // atlas texture for it in the GrResourceCache.
        return;
    }

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());

    fAtlasOffset = atlasOffset + maskShift;

    fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

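// Replaces this entry's coverage-count atlas with a literal-coverage atlas. The previously cached
// atlas is released first, and the result of that release is returned so the caller can purge the
// old texture if it was invalidated.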
GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
        const SkIVector& newAtlasOffset) {
    SkASSERT(!this->hasBeenEvicted());
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(fCachedAtlas);
    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());

    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(this->height() * this->width());

    fAtlasOffset = newAtlasOffset;
    return releaseAtlasResult;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
        GrCCPathCache* pathCache) {
    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
    if (fCachedAtlas) {
        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
        if (fOnFlushRefCnt) {
            SkASSERT(fOnFlushRefCnt > 0);
            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
        }
        fCachedAtlas = nullptr;
    }
    return result;
}

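// Marks "numPixels" of this atlas's cached path pixels as invalid. Once at least half of the
// atlas's path pixels have been invalidated, the atlas texture is queued for removal from the
// GrResourceCache and the caller is informed via kDidInvalidateFromCache.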
GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
        GrCCPathCache* pathCache, int numPixels) {
    // Mark the pixels invalid in the cached atlas texture.
    fNumInvalidatedPathPixels += numPixels;
    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
        // Too many invalidated pixels: purge the atlas texture from the resource cache.
        if (fOnFlushProxy) {
            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
            // reference on this atlas and expect to use our proxy during the current flush.
            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
        } else {
            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
        }
        fIsInvalidatedFromResourceCache = true;
        return ReleaseAtlasResult::kDidInvalidateFromCache;
    }
    return ReleaseAtlasResult::kNone;
}

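// Drops "count" on-flush references at once (e.g. when an entry releases its atlas while holding
// multiple per-flush refs). The proxy itself is dropped once the count reaches zero.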
void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
    SkASSERT(count > 0);
    fOnFlushRefCnt -= count;
    SkASSERT(fOnFlushRefCnt >= 0);
    if (0 == fOnFlushRefCnt) {
        // Don't hold the actual proxy past the end of the current flush.
        SkASSERT(fOnFlushProxy);
        fOnFlushProxy = nullptr;
    }
}
441