/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrThreadSafeCache.h"

#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrResourceCache.h"

// Releases any CPU-side vertex data and/or GPU buffer held by this VertexData.
GrThreadSafeCache::VertexData::~VertexData () {
    this->reset();
}
19
// The cache starts empty; entries are lazily allocated (see getEntry) and
// recycled through fFreeEntryList.
GrThreadSafeCache::GrThreadSafeCache()
    : fFreeEntryList(nullptr) {
}
23
// Drops all cached entries (and their refs on the underlying resources).
GrThreadSafeCache::~GrThreadSafeCache() {
    this->dropAllRefs();
}
27
#if GR_TEST_UTILS
// Test-only: number of entries currently in the cache.
int GrThreadSafeCache::numEntries() const {
    SkAutoSpinlock lock{fSpinLock};

    return fUniquelyKeyedEntryMap.count();
}

// Test-only: approximate memory used by the hash table itself (not the
// cached resources).
size_t GrThreadSafeCache::approxBytesUsedForHash() const {
    SkAutoSpinlock lock{fSpinLock};

    return fUniquelyKeyedEntryMap.approxBytesUsed();
}
#endif
41
// Removes every entry from both the hash map and the MRU list, returning the
// entries themselves to the free list for reuse.
void GrThreadSafeCache::dropAllRefs() {
    SkAutoSpinlock lock{fSpinLock};

    fUniquelyKeyedEntryMap.reset();
    // Pop entries off the list one at a time; recycleEntry requires the entry
    // to already be unlinked from the list.
    while (auto tmp = fUniquelyKeyedEntryList.head()) {
        fUniquelyKeyedEntryList.remove(tmp);
        this->recycleEntry(tmp);
    }
    // TODO: should we empty out the fFreeEntryList and reset fEntryAllocator?
}
52
// TODO: If iterating becomes too expensive switch to using something like GrIORef for the
// GrSurfaceProxy
// Purges entries whose only remaining reference is the cache's own, stopping
// early once 'resourceCache' reports it is back under budget. A null
// 'resourceCache' purges all uniquely-held entries unconditionally.
void GrThreadSafeCache::dropUniqueRefs(GrResourceCache* resourceCache) {
    SkAutoSpinlock lock{fSpinLock};

    // Iterate from LRU to MRU
    Entry* cur = fUniquelyKeyedEntryList.tail();
    // Capture the predecessor up front: removing 'cur' invalidates its links.
    Entry* prev = cur ? cur->fPrev : nullptr;

    while (cur) {
        if (resourceCache && !resourceCache->overBudget()) {
            return;
        }

        if (cur->uniquelyHeld()) {
            fUniquelyKeyedEntryMap.remove(cur->key());
            fUniquelyKeyedEntryList.remove(cur);
            this->recycleEntry(cur);
        }

        cur = prev;
        prev = cur ? cur->fPrev : nullptr;
    }
}
77
// Purges uniquely-held entries that were last accessed before 'purgeTime'.
// Because the list is kept in MRU order, iteration can stop at the first
// entry newer than the cutoff.
void GrThreadSafeCache::dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime) {
    SkAutoSpinlock lock{fSpinLock};

    // Iterate from LRU to MRU
    Entry* cur = fUniquelyKeyedEntryList.tail();
    // Capture the predecessor up front: removing 'cur' invalidates its links.
    Entry* prev = cur ? cur->fPrev : nullptr;

    while (cur) {
        if (cur->fLastAccess >= purgeTime) {
            // This entry and all the remaining ones in the list will be newer than 'purgeTime'
            return;
        }

        if (cur->uniquelyHeld()) {
            fUniquelyKeyedEntryMap.remove(cur->key());
            fUniquelyKeyedEntryList.remove(cur);
            this->recycleEntry(cur);
        }

        cur = prev;
        prev = cur ? cur->fPrev : nullptr;
    }
}
101
// Moves an entry that is already in the cache to the head (MRU end) of the
// list and refreshes its access time.
// NOTE(review): takes no lock itself — presumably callers (e.g. internalFind)
// already hold fSpinLock; verify at call sites.
void GrThreadSafeCache::makeExistingEntryMRU(Entry* entry) {
    SkASSERT(fUniquelyKeyedEntryList.isInList(entry));

    entry->fLastAccess = GrStdSteadyClock::now();
    fUniquelyKeyedEntryList.remove(entry);
    fUniquelyKeyedEntryList.addToHead(entry);
}
109
// Looks up 'key'; on a hit, bumps the entry to MRU and returns its view plus
// a ref on its custom data. On a miss returns a default (empty) tuple.
// Callers are expected to hold fSpinLock.
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
        const skgpu::UniqueKey& key) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (tmp) {
        this->makeExistingEntryMRU(tmp);
        return { tmp->view(), tmp->refCustomData() };
    }

    return {};
}
120
#ifdef SK_DEBUG
// Debug-only query: is there currently an entry cached under 'key'?
bool GrThreadSafeCache::has(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    return SkToBool(fUniquelyKeyedEntryMap.find(key));
}
#endif
129
find(const skgpu::UniqueKey & key)130 GrSurfaceProxyView GrThreadSafeCache::find(const skgpu::UniqueKey& key) {
131 SkAutoSpinlock lock{fSpinLock};
132
133 GrSurfaceProxyView view;
134 std::tie(view, std::ignore) = this->internalFind(key);
135 return view;
136 }
137
// Public, locked version of internalFind: returns the cached view and its
// custom data for 'key' (empty tuple on a miss).
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findWithData(
        const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalFind(key);
}
144
// Obtains an Entry for (key, view) — reusing one from the free list when
// available, otherwise allocating from fEntryAllocator — and installs it as
// the new MRU entry. Callers are expected to hold fSpinLock.
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
                                                      const GrSurfaceProxyView& view) {
    Entry* entry;

    if (fFreeEntryList) {
        // Pop the head of the singly-linked free list.
        entry = fFreeEntryList;
        fFreeEntryList = entry->fNext;
        entry->fNext = nullptr;

        entry->set(key, view);
    } else {
        entry = fEntryAllocator.make<Entry>(key, view);
    }

    return this->makeNewEntryMRU(entry);
}
161
// Registers a freshly populated entry: stamps its access time and adds it to
// the head of the MRU list and to the hash map.
GrThreadSafeCache::Entry* GrThreadSafeCache::makeNewEntryMRU(Entry* entry) {
    entry->fLastAccess = GrStdSteadyClock::now();
    fUniquelyKeyedEntryList.addToHead(entry);
    fUniquelyKeyedEntryMap.add(entry);
    return entry;
}
168
// Vertex-data overload of getEntry: obtains an Entry for (key, vertData),
// recycling from the free list when possible, and installs it as MRU.
// Callers are expected to hold fSpinLock.
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
                                                      sk_sp<VertexData> vertData) {
    Entry* entry;

    if (fFreeEntryList) {
        // Pop the head of the singly-linked free list.
        entry = fFreeEntryList;
        fFreeEntryList = entry->fNext;
        entry->fNext = nullptr;

        entry->set(key, std::move(vertData));
    } else {
        entry = fEntryAllocator.make<Entry>(key, std::move(vertData));
    }

    return this->makeNewEntryMRU(entry);
}
185
// Returns a fully unlinked entry to the free list after clearing its payload.
// The entry must already have been removed from both the list and the map.
void GrThreadSafeCache::recycleEntry(Entry* dead) {
    SkASSERT(!dead->fPrev && !dead->fNext && !dead->fList);

    dead->makeEmpty();

    // Push onto the head of the singly-linked free list.
    dead->fNext = fFreeEntryList;
    fFreeEntryList = dead;
}
194
// Inserts (key, view) if no entry exists for 'key'; otherwise keeps the
// existing entry. Either way, returns the cached view and custom data, which
// may differ from what the caller passed in. Callers hold fSpinLock.
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
        const skgpu::UniqueKey& key,
        const GrSurfaceProxyView& view) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (!tmp) {
        tmp = this->getEntry(key, view);

        SkASSERT(fUniquelyKeyedEntryMap.find(key));
    }

    return { tmp->view(), tmp->refCustomData() };
}
207
add(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)208 GrSurfaceProxyView GrThreadSafeCache::add(const skgpu::UniqueKey& key,
209 const GrSurfaceProxyView& view) {
210 SkAutoSpinlock lock{fSpinLock};
211
212 GrSurfaceProxyView newView;
213 std::tie(newView, std::ignore) = this->internalAdd(key, view);
214 return newView;
215 }
216
// Public, locked version of internalAdd: returns the cached view and custom
// data for 'key' (inserting 'view' if the key was absent).
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::addWithData(
        const skgpu::UniqueKey& key,
        const GrSurfaceProxyView& view) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalAdd(key, view);
}
224
findOrAdd(const skgpu::UniqueKey & key,const GrSurfaceProxyView & v)225 GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const skgpu::UniqueKey& key,
226 const GrSurfaceProxyView& v) {
227 SkAutoSpinlock lock{fSpinLock};
228
229 GrSurfaceProxyView view;
230 std::tie(view, std::ignore) = this->internalFind(key);
231 if (view) {
232 return view;
233 }
234
235 std::tie(view, std::ignore) = this->internalAdd(key, v);
236 return view;
237 }
238
// Single-lock find-or-insert that also returns the custom data: returns the
// cached (view, data) for 'key' if present, otherwise inserts 'v'.
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findOrAddWithData(
        const skgpu::UniqueKey& key,
        const GrSurfaceProxyView& v) {
    SkAutoSpinlock lock{fSpinLock};

    auto [view, data] = this->internalFind(key);
    if (view) {
        return { std::move(view), std::move(data) };
    }

    return this->internalAdd(key, v);
}
251
// Wraps raw CPU-side vertex data in a ref-counted VertexData object.
// The VertexData constructor is private, hence the factory.
sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(const void* vertices,
                                                                       int vertexCount,
                                                                       size_t vertexSize) {
    return sk_sp<VertexData>(new VertexData(vertices, vertexCount, vertexSize));
}
257
// Wraps an already-created GPU vertex buffer in a ref-counted VertexData
// object, taking ownership of the buffer ref.
sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(sk_sp<GrGpuBuffer> buffer,
                                                                       int vertexCount,
                                                                       size_t vertexSize) {
    return sk_sp<VertexData>(new VertexData(std::move(buffer), vertexCount, vertexSize));
}
263
// Vertex-data analogue of internalFind: on a hit, bumps the entry to MRU and
// returns its vertex data plus a ref on its custom data; empty tuple on a
// miss. Callers are expected to hold fSpinLock.
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::internalFindVerts(const skgpu::UniqueKey& key) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (tmp) {
        this->makeExistingEntryMRU(tmp);
        return { tmp->vertexData(), tmp->refCustomData() };
    }

    return {};
}
274
// Public, locked version of internalFindVerts.
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::findVertsWithData(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalFindVerts(key);
}
281
// Inserts (key, vertData) if 'key' is absent. If an entry already exists and
// 'isNewerBetter' judges the incoming key's custom data superior, the entry's
// payload is replaced in place. Returns whatever ends up cached.
// Callers are expected to hold fSpinLock.
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::internalAddVerts(
        const skgpu::UniqueKey& key,
        sk_sp<VertexData> vertData,
        IsNewerBetter isNewerBetter) {
    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (!tmp) {
        tmp = this->getEntry(key, std::move(vertData));

        SkASSERT(fUniquelyKeyedEntryMap.find(key));
    } else if (isNewerBetter(tmp->getCustomData(), key.getCustomData())) {
        // This orphans any existing uses of the prior vertex data but ensures the best
        // version is in the cache.
        tmp->set(key, std::move(vertData));
    }

    return { tmp->vertexData(), tmp->refCustomData() };
}
299
// Public, locked version of internalAddVerts.
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::addVertsWithData(
        const skgpu::UniqueKey& key,
        sk_sp<VertexData> vertData,
        IsNewerBetter isNewerBetter) {
    SkAutoSpinlock lock{fSpinLock};

    return this->internalAddVerts(key, std::move(vertData), isNewerBetter);
}
308
// Removes the entry for 'key' (if any) from both the map and the list, and
// recycles it onto the free list. No-op when the key is absent.
void GrThreadSafeCache::remove(const skgpu::UniqueKey& key) {
    SkAutoSpinlock lock{fSpinLock};

    Entry* tmp = fUniquelyKeyedEntryMap.find(key);
    if (tmp) {
        fUniquelyKeyedEntryMap.remove(key);
        fUniquelyKeyedEntryList.remove(tmp);
        this->recycleEntry(tmp);
    }
}
319
// Creates a lazily-instantiated render-target proxy view plus a Trampoline.
// The lazy proxy's instantiation callback pulls its backing texture from the
// Trampoline's fProxy, which is expected to be filled in later (by the code
// that actually renders the cached content). Returns a null view and null
// trampoline if no usable fallback color type exists for 'origCT'.
std::tuple<GrSurfaceProxyView, sk_sp<GrThreadSafeCache::Trampoline>>
GrThreadSafeCache::CreateLazyView(GrDirectContext* dContext,
                                  GrColorType origCT,
                                  SkISize dimensions,
                                  GrSurfaceOrigin origin,
                                  SkBackingFit fit) {
    GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
    const GrCaps* caps = dContext->priv().caps();

    constexpr int kSampleCnt = 1;
    // Fall back to a renderable color type/format if 'origCT' isn't supported.
    auto [newCT, format] = caps->getFallbackColorTypeAndFormat(origCT, kSampleCnt);

    if (newCT == GrColorType::kUnknown) {
        return {GrSurfaceProxyView(nullptr), nullptr};
    }

    sk_sp<Trampoline> trampoline(new Trampoline);

    GrProxyProvider::TextureInfo texInfo{ GrMipMapped::kNo, GrTextureType::k2D };

    sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
            // The callback shares ownership of the trampoline; when invoked it
            // hands back the texture of whatever proxy was stored there.
            [trampoline](
                    GrResourceProvider* resourceProvider,
                    const GrSurfaceProxy::LazySurfaceDesc&) -> GrSurfaceProxy::LazyCallbackResult {
                if (!resourceProvider || !trampoline->fProxy ||
                    !trampoline->fProxy->isInstantiated()) {
                    // Instantiation failed (or we're being cleaned up):
                    // report null and release the callback's resources.
                    return GrSurfaceProxy::LazyCallbackResult(nullptr, true);
                }

                SkASSERT(!trampoline->fProxy->peekTexture()->getUniqueKey().isValid());
                return GrSurfaceProxy::LazyCallbackResult(
                        sk_ref_sp(trampoline->fProxy->peekTexture()));
            },
            format,
            dimensions,
            kSampleCnt,
            GrInternalSurfaceFlags::kNone,
            &texInfo,
            GrMipmapStatus::kNotAllocated,
            fit,
            SkBudgeted::kYes,
            GrProtected::kNo,
            /* wrapsVkSecondaryCB */ false,
            GrSurfaceProxy::UseAllocator::kYes);

    // TODO: It seems like this 'newCT' usage should be 'origCT' but this is
    // what skgpu::v1::SurfaceDrawContext::MakeWithFallback does
    skgpu::Swizzle swizzle = dContext->priv().caps()->getReadSwizzle(format, newCT);

    return {{std::move(proxy), origin, swizzle}, std::move(trampoline)};
}
371