1 /*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/GrThreadSafeCache.h"
9
10 #include "include/gpu/GrDirectContext.h"
11 #include "src/gpu/GpuTypesPriv.h"
12 #include "src/gpu/ganesh/GrCaps.h"
13 #include "src/gpu/ganesh/GrDirectContextPriv.h"
14 #include "src/gpu/ganesh/GrGpuBuffer.h"
15 #include "src/gpu/ganesh/GrProxyProvider.h"
16 #include "src/gpu/ganesh/GrRenderTargetProxy.h"
17 #include "src/gpu/ganesh/GrResourceCache.h"
18 #include "src/gpu/ganesh/GrTexture.h"
19
~VertexData()20 GrThreadSafeCache::VertexData::~VertexData () {
21 this->reset();
22 }
23
GrThreadSafeCache()24 GrThreadSafeCache::GrThreadSafeCache()
25 : fFreeEntryList(nullptr) {
26 }
27
~GrThreadSafeCache()28 GrThreadSafeCache::~GrThreadSafeCache() {
29 this->dropAllRefs();
30 }
31
32 #if defined(GR_TEST_UTILS)
numEntries() const33 int GrThreadSafeCache::numEntries() const {
34 SkAutoSpinlock lock{fSpinLock};
35
36 return fUniquelyKeyedEntryMap.count();
37 }
38
approxBytesUsedForHash() const39 size_t GrThreadSafeCache::approxBytesUsedForHash() const {
40 SkAutoSpinlock lock{fSpinLock};
41
42 return fUniquelyKeyedEntryMap.approxBytesUsed();
43 }
44 #endif
45
dropAllRefs()46 void GrThreadSafeCache::dropAllRefs() {
47 SkAutoSpinlock lock{fSpinLock};
48
49 fUniquelyKeyedEntryMap.reset();
50 while (auto tmp = fUniquelyKeyedEntryList.head()) {
51 fUniquelyKeyedEntryList.remove(tmp);
52 this->recycleEntry(tmp);
53 }
54 // TODO: should we empty out the fFreeEntryList and reset fEntryAllocator?
55 }
56
57 // TODO: If iterating becomes too expensive switch to using something like GrIORef for the
58 // GrSurfaceProxy
dropUniqueRefs(GrResourceCache * resourceCache)59 void GrThreadSafeCache::dropUniqueRefs(GrResourceCache* resourceCache) {
60 SkAutoSpinlock lock{fSpinLock};
61
62 // Iterate from LRU to MRU
63 Entry* cur = fUniquelyKeyedEntryList.tail();
64 Entry* prev = cur ? cur->fPrev : nullptr;
65
66 while (cur) {
67 if (resourceCache && !resourceCache->overBudget()) {
68 return;
69 }
70
71 if (cur->uniquelyHeld()) {
72 fUniquelyKeyedEntryMap.remove(cur->key());
73 fUniquelyKeyedEntryList.remove(cur);
74 this->recycleEntry(cur);
75 }
76
77 cur = prev;
78 prev = cur ? cur->fPrev : nullptr;
79 }
80 }
81
dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime)82 void GrThreadSafeCache::dropUniqueRefsOlderThan(skgpu::StdSteadyClock::time_point purgeTime) {
83 SkAutoSpinlock lock{fSpinLock};
84
85 // Iterate from LRU to MRU
86 Entry* cur = fUniquelyKeyedEntryList.tail();
87 Entry* prev = cur ? cur->fPrev : nullptr;
88
89 while (cur) {
90 if (cur->fLastAccess >= purgeTime) {
91 // This entry and all the remaining ones in the list will be newer than 'purgeTime'
92 return;
93 }
94
95 if (cur->uniquelyHeld()) {
96 fUniquelyKeyedEntryMap.remove(cur->key());
97 fUniquelyKeyedEntryList.remove(cur);
98 this->recycleEntry(cur);
99 }
100
101 cur = prev;
102 prev = cur ? cur->fPrev : nullptr;
103 }
104 }
105
makeExistingEntryMRU(Entry * entry)106 void GrThreadSafeCache::makeExistingEntryMRU(Entry* entry) {
107 SkASSERT(fUniquelyKeyedEntryList.isInList(entry));
108
109 entry->fLastAccess = skgpu::StdSteadyClock::now();
110 fUniquelyKeyedEntryList.remove(entry);
111 fUniquelyKeyedEntryList.addToHead(entry);
112 }
113
internalFind(const skgpu::UniqueKey & key)114 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
115 const skgpu::UniqueKey& key) {
116 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
117 if (tmp) {
118 this->makeExistingEntryMRU(tmp);
119 return { tmp->view(), tmp->refCustomData() };
120 }
121
122 return {};
123 }
124
125 #ifdef SK_DEBUG
has(const skgpu::UniqueKey & key)126 bool GrThreadSafeCache::has(const skgpu::UniqueKey& key) {
127 SkAutoSpinlock lock{fSpinLock};
128
129 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
130 return SkToBool(tmp);
131 }
132 #endif
133
find(const skgpu::UniqueKey & key)134 GrSurfaceProxyView GrThreadSafeCache::find(const skgpu::UniqueKey& key) {
135 SkAutoSpinlock lock{fSpinLock};
136
137 GrSurfaceProxyView view;
138 std::tie(view, std::ignore) = this->internalFind(key);
139 return view;
140 }
141
findWithData(const skgpu::UniqueKey & key)142 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findWithData(
143 const skgpu::UniqueKey& key) {
144 SkAutoSpinlock lock{fSpinLock};
145
146 return this->internalFind(key);
147 }
148
getEntry(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)149 GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
150 const GrSurfaceProxyView& view) {
151 Entry* entry;
152
153 if (fFreeEntryList) {
154 entry = fFreeEntryList;
155 fFreeEntryList = entry->fNext;
156 entry->fNext = nullptr;
157
158 entry->set(key, view);
159 } else {
160 entry = fEntryAllocator.make<Entry>(key, view);
161 }
162
163 return this->makeNewEntryMRU(entry);
164 }
165
makeNewEntryMRU(Entry * entry)166 GrThreadSafeCache::Entry* GrThreadSafeCache::makeNewEntryMRU(Entry* entry) {
167 entry->fLastAccess = skgpu::StdSteadyClock::now();
168 fUniquelyKeyedEntryList.addToHead(entry);
169 fUniquelyKeyedEntryMap.add(entry);
170 return entry;
171 }
172
getEntry(const skgpu::UniqueKey & key,sk_sp<VertexData> vertData)173 GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
174 sk_sp<VertexData> vertData) {
175 Entry* entry;
176
177 if (fFreeEntryList) {
178 entry = fFreeEntryList;
179 fFreeEntryList = entry->fNext;
180 entry->fNext = nullptr;
181
182 entry->set(key, std::move(vertData));
183 } else {
184 entry = fEntryAllocator.make<Entry>(key, std::move(vertData));
185 }
186
187 return this->makeNewEntryMRU(entry);
188 }
189
recycleEntry(Entry * dead)190 void GrThreadSafeCache::recycleEntry(Entry* dead) {
191 SkASSERT(!dead->fPrev && !dead->fNext && !dead->fList);
192
193 dead->makeEmpty();
194
195 dead->fNext = fFreeEntryList;
196 fFreeEntryList = dead;
197 }
198
internalAdd(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)199 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
200 const skgpu::UniqueKey& key,
201 const GrSurfaceProxyView& view) {
202 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
203 if (!tmp) {
204 tmp = this->getEntry(key, view);
205
206 SkASSERT(fUniquelyKeyedEntryMap.find(key));
207 }
208
209 return { tmp->view(), tmp->refCustomData() };
210 }
211
add(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)212 GrSurfaceProxyView GrThreadSafeCache::add(const skgpu::UniqueKey& key,
213 const GrSurfaceProxyView& view) {
214 SkAutoSpinlock lock{fSpinLock};
215
216 GrSurfaceProxyView newView;
217 std::tie(newView, std::ignore) = this->internalAdd(key, view);
218 return newView;
219 }
220
addWithData(const skgpu::UniqueKey & key,const GrSurfaceProxyView & view)221 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::addWithData(
222 const skgpu::UniqueKey& key,
223 const GrSurfaceProxyView& view) {
224 SkAutoSpinlock lock{fSpinLock};
225
226 return this->internalAdd(key, view);
227 }
228
findOrAdd(const skgpu::UniqueKey & key,const GrSurfaceProxyView & v)229 GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const skgpu::UniqueKey& key,
230 const GrSurfaceProxyView& v) {
231 SkAutoSpinlock lock{fSpinLock};
232
233 GrSurfaceProxyView view;
234 std::tie(view, std::ignore) = this->internalFind(key);
235 if (view) {
236 return view;
237 }
238
239 std::tie(view, std::ignore) = this->internalAdd(key, v);
240 return view;
241 }
242
findOrAddWithData(const skgpu::UniqueKey & key,const GrSurfaceProxyView & v)243 std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findOrAddWithData(
244 const skgpu::UniqueKey& key,
245 const GrSurfaceProxyView& v) {
246 SkAutoSpinlock lock{fSpinLock};
247
248 auto [view, data] = this->internalFind(key);
249 if (view) {
250 return { std::move(view), std::move(data) };
251 }
252
253 return this->internalAdd(key, v);
254 }
255
MakeVertexData(const void * vertices,int vertexCount,size_t vertexSize)256 sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(const void* vertices,
257 int vertexCount,
258 size_t vertexSize) {
259 return sk_sp<VertexData>(new VertexData(vertices, vertexCount, vertexSize));
260 }
261
MakeVertexData(sk_sp<GrGpuBuffer> buffer,int vertexCount,size_t vertexSize)262 sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(sk_sp<GrGpuBuffer> buffer,
263 int vertexCount,
264 size_t vertexSize) {
265 return sk_sp<VertexData>(new VertexData(std::move(buffer), vertexCount, vertexSize));
266 }
267
268 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
internalFindVerts(const skgpu::UniqueKey & key)269 GrThreadSafeCache::internalFindVerts(const skgpu::UniqueKey& key) {
270 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
271 if (tmp) {
272 this->makeExistingEntryMRU(tmp);
273 return { tmp->vertexData(), tmp->refCustomData() };
274 }
275
276 return {};
277 }
278
279 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
findVertsWithData(const skgpu::UniqueKey & key)280 GrThreadSafeCache::findVertsWithData(const skgpu::UniqueKey& key) {
281 SkAutoSpinlock lock{fSpinLock};
282
283 return this->internalFindVerts(key);
284 }
285
internalAddVerts(const skgpu::UniqueKey & key,sk_sp<VertexData> vertData,IsNewerBetter isNewerBetter)286 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::internalAddVerts(
287 const skgpu::UniqueKey& key,
288 sk_sp<VertexData> vertData,
289 IsNewerBetter isNewerBetter) {
290 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
291 if (!tmp) {
292 tmp = this->getEntry(key, std::move(vertData));
293
294 SkASSERT(fUniquelyKeyedEntryMap.find(key));
295 } else if (isNewerBetter(tmp->getCustomData(), key.getCustomData())) {
296 // This orphans any existing uses of the prior vertex data but ensures the best
297 // version is in the cache.
298 tmp->set(key, std::move(vertData));
299 }
300
301 return { tmp->vertexData(), tmp->refCustomData() };
302 }
303
addVertsWithData(const skgpu::UniqueKey & key,sk_sp<VertexData> vertData,IsNewerBetter isNewerBetter)304 std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::addVertsWithData(
305 const skgpu::UniqueKey& key,
306 sk_sp<VertexData> vertData,
307 IsNewerBetter isNewerBetter) {
308 SkAutoSpinlock lock{fSpinLock};
309
310 return this->internalAddVerts(key, std::move(vertData), isNewerBetter);
311 }
312
remove(const skgpu::UniqueKey & key)313 void GrThreadSafeCache::remove(const skgpu::UniqueKey& key) {
314 SkAutoSpinlock lock{fSpinLock};
315
316 Entry* tmp = fUniquelyKeyedEntryMap.find(key);
317 if (tmp) {
318 fUniquelyKeyedEntryMap.remove(key);
319 fUniquelyKeyedEntryList.remove(tmp);
320 this->recycleEntry(tmp);
321 }
322 }
323
// Creates a lazily-instantiated render-target proxy whose backing texture is
// supplied later through the returned Trampoline (the lambda below reads
// trampoline->fProxy at instantiation time). Returns a null view and null
// trampoline if no renderable fallback format exists for 'origCT'.
std::tuple<GrSurfaceProxyView, sk_sp<GrThreadSafeCache::Trampoline>>
GrThreadSafeCache::CreateLazyView(GrDirectContext* dContext,
                                  GrColorType origCT,
                                  SkISize dimensions,
                                  GrSurfaceOrigin origin,
                                  SkBackingFit fit) {
    GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
    const GrCaps* caps = dContext->priv().caps();

    constexpr int kSampleCnt = 1;
    // Fall back to a supported color type/format pair for this sample count.
    auto [newCT, format] = caps->getFallbackColorTypeAndFormat(origCT, kSampleCnt);

    if (newCT == GrColorType::kUnknown) {
        // No usable fallback - signal failure with an empty view.
        return {GrSurfaceProxyView(nullptr), nullptr};
    }

    sk_sp<Trampoline> trampoline(new Trampoline);

    GrProxyProvider::TextureInfo texInfo{skgpu::Mipmapped::kNo, GrTextureType::k2D};

    sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
            // The lambda shares ownership of the trampoline; the texture it
            // returns is whatever the trampoline's proxy was instantiated
            // with by the time this proxy is instantiated.
            [trampoline](
                    GrResourceProvider* resourceProvider,
                    const GrSurfaceProxy::LazySurfaceDesc&) -> GrSurfaceProxy::LazyCallbackResult {
                if (!resourceProvider || !trampoline->fProxy ||
                    !trampoline->fProxy->isInstantiated()) {
                    // Instantiation failed (or abandoned); 'true' releases the callback.
                    return GrSurfaceProxy::LazyCallbackResult(nullptr, true);
                }

                SkASSERT(!trampoline->fProxy->peekTexture()->getUniqueKey().isValid());
                return GrSurfaceProxy::LazyCallbackResult(
                        sk_ref_sp(trampoline->fProxy->peekTexture()));
            },
            format,
            dimensions,
            kSampleCnt,
            GrInternalSurfaceFlags::kNone,
            &texInfo,
            GrMipmapStatus::kNotAllocated,
            fit,
            skgpu::Budgeted::kYes,
            GrProtected::kNo,
            /* wrapsVkSecondaryCB */ false,
            GrSurfaceProxy::UseAllocator::kYes);

    // TODO: It seems like this 'newCT' usage should be 'origCT' but this is
    // what skgpu::ganesh::SurfaceDrawContext::MakeWithFallback does
    skgpu::Swizzle swizzle = dContext->priv().caps()->getReadSwizzle(format, newCT);

    return {{std::move(proxy), origin, swizzle}, std::move(trampoline)};
}
375