/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrThreadSafeCache_DEFINED
#define GrThreadSafeCache_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/private/SkSpinlock.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrSurfaceProxyView.h"

class GrDirectContext;
class GrGpuBuffer;
class GrResourceCache;

// Ganesh creates a lot of utility textures (e.g., blurred-rrect masks) that need to be shared
// between the direct context and all the DDL recording contexts. This thread-safe cache
// allows this sharing.
//
// In operation, each thread will first check if the threaded cache possesses the required texture.
//
// If a DDL thread doesn't find a needed texture, it will go off and create it on the cpu and then
// attempt to add it to the cache. If another thread had added it in the interim, the losing thread
// will discard its work and use the texture the winning thread had created.
//
// If the thread in possession of the direct context doesn't find the needed texture, it should
// add a placeholder view and then queue up the draw calls to complete it. In this way the
// gpu-thread has precedence over the recording threads.
//
// The invariants for this cache differ a bit from those of the proxy and resource caches.
// For this cache:
//
//   only this cache knows the unique key - neither the proxy nor backing resource should
//       be discoverable in any other cache by the unique key
//
//   if a backing resource resides in the resource cache then there should be an entry in this
//       cache
//
//   an entry in this cache, however, doesn't guarantee that there is a corresponding entry in
//       the resource cache - although the entry here should be able to generate that entry
//       (i.e., be a lazy proxy)
//
// Wrt interactions w/ GrContext/GrResourceCache purging, we have:
//
//   Both GrContext::abandonContext and GrContext::releaseResourcesAndAbandonContext will cause
//   all the refs held in this cache to be dropped prior to clearing out the resource cache.
//
//   For the size_t-variant of GrContext::purgeUnlockedResources, after an initial attempt
//   to purge the requested amount of resources fails, uniquely held resources in this cache
//   will be dropped in LRU to MRU order until the cache is under budget. Note that this
//   prioritizes the survival of resources in this cache over those just in the resource cache.
//
//   For the 'scratchResourcesOnly' variant of GrContext::purgeUnlockedResources, this cache
//   won't be modified in the scratch-only case unless the resource cache is over budget (in
//   which case it will purge uniquely-held resources in LRU to MRU order to get
//   back under budget). In the non-scratch-only case, all uniquely held resources in this cache
//   will be released prior to the resource cache being cleared out.
//
//   For GrContext::setResourceCacheLimit, if an initial pass through the resource cache doesn't
//   get it under the new budget, uniquely held resources in this cache will be released in LRU
//   to MRU order.
//
//   For GrContext::performDeferredCleanup, any uniquely held resources that haven't been accessed
//   w/in 'msNotUsed' will be released from this cache prior to the resource cache being cleaned.
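//
// A rough usage sketch for the texture path, from a recording thread's point of view. This is
// illustrative only: the 'rContext->priv().threadSafeCache()' accessor and the
// 'makeBlurredRRectMask' helper are assumptions for the example, not part of this header.
//
//   GrThreadSafeCache* cache = rContext->priv().threadSafeCache();  // accessor assumed
//
//   GrSurfaceProxyView view = cache->find(key);
//   if (!view) {
//       // hypothetical helper that builds the mask on the cpu and uploads it
//       view = makeBlurredRRectMask(rContext, rrect, sigma);
//       // If another thread added the mask in the interim, findOrAdd returns the winner's
//       // view and this thread's work is simply discarded.
//       view = cache->findOrAdd(key, view);
//   }
//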
class GrThreadSafeCache {
public:
    GrThreadSafeCache();
    ~GrThreadSafeCache();

#if GR_TEST_UTILS
    int numEntries() const SK_EXCLUDES(fSpinLock);

    size_t approxBytesUsedForHash() const SK_EXCLUDES(fSpinLock);
#endif

    void dropAllRefs() SK_EXCLUDES(fSpinLock);

    // Drop uniquely held refs until under the resource cache's budget.
    // A null parameter means drop all uniquely held refs.
    void dropUniqueRefs(GrResourceCache* resourceCache) SK_EXCLUDES(fSpinLock);

    // Drop uniquely held refs that were last accessed before 'purgeTime'
    void dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime) SK_EXCLUDES(fSpinLock);

    SkDEBUGCODE(bool has(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);)

    GrSurfaceProxyView find(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findWithData(
            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> addWithData(
            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

    GrSurfaceProxyView findOrAdd(const GrUniqueKey&,
                                 const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findOrAddWithData(
            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

    // To hold vertex data in the cache and have it transparently transition from cpu-side to
    // gpu-side while being shared between all the threads we need a ref counted object that
    // keeps hold of the cpu-side data but allows deferred filling in of the mirroring gpu buffer.
    class VertexData : public SkNVRefCnt<VertexData> {
    public:
        ~VertexData();

        const void* vertices() const { return fVertices; }
        size_t size() const { return fNumVertices * fVertexSize; }

        int numVertices() const { return fNumVertices; }
        size_t vertexSize() const { return fVertexSize; }

        // TODO: make these return const GrGpuBuffers?
        GrGpuBuffer* gpuBuffer() { return fGpuBuffer.get(); }
        sk_sp<GrGpuBuffer> refGpuBuffer() { return fGpuBuffer; }

        void setGpuBuffer(sk_sp<GrGpuBuffer> gpuBuffer) {
            // TODO: once we add the gpuBuffer we could free 'fVertices'. Deinstantiable
            // DDLs could throw a monkey wrench into that plan though.
            SkASSERT(!fGpuBuffer);
            fGpuBuffer = gpuBuffer;
        }

        void reset() {
            sk_free(const_cast<void*>(fVertices));
            fVertices = nullptr;
            fNumVertices = 0;
            fVertexSize = 0;
            fGpuBuffer.reset();
        }

    private:
        friend class GrThreadSafeCache;  // for access to ctor

        VertexData(const void* vertices, int numVertices, size_t vertexSize)
                : fVertices(vertices)
                , fNumVertices(numVertices)
                , fVertexSize(vertexSize) {
        }

        VertexData(sk_sp<GrGpuBuffer> gpuBuffer, int numVertices, size_t vertexSize)
                : fVertices(nullptr)
                , fNumVertices(numVertices)
                , fVertexSize(vertexSize)
                , fGpuBuffer(std::move(gpuBuffer)) {
        }

        const void*        fVertices;
        int                fNumVertices;
        size_t             fVertexSize;

        sk_sp<GrGpuBuffer> fGpuBuffer;
    };

    // The returned VertexData object takes ownership of 'vertices' which had better have been
    // allocated with malloc!
    static sk_sp<VertexData> MakeVertexData(const void* vertices,
                                            int vertexCount,
                                            size_t vertexSize);
    static sk_sp<VertexData> MakeVertexData(sk_sp<GrGpuBuffer> buffer,
                                            int vertexCount,
                                            size_t vertexSize);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> findVertsWithData(
            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    typedef bool (*IsNewerBetter)(SkData* incumbent, SkData* challenger);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> addVertsWithData(
            const GrUniqueKey&,
            sk_sp<VertexData>,
            IsNewerBetter) SK_EXCLUDES(fSpinLock);

    void remove(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    // To allow gpu-created resources to have priority, we pre-emptively place a lazy proxy
    // in the thread-safe cache (with findOrAdd). The Trampoline object allows that lazy proxy to
    // be instantiated with some later generated rendering result.
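    //
    // A hedged sketch of that gpu-thread flow ('cache', 'key', and the 'renderMask' helper are
    // illustrative assumptions; only CreateLazyView, findOrAdd, and Trampoline come from this
    // header):
    //
    //   auto [lazyView, trampoline] = GrThreadSafeCache::CreateLazyView(
    //           dContext, colorType, dimensions, origin, fit);
    //   GrSurfaceProxyView view = cache->findOrAdd(key, lazyView);
    //   if (view.proxy() != lazyView.proxy()) {
    //       return view;  // a recording thread won the race; use its texture instead
    //   }
    //   // Queue up the draws that produce the texture, then fulfill the lazy proxy:
    //   trampoline->fProxy = renderMask(dContext, dimensions);  // hypothetical gpu rendering
    //   return lazyView;
    //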
    class Trampoline : public SkRefCnt {
    public:
        sk_sp<GrTextureProxy> fProxy;
    };

    static std::tuple<GrSurfaceProxyView, sk_sp<Trampoline>> CreateLazyView(GrDirectContext*,
                                                                            GrColorType,
                                                                            SkISize dimensions,
                                                                            GrSurfaceOrigin,
                                                                            SkBackingFit);

private:
    struct Entry {
        Entry(const GrUniqueKey& key, const GrSurfaceProxyView& view)
                : fKey(key)
                , fView(view)
                , fTag(Entry::kView) {
        }

        Entry(const GrUniqueKey& key, sk_sp<VertexData> vertData)
                : fKey(key)
                , fVertData(std::move(vertData))
                , fTag(Entry::kVertData) {
        }

        ~Entry() {
            this->makeEmpty();
        }

        bool uniquelyHeld() const {
            SkASSERT(fTag != kEmpty);

            if (fTag == kView && fView.proxy()->unique()) {
                return true;
            } else if (fTag == kVertData && fVertData->unique()) {
                return true;
            }

            return false;
        }

        const GrUniqueKey& key() const {
            SkASSERT(fTag != kEmpty);
            return fKey;
        }

        SkData* getCustomData() const {
            SkASSERT(fTag != kEmpty);
            return fKey.getCustomData();
        }

        sk_sp<SkData> refCustomData() const {
            SkASSERT(fTag != kEmpty);
            return fKey.refCustomData();
        }

        GrSurfaceProxyView view() {
            SkASSERT(fTag == kView);
            return fView;
        }

        sk_sp<VertexData> vertexData() {
            SkASSERT(fTag == kVertData);
            return fVertData;
        }

        void set(const GrUniqueKey& key, const GrSurfaceProxyView& view) {
            SkASSERT(fTag == kEmpty);
            fKey = key;
            fView = view;
            fTag = kView;
        }

        void makeEmpty() {
            fKey.reset();
            if (fTag == kView) {
                fView.reset();
            } else if (fTag == kVertData) {
                fVertData.reset();
            }
            fTag = kEmpty;
        }

        void set(const GrUniqueKey& key, sk_sp<VertexData> vertData) {
            SkASSERT(fTag == kEmpty || fTag == kVertData);
            fKey = key;
            fVertData = vertData;
            fTag = kVertData;
        }

        // The thread-safe cache gets to directly manipulate the llist and last-access members
        GrStdSteadyClock::time_point fLastAccess;
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);

        // for SkTDynamicHash
        static const GrUniqueKey& GetKey(const Entry& e) {
            SkASSERT(e.fTag != kEmpty);
            return e.fKey;
        }
        static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }

    private:
        // Note: the unique key is stored here bc it is never attached to a proxy or a GrTexture
        GrUniqueKey fKey;
        union {
            GrSurfaceProxyView fView;
            sk_sp<VertexData>  fVertData;
        };

        enum {
            kEmpty,
            kView,
            kVertData,
        } fTag { kEmpty };
    };

    void makeExistingEntryMRU(Entry*) SK_REQUIRES(fSpinLock);
    Entry* makeNewEntryMRU(Entry*) SK_REQUIRES(fSpinLock);

    Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
    Entry* getEntry(const GrUniqueKey&, sk_sp<VertexData>) SK_REQUIRES(fSpinLock);

    void recycleEntry(Entry*) SK_REQUIRES(fSpinLock);

    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalFind(
            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalAdd(
            const GrUniqueKey&,
            const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalFindVerts(
            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalAddVerts(
            const GrUniqueKey&,
            sk_sp<VertexData>,
            IsNewerBetter) SK_REQUIRES(fSpinLock);

    mutable SkSpinlock fSpinLock;

    SkTDynamicHash<Entry, GrUniqueKey> fUniquelyKeyedEntryMap SK_GUARDED_BY(fSpinLock);
    // The head of this list is the MRU
    SkTInternalLList<Entry> fUniquelyKeyedEntryList SK_GUARDED_BY(fSpinLock);

    // TODO: empirically determine this from the skps
    static const int kInitialArenaSize = 64 * sizeof(Entry);

    char fStorage[kInitialArenaSize];
    SkArenaAlloc fEntryAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
    Entry* fFreeEntryList SK_GUARDED_BY(fSpinLock);
};

#endif // GrThreadSafeCache_DEFINED