/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrThreadSafeCache_DEFINED
#define GrThreadSafeCache_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/private/SkSpinlock.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrSurfaceProxyView.h"

// Ganesh creates a lot of utility textures (e.g., blurred-rrect masks) that need to be shared
// between the direct context and all the DDL recording contexts. This thread-safe cache
// allows this sharing.
//
// In operation, each thread will first check if the thread-safe cache possesses the required
// texture.
//
// If a DDL thread doesn't find a needed texture it will go off and create it on the cpu and then
// attempt to add it to the cache. If another thread had added it in the interim, the losing thread
// will discard its work and use the texture the winning thread had created.
//
// If the thread in possession of the direct context doesn't find the needed texture it should
// add a placeholder view and then queue up the draw calls to complete it. In this way the
// gpu-thread has precedence over the recording threads.
//
// The invariants for this cache differ a bit from those of the proxy and resource caches.
// For this cache:
//
//   only this cache knows the unique key - neither the proxy nor backing resource should
//   be discoverable in any other cache by the unique key
//
//   if a backing resource resides in the resource cache then there should be an entry in this
//   cache
//
//   an entry in this cache, however, doesn't guarantee that there is a corresponding entry in
//   the resource cache - although the entry here should be able to generate that entry
//   (i.e., be a lazy proxy)
//
// Wrt interactions w/ GrContext/GrResourceCache purging, we have:
//
//   Both GrContext::abandonContext and GrContext::releaseResourcesAndAbandonContext will cause
//   all the refs held in this cache to be dropped prior to clearing out the resource cache.
//
//   For the size_t-variant of GrContext::purgeUnlockedResources, after an initial attempt
//   to purge the requested amount of resources fails, uniquely held resources in this cache
//   will be dropped in LRU to MRU order until the cache is under budget. Note that this
//   prioritizes the survival of resources in this cache over those just in the resource cache.
//
//   For the 'scratchResourcesOnly' variant of GrContext::purgeUnlockedResources, this cache
//   won't be modified in the scratch-only case unless the resource cache is over budget (in
//   which case it will purge uniquely-held resources in LRU to MRU order to get
//   back under budget). In the non-scratch-only case, all uniquely held resources in this cache
//   will be released prior to the resource cache being cleared out.
//
//   For GrContext::setResourceCacheLimit, if an initial pass through the resource cache doesn't
//   get the cache under the new budget, uniquely held resources in this cache will be released
//   in LRU to MRU order.
//
//   For GrContext::performDeferredCleanup, any uniquely held resources that haven't been accessed
//   w/in 'msNotUsed' will be released from this cache prior to the resource cache being cleaned.
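//
// A minimal sketch of the recording-thread flow described above (hypothetical client code:
// 'threadSafeCache', 'key' and the cpu-side mask helper are assumptions, not part of this
// header):
//
//     GrSurfaceProxyView view = threadSafeCache->find(key);
//     if (!view.proxy()) {
//         GrSurfaceProxyView myView = create_mask_on_cpu(key);  // hypothetical helper
//         // If another thread added an entry in the interim, findOrAdd returns the winning
//         // thread's view and 'myView' is discarded.
//         view = threadSafeCache->findOrAdd(key, myView);
//     }
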
class GrThreadSafeCache {
public:
    GrThreadSafeCache();
    ~GrThreadSafeCache();

#if GR_TEST_UTILS
    int numEntries() const SK_EXCLUDES(fSpinLock);

    size_t approxBytesUsedForHash() const SK_EXCLUDES(fSpinLock);
#endif

    void dropAllRefs() SK_EXCLUDES(fSpinLock);

    // Drop uniquely held refs until under the resource cache's budget.
    // A null parameter means drop all uniquely held refs.
    void dropUniqueRefs(GrResourceCache* resourceCache) SK_EXCLUDES(fSpinLock);

    // Drop uniquely held refs that were last accessed before 'purgeTime'.
    void dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime) SK_EXCLUDES(fSpinLock);

    SkDEBUGCODE(bool has(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);)

    GrSurfaceProxyView find(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findWithData(
            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    // If an entry already exists for the key, the passed-in view is discarded and the
    // pre-existing view is returned (i.e., the first thread to add wins).
    GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> addWithData(
            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

    GrSurfaceProxyView findOrAdd(const GrUniqueKey&,
                                 const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findOrAddWithData(
            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
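    // A hypothetical sketch of the *WithData variants ('key' is assumed to have been built
    // with custom data attached; not part of this header):
    //
    //     auto [view, data] = threadSafeCache->findOrAddWithData(key, myView);
    //     // 'data' is the custom data attached to whichever key won the race.
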
    // To hold vertex data in the cache and have it transparently transition from cpu-side to
    // gpu-side while being shared between all the threads, we need a ref-counted object that
    // keeps hold of the cpu-side data but allows deferred filling in of the mirroring gpu buffer.
    class VertexData : public SkNVRefCnt<VertexData> {
    public:
        ~VertexData();

        const void* vertices() const { return fVertices; }
        size_t size() const { return fNumVertices * fVertexSize; }

        int numVertices() const { return fNumVertices; }
        size_t vertexSize() const { return fVertexSize; }

        // TODO: make these return const GrGpuBuffers?
        GrGpuBuffer* gpuBuffer() { return fGpuBuffer.get(); }
        sk_sp<GrGpuBuffer> refGpuBuffer() { return fGpuBuffer; }

        void setGpuBuffer(sk_sp<GrGpuBuffer> gpuBuffer) {
            // TODO: once we add the gpuBuffer we could free 'fVertices'. Deinstantiable
            // DDLs could throw a monkey wrench into that plan though.
            SkASSERT(!fGpuBuffer);
            fGpuBuffer = std::move(gpuBuffer);
        }

        void reset() {
            sk_free(const_cast<void*>(fVertices));
            fVertices = nullptr;
            fNumVertices = 0;
            fVertexSize = 0;
            fGpuBuffer.reset();
        }

    private:
        friend class GrThreadSafeCache;  // for access to the ctors

        VertexData(const void* vertices, int numVertices, size_t vertexSize)
                : fVertices(vertices)
                , fNumVertices(numVertices)
                , fVertexSize(vertexSize) {
        }

        VertexData(sk_sp<GrGpuBuffer> gpuBuffer, int numVertices, size_t vertexSize)
                : fVertices(nullptr)
                , fNumVertices(numVertices)
                , fVertexSize(vertexSize)
                , fGpuBuffer(std::move(gpuBuffer)) {
        }

        const void*        fVertices;
        int                fNumVertices;
        size_t             fVertexSize;

        sk_sp<GrGpuBuffer> fGpuBuffer;
    };

    // The returned VertexData object takes ownership of 'vertices' which had better have been
    // allocated with malloc!
    static sk_sp<VertexData> MakeVertexData(const void* vertices,
                                            int vertexCount,
                                            size_t vertexSize);
    static sk_sp<VertexData> MakeVertexData(sk_sp<GrGpuBuffer> buffer,
                                            int vertexCount,
                                            size_t vertexSize);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> findVertsWithData(
            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    // Decides, based on the entries' custom data, whether newly added vertex data should
    // replace an existing entry (see the sketch after 'remove' below).
    typedef bool (*IsNewerBetter)(SkData* incumbent, SkData* challenger);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> addVertsWithData(
            const GrUniqueKey&,
            sk_sp<VertexData>,
            IsNewerBetter) SK_EXCLUDES(fSpinLock);

    void remove(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
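    // A sketch of one possible IsNewerBetter policy (illustrative only - the meaning of the
    // SkData payloads is defined by the client, not by this cache):
    //
    //     static bool tolerance_is_better(SkData* incumbent, SkData* challenger) {
    //         // Here each payload is assumed to hold a single float tolerance; smaller wins.
    //         return *static_cast<const float*>(challenger->data()) <
    //                *static_cast<const float*>(incumbent->data());
    //     }
    //
    //     auto [vertData, data] = threadSafeCache->addVertsWithData(key, myVerts,
    //                                                               tolerance_is_better);
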
    // To allow gpu-created resources to have priority, we pre-emptively place a lazy proxy
    // in the thread-safe cache (with findOrAdd). The Trampoline object allows that lazy proxy
    // to be instantiated with some later-generated rendering result.
    class Trampoline : public SkRefCnt {
    public:
        sk_sp<GrTextureProxy> fProxy;
    };

    static std::tuple<GrSurfaceProxyView, sk_sp<Trampoline>> CreateLazyView(GrDirectContext*,
                                                                            GrColorType,
                                                                            SkISize dimensions,
                                                                            GrSurfaceOrigin,
                                                                            SkBackingFit);
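    // A sketch of that gpu-thread flow (illustrative; the rendering helper is an assumption,
    // not part of this header):
    //
    //     auto [lazyView, trampoline] = GrThreadSafeCache::CreateLazyView(
    //             dContext, GrColorType::kAlpha_8, dimensions,
    //             kTopLeft_GrSurfaceOrigin, SkBackingFit::kExact);
    //     GrSurfaceProxyView view = threadSafeCache->findOrAdd(key, lazyView);
    //     if (view.proxy() == lazyView.proxy()) {
    //         // We won the race - queue up the draws and point the lazy proxy at the result.
    //         trampoline->fProxy = render_mask(dContext, dimensions);  // hypothetical helper
    //     }
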
private:
    struct Entry {
        Entry(const GrUniqueKey& key, const GrSurfaceProxyView& view)
                : fKey(key)
                , fView(view)
                , fTag(Entry::kView) {
        }

        Entry(const GrUniqueKey& key, sk_sp<VertexData> vertData)
                : fKey(key)
                , fVertData(std::move(vertData))
                , fTag(Entry::kVertData) {
        }

        ~Entry() {
            this->makeEmpty();
        }

        bool uniquelyHeld() const {
            SkASSERT(fTag != kEmpty);

            if (fTag == kView && fView.proxy()->unique()) {
                return true;
            } else if (fTag == kVertData && fVertData->unique()) {
                return true;
            }

            return false;
        }

        const GrUniqueKey& key() const {
            SkASSERT(fTag != kEmpty);
            return fKey;
        }

        SkData* getCustomData() const {
            SkASSERT(fTag != kEmpty);
            return fKey.getCustomData();
        }

        sk_sp<SkData> refCustomData() const {
            SkASSERT(fTag != kEmpty);
            return fKey.refCustomData();
        }

        GrSurfaceProxyView view() {
            SkASSERT(fTag == kView);
            return fView;
        }

        sk_sp<VertexData> vertexData() {
            SkASSERT(fTag == kVertData);
            return fVertData;
        }

        void set(const GrUniqueKey& key, const GrSurfaceProxyView& view) {
            SkASSERT(fTag == kEmpty);
            fKey = key;
            fView = view;
            fTag = kView;
        }

        void set(const GrUniqueKey& key, sk_sp<VertexData> vertData) {
            SkASSERT(fTag == kEmpty || fTag == kVertData);
            fKey = key;
            fVertData = std::move(vertData);
            fTag = kVertData;
        }

        void makeEmpty() {
            fKey.reset();
            if (fTag == kView) {
                fView.reset();
            } else if (fTag == kVertData) {
                fVertData.reset();
            }
            fTag = kEmpty;
        }

        // The thread-safe cache gets to directly manipulate the llist and last-access members
        GrStdSteadyClock::time_point fLastAccess;
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);

        // for SkTDynamicHash
        static const GrUniqueKey& GetKey(const Entry& e) {
            SkASSERT(e.fTag != kEmpty);
            return e.fKey;
        }
        static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }

    private:
        // Note: the unique key is stored here because it is never attached to a proxy or
        // a GrTexture.
        GrUniqueKey fKey;
        // Which union member is live is tracked by 'fTag'; construction and destruction of
        // the members are managed manually (see set/makeEmpty).
        union {
            GrSurfaceProxyView fView;
            sk_sp<VertexData>  fVertData;
        };

        enum {
            kEmpty,
            kView,
            kVertData,
        } fTag { kEmpty };
    };

    void makeExistingEntryMRU(Entry*) SK_REQUIRES(fSpinLock);
    Entry* makeNewEntryMRU(Entry*) SK_REQUIRES(fSpinLock);

    Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
    Entry* getEntry(const GrUniqueKey&, sk_sp<VertexData>) SK_REQUIRES(fSpinLock);

    void recycleEntry(Entry*) SK_REQUIRES(fSpinLock);

    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalFind(
            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalAdd(
            const GrUniqueKey&,
            const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalFindVerts(
            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalAddVerts(
            const GrUniqueKey&,
            sk_sp<VertexData>,
            IsNewerBetter) SK_REQUIRES(fSpinLock);

    mutable SkSpinlock fSpinLock;

    SkTDynamicHash<Entry, GrUniqueKey> fUniquelyKeyedEntryMap SK_GUARDED_BY(fSpinLock);
    // The head of this list is the MRU entry.
    SkTInternalLList<Entry> fUniquelyKeyedEntryList SK_GUARDED_BY(fSpinLock);

    // TODO: empirically determine this from the skps
    static const int kInitialArenaSize = 64 * sizeof(Entry);

    char fStorage[kInitialArenaSize];
    SkArenaAlloc fEntryAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
    Entry* fFreeEntryList SK_GUARDED_BY(fSpinLock);
};

#endif // GrThreadSafeCache_DEFINED