/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "include/private/SkTHash.h"

#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrSurface.h"
#include "src/gpu/GrSurfaceProxy.h"

#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTMultiMap.h"

class GrDirectContext;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0

/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to find proxy reuse). When it comes time to allocate the resources it
 * traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *     to the free pool)
 *
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (that is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
50 * 51 * Allocation failure handling occurs at two levels: 52 * 53 * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped. 54 * 55 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped 56 * (via GrOpsTask::purgeOpsWithUninstantiatedProxies) 57 * 58 * The pass to determine which ops to drop is a bit laborious so we only check the opsTasks and 59 * individual ops when something goes wrong in allocation (i.e., when the return code from 60 * GrResourceAllocator::assign is bad) 61 * 62 * All together this means we should never attempt to draw an op which is missing some 63 * required GrSurface. 64 * 65 * One wrinkle in this plan is that promise images are fulfilled during the gather interval pass. 66 * If any of the promise images fail at this stage then the allocator is set into an error 67 * state and all allocations are then scanned for failures during the main allocation pass. 68 */ 69 class GrResourceAllocator { 70 public: GrResourceAllocator(GrDirectContext * dContext)71 GrResourceAllocator(GrDirectContext* dContext) 72 : fDContext(dContext) {} 73 74 ~GrResourceAllocator(); 75 curOp()76 unsigned int curOp() const { return fNumOps; } incOps()77 void incOps() { fNumOps++; } 78 79 /** Indicates whether a given call to addInterval represents an actual usage of the 80 * provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks. 81 * In that case we need to create an extra long interval for them (due to the upload) but 82 * don't want to count that usage/reference towards the proxy's recyclability. 83 */ 84 enum class ActualUse : bool { 85 kNo = false, 86 kYes = true 87 }; 88 89 // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets. 90 // If an existing interval already exists it will be expanded to include the new range. 
91 void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse 92 SkDEBUGCODE(, bool isDirectDstRead = false)); 93 failedInstantiation()94 bool failedInstantiation() const { return fFailedInstantiation; } 95 96 // Generate an internal plan for resource allocation. After this you can optionally call 97 // `makeBudgetHeadroom` to check whether that plan would go over our memory budget. 98 // Fully-lazy proxies are also instantiated at this point so that their size can 99 // be known accurately. Returns false if any lazy proxy failed to instantiate, true otherwise. 100 bool planAssignment(); 101 102 // Figure out how much VRAM headroom this plan requires. If there's enough purgeable resources, 103 // purge them and return true. Otherwise return false. 104 bool makeBudgetHeadroom(); 105 106 // Clear all internal state in preparation for a new set of intervals. 107 void reset(); 108 109 // Instantiate and assign resources to all proxies. 110 bool assign(); 111 112 #if GR_ALLOCATION_SPEW 113 void dumpIntervals(); 114 #endif 115 116 private: 117 class Interval; 118 class Register; 119 120 // Remove dead intervals from the active list 121 void expire(unsigned int curIndex); 122 123 // These two methods wrap the interactions with the free pool 124 void recycleRegister(Register* r); 125 Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy); 126 127 struct FreePoolTraits { GetKeyFreePoolTraits128 static const GrScratchKey& GetKey(const Register& r) { 129 return r.scratchKey(); 130 } 131 HashFreePoolTraits132 static uint32_t Hash(const GrScratchKey& key) { return key.hash(); } OnFreeFreePoolTraits133 static void OnFree(Register* r) { } 134 }; 135 typedef SkTMultiMap<Register, GrScratchKey, FreePoolTraits> FreePoolMultiMap; 136 137 typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash; 138 139 struct UniqueKeyHash { operatorUniqueKeyHash140 uint32_t operator()(const GrUniqueKey& key) const { return key.hash(); } 141 }; 142 typedef 
SkTHashMap<GrUniqueKey, Register*, UniqueKeyHash> UniqueKeyRegisterHash; 143 144 // Each proxy – with some exceptions – is assigned a register. After all assignments are made, 145 // another pass is performed to instantiate and assign actual surfaces to the proxies. Right 146 // now these are performed in one call, but in the future they will be separable and the user 147 // will be able to query re: memory cost before committing to surface creation. 148 class Register { 149 public: 150 // It's OK to pass an invalid scratch key iff the proxy has a unique key. 151 Register(GrSurfaceProxy* originatingProxy, GrScratchKey, GrResourceProvider*); 152 scratchKey()153 const GrScratchKey& scratchKey() const { return fScratchKey; } uniqueKey()154 const GrUniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); } 155 accountedForInBudget()156 bool accountedForInBudget() const { return fAccountedForInBudget; } setAccountedForInBudget()157 void setAccountedForInBudget() { fAccountedForInBudget = true; } 158 existingSurface()159 GrSurface* existingSurface() const { return fExistingSurface.get(); } 160 161 // Can this register be used by other proxies after this one? 162 bool isRecyclable(const GrCaps&, GrSurfaceProxy* proxy, int knownUseCount) const; 163 164 // Resolve the register allocation to an actual GrSurface. 'fOriginatingProxy' 165 // is used to cache the allocation when a given register is used by multiple 166 // proxies. 167 bool instantiateSurface(GrSurfaceProxy*, GrResourceProvider*); 168 169 SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; }) 170 171 private: 172 GrSurfaceProxy* fOriginatingProxy; 173 GrScratchKey fScratchKey; // free pool wants a reference to this. 174 sk_sp<GrSurface> fExistingSurface; // queried from resource cache. may be null. 
175 bool fAccountedForInBudget = false; 176 177 #ifdef SK_DEBUG 178 uint32_t fUniqueID; 179 180 static uint32_t CreateUniqueID(); 181 #endif 182 }; 183 184 class Interval { 185 public: Interval(GrSurfaceProxy * proxy,unsigned int start,unsigned int end)186 Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) 187 : fProxy(proxy) 188 , fStart(start) 189 , fEnd(end) { 190 SkASSERT(proxy); 191 SkDEBUGCODE(fUniqueID = CreateUniqueID()); 192 #if GR_TRACK_INTERVAL_CREATION 193 SkString proxyStr = proxy->dump(); 194 SkDebugf("New intvl %d: %s [%d, %d]\n", fUniqueID, proxyStr.c_str(), start, end); 195 #endif 196 } 197 proxy()198 const GrSurfaceProxy* proxy() const { return fProxy; } proxy()199 GrSurfaceProxy* proxy() { return fProxy; } 200 start()201 unsigned int start() const { return fStart; } end()202 unsigned int end() const { return fEnd; } 203 setNext(Interval * next)204 void setNext(Interval* next) { fNext = next; } next()205 const Interval* next() const { return fNext; } next()206 Interval* next() { return fNext; } 207 getRegister()208 Register* getRegister() const { return fRegister; } setRegister(Register * r)209 void setRegister(Register* r) { fRegister = r; } 210 addUse()211 void addUse() { fUses++; } uses()212 int uses() const { return fUses; } 213 extendEnd(unsigned int newEnd)214 void extendEnd(unsigned int newEnd) { 215 if (newEnd > fEnd) { 216 fEnd = newEnd; 217 #if GR_TRACK_INTERVAL_CREATION 218 SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd); 219 #endif 220 } 221 } 222 223 SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; }) 224 225 private: 226 GrSurfaceProxy* fProxy; 227 unsigned int fStart; 228 unsigned int fEnd; 229 Interval* fNext = nullptr; 230 unsigned int fUses = 0; 231 Register* fRegister = nullptr; 232 233 #ifdef SK_DEBUG 234 uint32_t fUniqueID; 235 236 static uint32_t CreateUniqueID(); 237 #endif 238 }; 239 240 class IntervalList { 241 public: 242 IntervalList() = default; 243 // N.B. 
No need for a destructor – the arena allocator will clean up for us. 244 empty()245 bool empty() const { 246 SkASSERT(SkToBool(fHead) == SkToBool(fTail)); 247 return !SkToBool(fHead); 248 } peekHead()249 const Interval* peekHead() const { return fHead; } peekHead()250 Interval* peekHead() { return fHead; } 251 Interval* popHead(); 252 void insertByIncreasingStart(Interval*); 253 void insertByIncreasingEnd(Interval*); 254 255 private: 256 SkDEBUGCODE(void validate() const;) 257 258 Interval* fHead = nullptr; 259 Interval* fTail = nullptr; 260 }; 261 262 // Compositing use cases can create > 80 intervals. 263 static const int kInitialArenaSize = 128 * sizeof(Interval); 264 265 GrDirectContext* fDContext; 266 FreePoolMultiMap fFreePool; // Recently created/used GrSurfaces 267 IntvlHash fIntvlHash; // All the intervals, hashed by proxyID 268 269 IntervalList fIntvlList; // All the intervals sorted by increasing start 270 IntervalList fActiveIntvls; // List of live intervals during assignment 271 // (sorted by increasing end) 272 IntervalList fFinishedIntvls; // All the completed intervals 273 // (sorted by increasing start) 274 UniqueKeyRegisterHash fUniqueKeyRegisters; 275 unsigned int fNumOps = 0; 276 277 SkDEBUGCODE(bool fPlanned = false;) 278 SkDEBUGCODE(bool fAssigned = false;) 279 280 SkSTArenaAllocWithReset<kInitialArenaSize> fInternalAllocator; // intervals & registers 281 bool fFailedInstantiation = false; 282 }; 283 284 #endif // GrResourceAllocator_DEFINED 285