/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "include/gpu/GrSurface.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrSurfaceProxy.h"

#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTMultiMap.h"

class GrDeinstantiateProxyTracker;
class GrResourceProvider;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0

/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to find proxy reuse.) When it comes time to allocate the resources it traverses
 * the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *     to the free pool)
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (which is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opLists after the opList DAG has been linearized.
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather-usage-intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpList::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
 *
 * Allocation failure handling occurs at two levels:
 *
 * 1) If the GrSurface backing an opList fails to allocate then the entire opList is dropped.
 *
 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
 *    (via GrOpList::purgeOpsWithUninstantiatedProxies).
 *
 * The pass to determine which ops to drop is a bit laborious, so we only check the opLists and
 * individual ops when something goes wrong in allocation (i.e., when the return code from
 * GrResourceAllocator::assign is bad).
 *
 * Altogether this means we should never attempt to draw an op which is missing some
 * required GrSurface.
 *
 * One wrinkle in this plan is that promise images are fulfilled during the gather-interval pass.
 * If any of the promise images fail at this stage then the allocator is set into an error
 * state and all allocations are then scanned for failures during the main allocation pass.
 */
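/*
 * For reference, a rough sketch of how a flush-time caller might drive this class. This is an
 * illustrative outline only; the real driver lives in GrDrawingManager and differs in detail.
 * 'opLists', 'numOpLists', and 'executeOpLists' below are placeholders, not actual Skia API:
 *
 *     GrResourceAllocator alloc(resourceProvider, deinstantiateTracker
 *                               SkDEBUGCODE(, numOpLists));
 *
 *     // Gather pass: each opList records the usage intervals of its proxies.
 *     for (int i = 0; i < numOpLists; ++i) {
 *         opLists[i]->gatherProxyIntervals(&alloc);
 *         alloc.markEndOfOpList(i);
 *     }
 *     alloc.determineRecyclability();
 *
 *     // Allocation pass: assign() may stop early (at an opList boundary) so that only a
 *     // portion of the queued opLists' resources are live at once; execute each returned
 *     // range before continuing.
 *     int startIndex, stopIndex;
 *     GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
 *     while (alloc.assign(&startIndex, &stopIndex, &error)) {
 *         if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
 *             // Drop opLists whose backing GrSurface failed to allocate and purge any ops
 *             // that reference uninstantiated proxies before executing.
 *         }
 *         executeOpLists(startIndex, stopIndex);
 *     }
 */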
class GrResourceAllocator {
public:
    GrResourceAllocator(GrResourceProvider* resourceProvider,
                        GrDeinstantiateProxyTracker* tracker
                        SkDEBUGCODE(, int numOpLists))
            : fResourceProvider(resourceProvider)
            , fDeinstantiateTracker(tracker)
            SkDEBUGCODE(, fNumOpLists(numOpLists)) {
    }

    ~GrResourceAllocator();

    unsigned int curOp() const { return fNumOps; }
    void incOps() { fNumOps++; }

    /** Indicates whether a given call to addInterval represents an actual usage of the
     *  provided proxy. This is mainly here to accommodate deferred proxies attached to opLists.
     *  In that case we need to create an extra long interval for them (due to the upload) but
     *  don't want to count that usage/reference towards the proxy's recyclability.
     */
    enum class ActualUse : bool {
        kNo  = false,
        kYes = true
    };

    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets.
    // If an interval already exists for this proxy it will be expanded to include the new range.
    void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
                     SkDEBUGCODE(, bool isDirectDstRead = false));
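
    // For reference, a hedged sketch of how an opList's gatherProxyIntervals() might record
    // intervals. This is illustrative only; the actual recording happens in the GrOpList
    // subclasses, and 'ops', 'deferredProxy', and the visitProxies callback shape are
    // approximations, not exact Skia API:
    //
    //     // Each op occupies one index; every proxy the op reads or writes gets a real use.
    //     for (const GrOp& op : ops) {
    //         op.visitProxies([&](GrSurfaceProxy* p) {
    //             alloc->addInterval(p, alloc->curOp(), alloc->curOp(),
    //                                GrResourceAllocator::ActualUse::kYes);
    //         });
    //         alloc->incOps();
    //     }
    //     // A deferred proxy attached to the opList needs an extra long interval (its upload
    //     // happens up front), but that shouldn't count against its recyclability.
    //     alloc->addInterval(deferredProxy, 0, alloc->curOp() - 1,
    //                        GrResourceAllocator::ActualUse::kNo);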

    enum class AssignError {
        kNoError,
        kFailedProxyInstantiation
    };

    // Returns true when the opLists from 'startIndex' to 'stopIndex' should be executed;
    // false when nothing remains to be executed.
    // If any proxy fails to instantiate, the AssignError will be set to kFailedProxyInstantiation.
    // If this happens, the caller should remove all ops which reference an uninstantiated proxy.
    // This is used to execute a portion of the queued opLists in order to reduce the total
    // amount of GPU resources required.
    bool assign(int* startIndex, int* stopIndex, AssignError* outError);

    void determineRecyclability();
    void markEndOfOpList(int opListIndex);

#if GR_ALLOCATION_SPEW
    void dumpIntervals();
#endif

private:
    class Interval;

    // Remove dead intervals from the active list
    void expire(unsigned int curIndex);

    bool onOpListBoundary() const;
    void forceIntermediateFlush(int* stopIndex);

    // These two methods wrap the interactions with the free pool
    void recycleSurface(sk_sp<GrSurface> surface);
    sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, int minStencilSampleCount);

    struct FreePoolTraits {
        static const GrScratchKey& GetKey(const GrSurface& s) {
            return s.resourcePriv().getScratchKey();
        }

        static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
        static void OnFree(GrSurface* s) { s->unref(); }
    };
    typedef SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits> FreePoolMultiMap;

    typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;

    class Interval {
    public:
        Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
                : fProxy(proxy)
                , fProxyID(proxy->uniqueID().asUInt())
                , fStart(start)
                , fEnd(end)
                , fNext(nullptr) {
            SkASSERT(proxy);
#if GR_TRACK_INTERVAL_CREATION
            fUniqueID = CreateUniqueID();
            SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
                     fUniqueID, proxy->uniqueID().asUInt(), start, end);
#endif
        }

        // Used when recycling an interval
        void resetTo(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) {
            SkASSERT(proxy);
            SkASSERT(!fProxy && !fNext);

            fUses = 0;
            fProxy = proxy;
            fProxyID = proxy->uniqueID().asUInt();
            fStart = start;
            fEnd = end;
            fNext = nullptr;
#if GR_TRACK_INTERVAL_CREATION
            fUniqueID = CreateUniqueID();
            SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
                     fUniqueID, proxy->uniqueID().asUInt(), start, end);
#endif
        }

        ~Interval() {
            SkASSERT(!fAssignedSurface);
        }

        const GrSurfaceProxy* proxy() const { return fProxy; }
        GrSurfaceProxy* proxy() { return fProxy; }

        unsigned int start() const { return fStart; }
        unsigned int end() const { return fEnd; }

        void setNext(Interval* next) { fNext = next; }
        const Interval* next() const { return fNext; }
        Interval* next() { return fNext; }

        void markAsRecyclable() { fIsRecyclable = true; }
        bool isRecyclable() const { return fIsRecyclable; }

        void addUse() { fUses++; }
        int uses() { return fUses; }

        void extendEnd(unsigned int newEnd) {
            if (newEnd > fEnd) {
#if GR_TRACK_INTERVAL_CREATION
                // Log before updating so the old and new end points both appear.
                SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
#endif
                fEnd = newEnd;
            }
        }

        void assign(sk_sp<GrSurface>);
        bool wasAssignedSurface() const { return fAssignedSurface != nullptr; }
        sk_sp<GrSurface> detachSurface() { return std::move(fAssignedSurface); }

        // for SkTDynamicHash
        static const uint32_t& GetKey(const Interval& intvl) {
            return intvl.fProxyID;
        }
        static uint32_t Hash(const uint32_t& key) { return key; }

    private:
        sk_sp<GrSurface> fAssignedSurface;
        GrSurfaceProxy*  fProxy;
        uint32_t         fProxyID; // This is here because DynamicHash requires a ref to the key
        unsigned int     fStart;
        unsigned int     fEnd;
        Interval*        fNext;
        unsigned int     fUses = 0;
        bool             fIsRecyclable = false;

#if GR_TRACK_INTERVAL_CREATION
        uint32_t fUniqueID;

        uint32_t CreateUniqueID();
#endif
    };
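
    // For reference, a hedged sketch of how the hash and Interval work together inside
    // addInterval() (the real implementation lives in GrResourceAllocator.cpp and also handles
    // the free list of recycled Intervals and the ActualUse bookkeeping):
    //
    //     if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
    //         intvl->extendEnd(end);   // proxy reuse: just grow the existing interval
    //         return;
    //     }
    //     Interval* newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
    //     fIntvlList.insertByIncreasingStart(newIntvl);
    //     fIntvlHash.add(newIntvl);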

    class IntervalList {
    public:
        IntervalList() = default;
        ~IntervalList() {
            // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
            // Since the arena allocator will clean up for us we don't bother here.
        }

        bool empty() const {
            SkASSERT(SkToBool(fHead) == SkToBool(fTail));
            return !SkToBool(fHead);
        }
        const Interval* peekHead() const { return fHead; }
        Interval* peekHead() { return fHead; }
        Interval* popHead();
        void insertByIncreasingStart(Interval*);
        void insertByIncreasingEnd(Interval*);
        Interval* detachAll();

    private:
        SkDEBUGCODE(void validate() const;)

        Interval* fHead = nullptr;
        Interval* fTail = nullptr;
    };

    // Compositing use cases can create > 80 intervals.
    static const int kInitialArenaSize = 128 * sizeof(Interval);

    GrResourceProvider*          fResourceProvider;
    GrDeinstantiateProxyTracker* fDeinstantiateTracker;
    FreePoolMultiMap             fFreePool;      // Recently created/used GrSurfaces
    IntvlHash                    fIntvlHash;     // All the intervals, hashed by proxyID

    IntervalList                 fIntvlList;     // All the intervals sorted by increasing start
    IntervalList                 fActiveIntvls;  // List of live intervals during assignment
                                                 // (sorted by increasing end)
    unsigned int                 fNumOps = 0;
    SkTArray<unsigned int>       fEndOfOpListOpIndices;
    int                          fCurOpListIndex = 0;
    SkDEBUGCODE(const int        fNumOpLists = -1;)

    SkDEBUGCODE(bool             fAssigned = false;)

    char                         fStorage[kInitialArenaSize];
    SkArenaAlloc                 fIntervalAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
    Interval*                    fFreeIntervalList = nullptr;
    bool                         fLazyInstantiationError = false;
};

#endif // GrResourceAllocator_DEFINED