//
// Copyright 2017 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ResourceMap:
//   An optimized resource map which packs the first set of allocated objects into a
//   flat array, and then falls back to an unordered map for the higher handle values.
//

#ifndef LIBANGLE_RESOURCE_MAP_H_
#define LIBANGLE_RESOURCE_MAP_H_

#include <mutex>
#include <type_traits>

#include "common/SimpleMutex.h"
#include "common/hash_containers.h"
#include "libANGLE/angletypes.h"

namespace gl
{
// The resource map needs to be internally synchronized for maps that are placed in the share group
// (as opposed to being private to the context) and that are accessed without holding the share
// group lock.
#if defined(ANGLE_ENABLE_SHARE_CONTEXT_LOCK)
using ResourceMapMutex = angle::SimpleMutex;
#else
using ResourceMapMutex = angle::NoOpMutex;
#endif

template <bool NeedsLock = true>
struct SelectResourceMapMutex
{
    using type = ResourceMapMutex;
};

template <>
struct SelectResourceMapMutex<false>
{
    using type = angle::NoOpMutex;
};
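
// For exposition only, these compile-time checks restate the selection above and have no runtime
// effect: maps that do not need internal locking always get the no-op mutex, while maps that do
// need one get whatever ResourceMapMutex resolves to for this build configuration.
static_assert(std::is_same_v<SelectResourceMapMutex<false>::type, angle::NoOpMutex>);
static_assert(std::is_same_v<SelectResourceMapMutex<true>::type, ResourceMapMutex>);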

// Analysis of ANGLE's traces as well as Chrome usage reveals the following:
//
// - Buffers: Typical applications use no more than 4000 ids.  Very few use over 6000.
// - Textures: Typical applications use no more than 1200 ids.  Very few use over 2000.
// - Samplers: Typical applications use no more than 50 ids.  Very few use over 100.
// - Shaders and Programs: Typical applications use no more than 500.  Very few use over 700.
// - Sync objects: Typical applications use no more than 500.  Very few use over 1500.
//
// For all the other shared types, the maximum used id is small (under 100).  For
// context-private parts (such as vertex arrays and queries), the id count can be in the
// thousands.
//
// The initial size of the flat resource map is based on the above, rounded up to a multiple of
// 1536.  Resource maps that need a lock (kNeedsLock == true) have the maximum flat size identical
// to the initial flat size to avoid reallocation.  For others, the maps start small and can grow.
template <typename IDType>
struct ResourceMapParams
{
    static constexpr size_t kInitialFlatResourcesSize = 192;

    // The following are private to the context and don't need a lock:
    //
    // - Vertex Array Objects
    // - Framebuffer Objects
    // - Transform Feedback Objects
    // - Query Objects
    //
    // The rest of the maps need a lock.  However, only a select few are currently locked, as the
    // APIs relevant to the rest of the types are protected by the share group lock.  As the share
    // group lock is removed from more types, the resource map lock needs to be enabled for them.
    static constexpr bool kNeedsLock = false;
};
template <>
struct ResourceMapParams<BufferID>
{
    static constexpr size_t kInitialFlatResourcesSize = 6144;
    static constexpr bool kNeedsLock = true;
};
template <>
struct ResourceMapParams<TextureID>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock = false;
};
template <>
struct ResourceMapParams<ShaderProgramID>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock = false;
};
template <>
struct ResourceMapParams<SyncID>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock = false;
};
// For the purpose of unit testing, |int| is considered private (not needing lock), and
// |unsigned int| is considered shared (needing lock).
template <>
struct ResourceMapParams<unsigned int>
{
    static constexpr size_t kInitialFlatResourcesSize = 192;
    static constexpr bool kNeedsLock = true;
};
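
// To make the effect of these parameters concrete, consider a hypothetical texture map (flat
// size 1536): handles below 1536 are served from the flat array with no hashing and no lock,
// while larger handles fall back to the hashed map.  Illustrative sketch (objects and IDs are
// made up):
//
//   ResourceMap<Texture, TextureID> textures;
//   textures.assign({100}, texture);     // stored in the flat array at index 100
//   textures.assign({100000}, texture);  // stored in the hashed map
//
// Because ResourceMapParams<BufferID>::kNeedsLock is true, the buffer map allocates its flat
// array at the full 6144 entries up front and never reallocates it, which is what keeps the
// lock-free flat-map reads in ResourceMap::query() below safe.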

template <typename ResourceType, typename IDType>
class ResourceMap final : angle::NonCopyable
{
  public:
    ResourceMap();
    ~ResourceMap();

    ANGLE_INLINE ResourceType *query(IDType id) const
    {
        GLuint handle = GetIDValue(id);
        // No need for a lock when accessing the flat map.  Either the flat map is static, or
        // locking is not needed.
        static_assert(!kNeedsLock || kInitialFlatResourcesSize == kFlatResourcesLimit);

        if (ANGLE_LIKELY(handle < mFlatResourcesSize))
        {
            ResourceType *value = mFlatResources[handle];
            return (value == InvalidPointer() ? nullptr : value);
        }

        return findInHashedResources(handle);
    }

    // Returns true if the handle is reserved, even if the resource itself has not been created
    // yet.
    bool contains(IDType id) const;

    // Returns the element that was at this location.
    bool erase(IDType id, ResourceType **resourceOut);

    void assign(IDType id, ResourceType *resource);

    // Clears the map.
    void clear();

    using IndexAndResource = std::pair<GLuint, ResourceType *>;
    using HashMap = angle::HashMap<GLuint, ResourceType *>;

    class Iterator final
    {
      public:
        bool operator==(const Iterator &other) const;
        bool operator!=(const Iterator &other) const;
        Iterator &operator++();
        const IndexAndResource *operator->() const;
        const IndexAndResource &operator*() const;

      private:
        friend class ResourceMap;
        Iterator(const ResourceMap &origin,
                 GLuint flatIndex,
                 typename HashMap::const_iterator hashIndex,
                 bool skipNulls);
        void updateValue();

        const ResourceMap &mOrigin;
        GLuint mFlatIndex;
        typename HashMap::const_iterator mHashIndex;
        IndexAndResource mValue;
        bool mSkipNulls;
    };

  private:
    friend class Iterator;
    template <typename SameResourceType, typename SameIDType>
    friend class UnsafeResourceMapIter;

    // null values represent reserved handles.
    Iterator begin() const;
    Iterator end() const;

    Iterator beginWithNull() const;
    Iterator endWithNull() const;

    // Used by iterators and related functions only (due to lack of thread safety).
    GLuint nextResource(size_t flatIndex, bool skipNulls) const;

    // constexpr methods cannot contain reinterpret_cast, so we need a static method.
    static ResourceType *InvalidPointer();
    static constexpr intptr_t kInvalidPointer = static_cast<intptr_t>(-1);

    static constexpr bool kNeedsLock = ResourceMapParams<IDType>::kNeedsLock;
    using Mutex = typename SelectResourceMapMutex<kNeedsLock>::type;

    static constexpr size_t kInitialFlatResourcesSize =
        ResourceMapParams<IDType>::kInitialFlatResourcesSize;

    // Experimental testing suggests that ~10k is a reasonable upper limit.
    static constexpr size_t kFlatResourcesLimit = kNeedsLock ? kInitialFlatResourcesSize : 0x3000;
    // Due to the way assign() is implemented, kFlatResourcesLimit / kInitialFlatResourcesSize must
    // be a power of 2.
    static_assert(kFlatResourcesLimit % kInitialFlatResourcesSize == 0);
    static_assert(((kFlatResourcesLimit / kInitialFlatResourcesSize) &
                   (kFlatResourcesLimit / kInitialFlatResourcesSize - 1)) == 0);

    bool containsInHashedResources(GLuint handle) const;
    ResourceType *findInHashedResources(GLuint handle) const;
    bool eraseFromHashedResources(GLuint handle, ResourceType **resourceOut);
    void assignAboveCurrentFlatSize(GLuint handle, ResourceType *resource);
    void assignInHashedResources(GLuint handle, ResourceType *resource);

    size_t mFlatResourcesSize;
    ResourceType **mFlatResources;

    // A map of GL objects indexed by object ID.
    HashMap mHashedResources;

    // mFlatResources is allocated at object creation time, with a default size of
    // |kInitialFlatResourcesSize|.  This is thread safe, because the allocation is done by the
    // first context in the share group.  The flat map is allowed to grow up to
    // |kFlatResourcesLimit|, but only for maps that don't need a lock (kNeedsLock == false).
    //
    // For maps that don't need a lock, this mutex is a no-op.  For those that do, the mutex is
    // taken when allocating / deleting objects, as well as when accessing |mHashedResources|.
    // Otherwise, access to the flat map (which never gets reallocated due to
    // |kInitialFlatResourcesSize == kFlatResourcesLimit|) is lockless.  The latter is possible
    // because the application is not allowed to gen/delete and bind the same ID in different
    // threads at the same time.
    //
    // Note that because HandleAllocator is not yet thread-safe, glGen* and glDelete* functions
    // cannot be free of the share group mutex yet.  To remove the share group mutex from those
    // functions, likely the HandleAllocator class should be merged with this class, and the
    // necessary insert/erase operations done under this same lock.
    mutable Mutex mMutex;
};
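
// For illustration, a hypothetical use of the API above (objects and IDs are made up).  A
// reserved handle is tracked with a null entry until the object is actually created:
//
//   ResourceMap<Buffer, BufferID> buffers;
//   buffers.assign({7}, nullptr);            // reserve the handle (e.g. from glGenBuffers)
//   ASSERT(buffers.contains({7}));           // reserved handles count as contained ...
//   ASSERT(buffers.query({7}) == nullptr);   // ... but query() only returns created objects
//   buffers.assign({7}, buffer);             // object created, e.g. on first bind
//   Buffer *erased = nullptr;
//   buffers.erase({7}, &erased);             // returns the object so the caller can destroy it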

// A helper to retrieve the resource map iterators while being explicit that this is not thread
// safe.  Usage of the iterators is limited to cleanup on destruction and capture/replay, neither
// of which can race with other threads in their access to the resource map.
template <typename ResourceType, typename IDType>
class UnsafeResourceMapIter
{
  public:
    using ResMap = ResourceMap<ResourceType, IDType>;

    UnsafeResourceMapIter(const ResMap &resourceMap) : mResourceMap(resourceMap) {}

    typename ResMap::Iterator begin() const { return mResourceMap.begin(); }
    typename ResMap::Iterator end() const { return mResourceMap.end(); }

    typename ResMap::Iterator beginWithNull() const { return mResourceMap.beginWithNull(); }
    typename ResMap::Iterator endWithNull() const { return mResourceMap.endWithNull(); }

    // Not a constant-time operation, should only be used for verification.
    bool empty() const;

  private:
    const ResMap &mResourceMap;
};

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::ResourceMap()
    : mFlatResourcesSize(kInitialFlatResourcesSize),
      mFlatResources(new ResourceType *[kInitialFlatResourcesSize])
{
    memset(mFlatResources, kInvalidPointer, mFlatResourcesSize * sizeof(mFlatResources[0]));
}

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::~ResourceMap()
{
    ASSERT(begin() == end());
    delete[] mFlatResources;
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::containsInHashedResources(GLuint handle) const
{
    std::lock_guard<Mutex> lock(mMutex);

    return mHashedResources.find(handle) != mHashedResources.end();
}

template <typename ResourceType, typename IDType>
ResourceType *ResourceMap<ResourceType, IDType>::findInHashedResources(GLuint handle) const
{
    std::lock_guard<Mutex> lock(mMutex);

    auto it = mHashedResources.find(handle);
    // Note: it->second can also be nullptr, so a nullptr check doesn't work for "contains".
    return (it == mHashedResources.end() ? nullptr : it->second);
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::eraseFromHashedResources(GLuint handle,
                                                                 ResourceType **resourceOut)
{
    std::lock_guard<Mutex> lock(mMutex);

    auto it = mHashedResources.find(handle);
    if (it == mHashedResources.end())
    {
        return false;
    }
    *resourceOut = it->second;
    mHashedResources.erase(it);
    return true;
}

template <typename ResourceType, typename IDType>
ANGLE_INLINE bool ResourceMap<ResourceType, IDType>::contains(IDType id) const
{
    GLuint handle = GetIDValue(id);
    if (ANGLE_LIKELY(handle < mFlatResourcesSize))
    {
        return mFlatResources[handle] != InvalidPointer();
    }

    return containsInHashedResources(handle);
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::erase(IDType id, ResourceType **resourceOut)
{
    GLuint handle = GetIDValue(id);
    if (ANGLE_LIKELY(handle < mFlatResourcesSize))
    {
        auto &value = mFlatResources[handle];
        if (value == InvalidPointer())
        {
            return false;
        }
        *resourceOut = value;
        value = InvalidPointer();
        return true;
    }

    return eraseFromHashedResources(handle, resourceOut);
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::assignAboveCurrentFlatSize(GLuint handle,
                                                                   ResourceType *resource)
{
    if (ANGLE_LIKELY(handle < kFlatResourcesLimit))
    {
        // No need for a lock as the flat map never grows when locking is needed.
        static_assert(!kNeedsLock || kInitialFlatResourcesSize == kFlatResourcesLimit);

        // Grow the flat map by doubling, so its size stays a power-of-two multiple of the
        // initial size.
        size_t newSize = mFlatResourcesSize;
        while (newSize <= handle)
        {
            newSize *= 2;
        }

        ResourceType **oldResources = mFlatResources;

        mFlatResources = new ResourceType *[newSize];
        memset(&mFlatResources[mFlatResourcesSize], kInvalidPointer,
               (newSize - mFlatResourcesSize) * sizeof(mFlatResources[0]));
        memcpy(mFlatResources, oldResources, mFlatResourcesSize * sizeof(mFlatResources[0]));
        mFlatResourcesSize = newSize;
        ASSERT(mFlatResourcesSize <= kFlatResourcesLimit);
        delete[] oldResources;

        ASSERT(mFlatResourcesSize > handle);
        mFlatResources[handle] = resource;
    }
    else
    {
        std::lock_guard<Mutex> lock(mMutex);
        mHashedResources[handle] = resource;
    }
}
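
// Worked example for assignAboveCurrentFlatSize() above, for an unlocked map with
// kInitialFlatResourcesSize == 192: assigning handle 1000 doubles the flat array
// 192 -> 384 -> 768 -> 1536, fills the new tail with InvalidPointer(), copies the existing
// entries over, and stores the resource at index 1000.  A handle at or above
// kFlatResourcesLimit (0x3000) is instead stored in mHashedResources under the lock.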

template <typename ResourceType, typename IDType>
ANGLE_INLINE void ResourceMap<ResourceType, IDType>::assign(IDType id, ResourceType *resource)
{
    GLuint handle = GetIDValue(id);
    if (ANGLE_LIKELY(handle < mFlatResourcesSize))
    {
        mFlatResources[handle] = resource;
    }
    else
    {
        assignAboveCurrentFlatSize(handle, resource);
    }
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator ResourceMap<ResourceType, IDType>::begin()
    const
{
    return Iterator(*this, nextResource(0, true), mHashedResources.begin(), true);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator ResourceMap<ResourceType, IDType>::end() const
{
    return Iterator(*this, static_cast<GLuint>(mFlatResourcesSize), mHashedResources.end(), true);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator
ResourceMap<ResourceType, IDType>::beginWithNull() const
{
    return Iterator(*this, nextResource(0, false), mHashedResources.begin(), false);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator
ResourceMap<ResourceType, IDType>::endWithNull() const
{
    return Iterator(*this, static_cast<GLuint>(mFlatResourcesSize), mHashedResources.end(), false);
}

template <typename ResourceType, typename IDType>
bool UnsafeResourceMapIter<ResourceType, IDType>::empty() const
{
    return begin() == end();
}
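
// For illustration, a hypothetical iteration through UnsafeResourceMapIter (only valid in the
// contexts noted on that class, i.e. destruction and capture/replay; names are made up):
//
//   for (const auto &idAndResource : UnsafeResourceMapIter(buffers))
//   {
//       GLuint handle = idAndResource.first;
//       Buffer *resource = idAndResource.second;
//       // begin()/end() skip reserved (null) entries in the flat range;
//       // beginWithNull()/endWithNull() visit those as well.
//   }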

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::clear()
{
    // No need for a lock as this is only called on destruction.
    memset(mFlatResources, kInvalidPointer, kInitialFlatResourcesSize * sizeof(mFlatResources[0]));
    mFlatResourcesSize = kInitialFlatResourcesSize;
    mHashedResources.clear();
}

template <typename ResourceType, typename IDType>
GLuint ResourceMap<ResourceType, IDType>::nextResource(size_t flatIndex, bool skipNulls) const
{
    // This function is only used by the iterators, access to which is marked by
    // UnsafeResourceMapIter.  Locking is the responsibility of the caller.
    for (size_t index = flatIndex; index < mFlatResourcesSize; index++)
    {
        if ((mFlatResources[index] != nullptr || !skipNulls) &&
            mFlatResources[index] != InvalidPointer())
        {
            return static_cast<GLuint>(index);
        }
    }
    return static_cast<GLuint>(mFlatResourcesSize);
}

template <typename ResourceType, typename IDType>
// static
ResourceType *ResourceMap<ResourceType, IDType>::InvalidPointer()
{
    return reinterpret_cast<ResourceType *>(kInvalidPointer);
}

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::Iterator::Iterator(
    const ResourceMap &origin,
    GLuint flatIndex,
    typename ResourceMap<ResourceType, IDType>::HashMap::const_iterator hashIndex,
    bool skipNulls)
    : mOrigin(origin), mFlatIndex(flatIndex), mHashIndex(hashIndex), mSkipNulls(skipNulls)
{
    updateValue();
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::Iterator::operator==(const Iterator &other) const
{
    return (mFlatIndex == other.mFlatIndex && mHashIndex == other.mHashIndex);
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::Iterator::operator!=(const Iterator &other) const
{
    return !(*this == other);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator &
ResourceMap<ResourceType, IDType>::Iterator::operator++()
{
    if (mFlatIndex < static_cast<GLuint>(mOrigin.mFlatResourcesSize))
    {
        mFlatIndex = mOrigin.nextResource(mFlatIndex + 1, mSkipNulls);
    }
    else
    {
        mHashIndex++;
    }
    updateValue();
    return *this;
}

template <typename ResourceType, typename IDType>
const typename ResourceMap<ResourceType, IDType>::IndexAndResource *
ResourceMap<ResourceType, IDType>::Iterator::operator->() const
{
    return &mValue;
}

template <typename ResourceType, typename IDType>
const typename ResourceMap<ResourceType, IDType>::IndexAndResource &
ResourceMap<ResourceType, IDType>::Iterator::operator*() const
{
    return mValue;
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::Iterator::updateValue()
{
    if (mFlatIndex < static_cast<GLuint>(mOrigin.mFlatResourcesSize))
    {
        mValue.first = mFlatIndex;
        mValue.second = mOrigin.mFlatResources[mFlatIndex];
    }
    else if (mHashIndex != mOrigin.mHashedResources.end())
    {
        mValue.first = mHashIndex->first;
        mValue.second = mHashIndex->second;
    }
}

}  // namespace gl

#endif  // LIBANGLE_RESOURCE_MAP_H_