// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/strings/string_piece.h"
#include "build/build_config.h"

namespace metrics {
class FileMetricsProvider;
}

namespace base {

class HistogramBase;
class MemoryMappedFile;

// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
// struct MyPersistentObjectType {
//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
//     // Expected size for 32/64-bit check. Update this if structure changes!
//     static constexpr size_t kExpectedInstanceSize = 20;
//
//     ...
// };
//
// kPersistentTypeId: This value is an arbitrary identifier that allows the
// identification of these objects in the allocator, including the ability
// to find them via iteration.
// The number is arbitrary but using the first
// four bytes of the SHA1 hash of the type name means that there shouldn't
// be any conflicts with other types that may also be stored in the memory.
// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
// be used to generate the hash if the type name seems common. Use a command
// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
// If the structure layout changes, ALWAYS increment this number so that
// newer versions of the code don't try to interpret persistent data written
// by older versions with a different layout.
//
// kExpectedInstanceSize: This value is the hard-coded number that matches
// what sizeof(T) would return. By providing it explicitly, the allocator can
// verify that the structure is compatible between both 32-bit and 64-bit
// versions of the code.
//
// Using New manages the memory and then calls the default constructor for the
// object. Given that objects are persistent, no destructor is ever called
// automatically though a caller can explicitly call Delete to destruct it and
// change the type to something indicating it is no longer in use.
//
// Though persistent memory segments are transferable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing
// data as corrupt and refuse to access any of it.
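//
// A minimal usage sketch (illustrative only: MyPersistentObjectType is the
// example type above; the 64 KiB size and id value are hypothetical):
//
//   // A zeroed block; a file mapping or shared memory works equally well.
//   std::unique_ptr<char[]> block(new char[65536]());
//   PersistentMemoryAllocator allocator(block.get(), 65536, /*page_size=*/0,
//                                       /*id=*/0x1234, "ExampleAllocator",
//                                       PersistentMemoryAllocator::kReadWrite);
//
//   // Create an object and publish it so other threads/processes that
//   // attach to the segment can find it via iteration.
//   MyPersistentObjectType* obj = allocator.New<MyPersistentObjectType>();
//   if (obj)
//     allocator.MakeIterable(obj);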
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // These states are used to indicate the overall condition of the memory
  // segment irrespective of what is stored within it. Because the data is
  // often persistent and thus needs to be readable by different versions of
  // a program, these values are fixed and can never change.
  enum MemoryState : uint8_t {
    // Persistent memory starts all zeros and so shows "uninitialized".
    MEMORY_UNINITIALIZED = 0,

    // The header has been written and the memory is ready for use.
    MEMORY_INITIALIZED = 1,

    // The data should be considered deleted. This would be set when the
    // allocator is being cleaned up. If file-backed, the file is likely
    // to be deleted but since deletion can fail for a variety of reasons,
    // having this extra status means a future reader can realize what
    // should have happened.
    MEMORY_DELETED = 2,

    // The data should be considered complete. This is usually set when the
    // browser is going to exit to indicate that it terminated cleanly and
    // that the memory should be well-formed. In theory, this is not perfect
    // as it is possible for the browser/device to crash after this has been
    // set, but in practice this should be a reasonable indication as to
    // whether the data comes from a completed or crashed session (if
    // file-backed). Note that this might not be set on certain platforms
    // (e.g. Android, iOS) due to not having a guaranteed clean shutdown path.
    MEMORY_COMPLETED = 3,

    // Outside code can create states starting with this number; these too
    // must never change between code versions.
    MEMORY_USER_DEFINED = 100,
  };

  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it is always such so the only possible difference between successive
  // iterations is for more to be added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given |allocator|, starting at the
    // beginning. The allocator must live beyond the lifetime of the iterator.
    // This class has read-only access to the allocator (hence "const") but
    // the returned references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the |starting_after| reference. The first
    // call to GetNext() will return the next object found after that
    // reference. The reference must be to an "iterable" object; references
    // to non-iterable objects (those that never had MakeIterable() called
    // for them) will cause a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    Iterator(const Iterator&) = delete;
    Iterator& operator=(const Iterator&) = delete;

    ~Iterator();

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the |starting_after| reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If constructed or reset with a |starting_after| reference, this will
    // return that value.
    Reference GetLast();

    // Gets the next iterable, storing that type in |type_return|. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return);

    // Similar to above but gets the next iterable of a specific |type_match|.
    // This should not be mixed with calls to GetNext() because any
    // allocations skipped here due to a type mismatch will never be returned
    // by later calls to GetNext(), meaning it's possible to completely miss
    // entries.
    Reference GetNextOfType(uint32_t type_match);

    // As above but works using object type.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but works using objects and returns null if not found.
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Convert a generic pointer back into a reference. A null reference will
    // be returned if |memory| is not inside the persistent segment or does
    // not point to an object of the specified |type_id|.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but convert an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    raw_ptr<const PersistentMemoryAllocator> allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;
  };
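  // A sketch of a typical iteration loop (illustrative only; the object type
  // is the hypothetical example from the class comment above):
  //
  //   PersistentMemoryAllocator::Iterator iter(&allocator);
  //   uint32_t type;
  //   while (PersistentMemoryAllocator::Reference ref = iter.GetNext(&type)) {
  //     if (type == MyPersistentObjectType::kPersistentTypeId) {
  //       const MyPersistentObjectType* obj =
  //           iter.GetAsObject<MyPersistentObjectType>(ref);
  //       if (obj) {
  //         // ... use |obj| ...
  //       }
  //     }
  //   }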
  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,
  };

  enum : uint32_t {
    // A value that will match any type when doing lookups.
    kTypeIdAny = 0x00000000,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kTypeIdTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };

  // Indicates the mode for accessing the underlying data.
  enum AccessMode {
    kReadOnly,
    kReadWrite,
    // Open existing initialized data in R/W mode. If the passed data appears
    // to not have been initialized, does not write to it and instead marks
    // the allocator as corrupt (without writing anything to the underlying
    // data).
    kReadWriteExisting,
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];

  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's |base| address, the total |size| of the block, and any internal
  // |page| size (zero if not paged) across which allocations should not span.
  // The |id| is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The |name|, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this
  // value; other processes can learn it from the shared state. If the access
  // mode is kReadOnly then no changes will be made to it. The resulting
  // object should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
  PersistentMemoryAllocator(void* base,
                            size_t size,
                            size_t page_size,
                            uint64_t id,
                            base::StringPiece name,
                            AccessMode access_mode);

  PersistentMemoryAllocator(const PersistentMemoryAllocator&) = delete;
  PersistentMemoryAllocator& operator=(const PersistentMemoryAllocator&) =
      delete;

  virtual ~PersistentMemoryAllocator();

  // Check if memory segment is acceptable for creation of an Allocator. This
  // doesn't do any analysis of the data and so doesn't guarantee that the
  // contents are valid, just that the parameters won't cause the program to
  // abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
  static bool IsMemoryAcceptable(const void* data, size_t size,
                                 size_t page_size, bool readonly);
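  // For segments whose definition can vary at run-time (e.g. an existing
  // file of unknown provenance), a guard like this avoids the crash on
  // invalid input (a sketch; |mapping| and its accessors are assumed to come
  // from the caller's own mapping code):
  //
  //   if (PersistentMemoryAllocator::IsMemoryAcceptable(
  //           mapping.memory(), mapping.size(), /*page_size=*/0,
  //           /*readonly=*/false)) {
  //     allocator = std::make_unique<PersistentMemoryAllocator>(
  //         mapping.memory(), mapping.size(), 0, /*id=*/0, "",
  //         PersistentMemoryAllocator::kReadWriteExisting);
  //   }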
  // Get the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Get the internal name of this allocator (possibly an empty string).
  const char* Name() const;

  // Is this segment open only for read?
  bool IsReadonly() const { return access_mode_ == kReadOnly; }

  // Manage the saved state of the memory.
  void SetMemoryState(uint8_t memory_state);
  uint8_t GetMemoryState() const;

  // Create internal histograms for tracking memory use and allocation sizes
  // for allocator of |name| (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
  // be updated with the following histograms for each |name| param:
  //    UMA.PersistentAllocator.name.Errors
  //    UMA.PersistentAllocator.name.UsedPct
  void CreateTrackingHistograms(base::StringPiece name);

  // Flushes the persistent memory to any backing store. This typically does
  // nothing but is used by the FilePersistentMemoryAllocator to inform the
  // OS that all the data should be sent to the disk immediately. This is
  // useful in the rare case where something has just been stored that needs
  // to survive a hard shutdown of the machine like from a power failure.
  // The |sync| parameter indicates if this call should block until the flush
  // is complete but is only advisory and may or may not have an effect
  // depending on the capabilities of the OS. Synchronous flushes are allowed
  // only from threads that are allowed to do I/O but since |sync| is only
  // advisory, all flushes should be done on IO-capable threads.
  // TODO: Since |sync| is ignored on Windows, consider making it re-post on a
  // background thread with |sync| set to true so that |sync| is not just
  // advisory.
  void Flush(bool sync);

  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t used() const;
  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
  // code and size-of(|T|) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A |type_id|
  // of kTypeIdAny (zero) will match any though the size is still checked.
  // NULL is returned if any problem is detected, such as corrupted storage
  // or incorrect parameters. Callers MUST check that the returned value is
  // non-null EVERY TIME before accessing it or risk crashing! Once
  // dereferenced, the pointer is safe to reuse forever.
  //
  // It is essential that the object be of a fixed size. All fields must be
  // of a defined type that does not change based on the compiler or the CPU
  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
  // Unacceptable are int, bool, and wchar_t which are implementation defined
  // with regards to their size.
  //
  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
  // differently between 32 and 64 bit architectures. Either put the bigger
  // elements first, group smaller elements into blocks the size of larger
  // elements, or manually insert padding fields as appropriate for the
  // largest architecture, including at the end. (See the layout sketch after
  // these accessors.)
  //
  // To protect against mistakes, all objects must have the attribute
  // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
  // instance size is not fixed, at least one build will fail.
  //
  // If the size of a structure changes, the type-ID used to recognize it
  // should also change so later versions of the code don't try to read
  // incompatible structures from earlier versions.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as
  // well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. The caller can add it back, if
  // necessary, based on knowledge of how the allocator is being used.
  template <typename T>
  T* GetAsObject(Reference ref) {
    static_assert(std::is_standard_layout_v<T>, "only standard objects");
    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }
  template <typename T>
  const T* GetAsObject(Reference ref) const {
    static_assert(std::is_standard_layout_v<T>, "only standard objects");
    static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }
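  // A layout sketch following the rules above (a hypothetical type; the
  // field names and type ID are illustrative). Every field has a fixed size,
  // larger fields come first, and explicit padding keeps 32-bit and 64-bit
  // builds identical:
  //
  //   struct GoodPersistentType {
  //     static constexpr uint32_t kPersistentTypeId = 0x12345678 + 1;
  //     static constexpr size_t kExpectedInstanceSize = 16;  // NNN, fixed.
  //     uint64_t big_value;    // 8-byte fields first.
  //     uint32_t small_value;  // Then smaller fields.
  //     uint32_t padding;      // Explicit padding out to an 8-byte multiple.
  //   };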
  // Like GetAsObject but get an array of simple, fixed-size types.
  //
  // Use a |count| of the required number of array elements, or kSizeAny.
  // GetAllocSize() can be used to calculate the upper bound but isn't
  // reliable because padding can make space for extra elements that were
  // not written.
  //
  // Remember that an array of char is a string but may not be NUL terminated.
  //
  // There are no compile-time or run-time checks to ensure 32/64-bit size
  // compatibility when using these accessors. Only use fixed-size types such
  // as char, float, double, or (u)intXX_t.
  template <typename T>
  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }
  template <typename T>
  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
    static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }

  // Get the corresponding reference for an object held in persistent memory.
  // If the |memory| is not valid or the type does not match, a kReferenceNull
  // result will be returned.
  Reference GetAsReference(const void* memory, uint32_t type_id) const;

  // Get the number of bytes allocated to a block. This is useful when storing
  // arrays in order to validate the ending boundary. The returned value will
  // include any padding added to achieve the required alignment and so could
  // be larger than given in the original Allocate() request.
  size_t GetAllocSize(Reference ref) const;

  // Access the internal "type" of an object. This generally isn't necessary
  // but can be used to "clear" the type and so effectively mark it as deleted
  // even though the memory stays valid and allocated. Changing the type is
  // an atomic compare/exchange and so requires knowing the existing value.
  // It will return false if the existing type is not what is expected.
  //
  // Changing the type doesn't mean the data is compatible with the new type.
  // Passing true for |clear| will zero the memory after the type has been
  // changed away from |from_type_id| but before it becomes |to_type_id|,
  // meaning that it is done in a manner that is thread-safe. Memory is
  // guaranteed to be zeroed atomically by machine-word in a monotonically
  // increasing order.
  //
  // It will likely be necessary to reconstruct the type before it can be
  // used. Changing the type WILL NOT invalidate existing pointers to the
  // data, either in this process or others, so changing the data structure
  // could have unpredictable results. USE WITH CARE!
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref,
                  uint32_t to_type_id,
                  uint32_t from_type_id,
                  bool clear);
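  // For example, a record could be retired by atomically swapping its type
  // to an application-defined "free" value (a sketch; kMyFreeTypeId is
  // hypothetical):
  //
  //   if (allocator.ChangeType(ref, kMyFreeTypeId,
  //                            MyPersistentObjectType::kPersistentTypeId,
  //                            /*clear=*/true)) {
  //     // The record is now typed kMyFreeTypeId and zeroed; other processes
  //     // iterating the segment will no longer see it as the old type.
  //   }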
  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);

  // Get the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;

  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  // If `allow_write` is false, the corrupt bit will not be written to the
  // data.
  void SetCorrupt(bool allow_write = true) const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during
  // regular operation, such as how much memory is currently used. This
  // should be called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();

  // While the above works much like malloc & free, these next methods provide
  // an "object" interface similar to new and delete.

  // Reserve space in the memory segment of the desired |size| and |type_id|.
  // A return value of zero indicates the allocation failed, otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
  // larger and will always be a multiple of 8 bytes (64 bits).
  Reference Allocate(size_t size, uint32_t type_id);
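  // A "malloc"-style sketch using the raw interface (kMyStringTypeId and the
  // sizes are illustrative values, not part of this API):
  //
  //   constexpr uint32_t kMyStringTypeId = 0x501D1E57;
  //   PersistentMemoryAllocator::Reference ref =
  //       allocator.Allocate(64, kMyStringTypeId);
  //   char* str = allocator.GetAsArray<char>(ref, kMyStringTypeId, 64);
  //   if (str)
  //     memcpy(str, "hello", 6);  // Six bytes copies the trailing NUL too.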
  // Allocate and construct an object in persistent memory. The type must
  // have both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
  // static constexpr fields that are used to ensure compatibility between
  // software versions. An optional size parameter can be specified to force
  // the allocation to be bigger than the size of the object; this is useful
  // when the last field is actually variable length.
  template <typename T>
  T* New(size_t size) {
    if (size < sizeof(T))
      size = sizeof(T);
    Reference ref = Allocate(size, T::kPersistentTypeId);
    void* mem =
        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    if (!mem)
      return nullptr;
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    return new (mem) T();
  }
  template <typename T>
  T* New() {
    return New<T>(sizeof(T));
  }

  // Similar to New, above, but construct the object out of an existing memory
  // block and of an expected type. If |clear| is true, memory will be zeroed
  // before construction. Though this is not standard object behavior, it
  // is present to match with new allocations that always come from zeroed
  // memory. Anything previously present simply ceases to exist; no destructor
  // is called for it so explicitly Delete() the old object first if need be.
  // Calling this will not invalidate existing pointers to the object, either
  // in this process or others, so changing the object could have
  // unpredictable results. USE WITH CARE!
  template <typename T>
  T* New(Reference ref, uint32_t from_type_id, bool clear) {
    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of
    // having to change the type back.
    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
    if (!mem)
      return nullptr;
    // Ensure the allocator's internal alignment is sufficient for this
    // object. This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    // Change the type, clearing the memory if so desired. The new type is
    // "transitioning" so that there is no race condition with the
    // construction of the object should another thread be simultaneously
    // iterating over data. This will "acquire" the memory so no changes get
    // reordered before it.
    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
      return nullptr;
    // Construct an object of the desired type on this memory, just as if
    // New() had been called to create it.
    T* obj = new (mem) T();
    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
    DCHECK(success);
    return obj;
  }

  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0).
  template <typename T>
  void Delete(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if |ref| is invalid.
    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
      return;
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
    DCHECK(success);
  }
  template <typename T>
  void Delete(T* obj) {
    Delete<T>(obj, 0);
  }
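  // A deletion sketch (kMyRetiredTypeId is a hypothetical application-defined
  // value indicating "no longer in use"):
  //
  //   MyPersistentObjectType* obj =
  //       allocator.GetAsObject<MyPersistentObjectType>(ref);
  //   if (obj)
  //     allocator.Delete(obj, kMyRetiredTypeId);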
  // As above but works with objects allocated from persistent memory.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }

  // As above but works with an object allocated from persistent memory.
  template <typename T>
  void MakeIterable(const T* obj) {
    MakeIterable(GetAsReference<T>(obj));
  }

 protected:
  enum MemoryType {
    MEM_EXTERNAL,
    MEM_MALLOC,
    MEM_VIRTUAL,
    MEM_SHARED,
    MEM_FILE,
  };

  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    raw_ptr<void> base;
    MemoryType type;
  };

  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory,
                            size_t size,
                            size_t page_size,
                            uint64_t id,
                            base::StringPiece name,
                            AccessMode access_mode);

  // Implementation of Flush that accepts how much to flush.
  virtual void FlushPartial(size_t length, bool sync);

  // This field is not a raw_ptr<> because it always points to a mmap'd
  // region of memory outside of the PA heap. Thus, there would be overhead
  // involved with using a raw_ptr<> but no safety gains.
  RAW_PTR_EXCLUSION volatile char* const
      mem_base_;                 // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;    // Type of memory allocation.
  const uint32_t mem_size_;      // Size of entire memory segment.
  const uint32_t mem_page_;      // Page size allocations shouldn't cross.
  const size_t vm_page_size_;    // The page size used by the OS.

 private:
  struct SharedMetadata;
  struct BlockHeader;
  // All allocations and data-structures must be aligned to this byte
  // boundary. Alignment as large as the physical bus between CPU and RAM is
  // _required_ for some architectures, is simply more efficient on other
  // CPUs, and generally a Good Idea(tm) for all platforms as it
  // reduces/eliminates the chance that a type will span cache lines.
  // Alignment mustn't be less than 8 to ensure proper alignment for all
  // types. The rest is a balance between reducing spans across multiple
  // cache lines and wasted space spent padding out allocations. An alignment
  // of 16 would ensure that the block header structure always sits in a
  // single cache line. An average of about 1/2 this value will be wasted
  // with every allocation.
  static constexpr size_t kAllocAlignment = 8;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Gets the block header associated with a specific reference.
  const volatile BlockHeader* GetBlock(Reference ref,
                                       uint32_t type_id,
                                       size_t size,
                                       bool queue_ok,
                                       bool free_ok) const;
  volatile BlockHeader* GetBlock(Reference ref,
                                 uint32_t type_id,
                                 size_t size,
                                 bool queue_ok,
                                 bool free_ok) {
    return const_cast<volatile BlockHeader*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
            ref, type_id, size, queue_ok, free_ok));
  }

  // Gets the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref,
                                    uint32_t type_id,
                                    size_t size) const;
  volatile void* GetBlockData(Reference ref, uint32_t type_id, size_t size) {
    return const_cast<volatile void*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
            ref, type_id, size));
  }

  // Records an error in the internal histogram.
  void RecordError(int error) const;

  // Returns the offset to the first free space segment.
  uint32_t freeptr() const;

  // Returns the metadata version used in this allocator.
  uint32_t version() const;

  const AccessMode access_mode_;

  // Local version of "corrupted" flag.
  mutable std::atomic<bool> corrupt_ = false;

  // Histogram recording allocs.
  raw_ptr<HistogramBase> allocs_histogram_ = nullptr;
  // Histogram recording used space.
  raw_ptr<HistogramBase> used_histogram_ = nullptr;
  // Histogram recording errors.
  raw_ptr<HistogramBase> errors_histogram_ = nullptr;

  friend class metrics::FileMetricsProvider;
  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
};


// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);

  LocalPersistentMemoryAllocator(const LocalPersistentMemoryAllocator&) =
      delete;
  LocalPersistentMemoryAllocator& operator=(
      const LocalPersistentMemoryAllocator&) = delete;

  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size, base::StringPiece name);

  // Deallocates a block of local |memory| of the specified |size|.
  static void DeallocateLocalMemory(void* memory, size_t size,
                                    MemoryType type);
};
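// A sketch of the "death rattle" pattern (the size, id, and persistence step
// are illustrative, not prescribed by this API):
//
//   auto allocator = std::make_unique<LocalPersistentMemoryAllocator>(
//       1 << 20, /*id=*/0x5733A10C, "DeathRattle");
//   // ... allocate and fill objects during the session ...
//   // At shutdown, persist the used portion of the segment:
//   base::WriteFile(path, base::make_span(
//       static_cast<const uint8_t*>(allocator->data()), allocator->used()));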

// This allocator takes a writable shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT WritableSharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  WritableSharedPersistentMemoryAllocator(
      base::WritableSharedMemoryMapping memory,
      uint64_t id,
      base::StringPiece name);

  WritableSharedPersistentMemoryAllocator(
      const WritableSharedPersistentMemoryAllocator&) = delete;
  WritableSharedPersistentMemoryAllocator& operator=(
      const WritableSharedPersistentMemoryAllocator&) = delete;

  ~WritableSharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing
  // it to the allocator. This doesn't guarantee the data is valid, just that
  // it won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(
      const base::WritableSharedMemoryMapping& memory);

 private:
  base::WritableSharedMemoryMapping shared_memory_;
};

// This allocator takes a read-only shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  ReadOnlySharedPersistentMemoryAllocator(
      base::ReadOnlySharedMemoryMapping memory,
      uint64_t id,
      base::StringPiece name);

  ReadOnlySharedPersistentMemoryAllocator(
      const ReadOnlySharedPersistentMemoryAllocator&) = delete;
  ReadOnlySharedPersistentMemoryAllocator& operator=(
      const ReadOnlySharedPersistentMemoryAllocator&) = delete;

  ~ReadOnlySharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing
  // it to the allocator. This doesn't guarantee the data is valid, just that
  // it won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(
      const base::ReadOnlySharedMemoryMapping& memory);

 private:
  base::ReadOnlySharedMemoryMapping shared_memory_;
};
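// A sketch of sharing a segment between processes (the 64 KiB size and id
// are hypothetical):
//
//   base::MappedReadOnlyRegion region =
//       base::ReadOnlySharedMemoryRegion::Create(65536);
//   WritableSharedPersistentMemoryAllocator writer(
//       std::move(region.mapping), /*id=*/0x1234, "SharedExample");
//   // Transfer |region.region| to another process, which maps it and
//   // constructs a ReadOnlySharedPersistentMemoryAllocator over the mapping
//   // to iterate the writer's allocations.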
// NaCl builds don't support any kind of file access.
#if !BUILDFLAG(IS_NACL)
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                AccessMode access_mode);

  FilePersistentMemoryAllocator(const FilePersistentMemoryAllocator&) = delete;
  FilePersistentMemoryAllocator& operator=(
      const FilePersistentMemoryAllocator&) = delete;

  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

  // Load all or a portion of the file into memory for fast access. This can
  // be used to force the disk access to be done on a background thread and
  // then have the data available to be read on the main thread with a
  // greatly reduced risk of blocking due to I/O. The risk isn't eliminated
  // completely because the system could always release the memory when under
  // pressure but this can happen to any block of memory (i.e. swapped out).
  void Cache();

 protected:
  // PersistentMemoryAllocator:
  void FlushPartial(size_t length, bool sync) override;

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;
};
#endif  // !BUILDFLAG(IS_NACL)
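// A sketch of opening a file-backed segment for reading (path handling and
// error reporting are elided; |file_path| is assumed to come from the
// caller):
//
//   auto file = std::make_unique<base::MemoryMappedFile>();
//   if (file->Initialize(file_path) &&
//       FilePersistentMemoryAllocator::IsFileAcceptable(
//           *file, /*read_only=*/true)) {
//     FilePersistentMemoryAllocator allocator(
//         std::move(file), /*max_size=*/0, /*id=*/0, "",
//         PersistentMemoryAllocator::kReadOnly);
//     // ... iterate the allocator's records ...
//   }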
// An allocation that is defined but not executed until required at a later
// time. This allows for potential users of an allocation to be decoupled
// from the logic that defines it. In addition, there can be multiple users
// of the same allocation or any region thereof that are guaranteed to always
// use the same space. It's okay to copy/move these objects.
//
// This is a top-level class instead of an inner class of the PMA so that it
// can be forward-declared in other header files without the need to include
// the full contents of this file.
class BASE_EXPORT DelayedPersistentAllocation {
 public:
  using Reference = PersistentMemoryAllocator::Reference;

  // Creates a delayed allocation using the specified |allocator|. When
  // needed, the memory will be allocated using the specified |type| and
  // |size|. If |offset| is given, the returned pointer will be at that
  // offset into the segment; this allows combining allocations into a
  // single persistent segment to reduce overhead and means an "all or
  // nothing" request. Note that |size| is always the total memory size
  // and |offset| is just indicating the start of a block within it.
  //
  // Once allocated, a reference to the segment will be stored at |ref|.
  // This shared location must be initialized to zero (0); it is checked
  // with every Get() request to see if the allocation has already been
  // done. If reading |ref| outside of this object, be sure to do an
  // "acquire" load. Don't write to it -- leave that to this object.
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              size_t offset = 0);
  ~DelayedPersistentAllocation();

  // Gets a pointer to the defined allocation. This will realize the request
  // and update the reference provided during construction. The memory will
  // be zeroed the first time it is returned; after that it is shared with
  // all other Get() requests and so shows any changes made to it elsewhere.
  //
  // If the allocation fails for any reason, null will be returned. This
  // works even on "const" objects because the allocation is already defined,
  // just delayed.
  void* Get() const;

  // Gets the internal reference value. If this returns a non-zero value then
  // a subsequent call to Get() will do nothing but convert that reference
  // into a memory location -- useful for accessing an existing allocation
  // without creating one unnecessarily.
  Reference reference() const {
    return reference_->load(std::memory_order_relaxed);
  }

 private:
  // The underlying object that does the actual allocation of memory. Its
  // lifetime must exceed that of all DelayedPersistentAllocation objects
  // that use it.
  const raw_ptr<PersistentMemoryAllocator> allocator_;

  // The desired type and size of the allocated segment plus the offset
  // within it for the defined request.
  const uint32_t type_;
  const uint32_t size_;
  const uint32_t offset_;

  // The location at which a reference to the allocated segment is to be
  // stored once the allocation is complete. If multiple delayed allocations
  // share the same pointer then an allocation on one will amount to an
  // allocation for all.
  const raw_ptr<volatile std::atomic<Reference>, AllowPtrArithmetic>
      reference_;

  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
};

}  // namespace base

#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_