1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifdef UNSAFE_BUFFERS_BUILD
6 // TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
7 #pragma allow_unsafe_buffers
8 #endif
9 
10 #include "base/metrics/persistent_memory_allocator.h"
11 
12 #include <assert.h>
13 
14 #include <algorithm>
15 #include <atomic>
16 #include <optional>
17 #include <string_view>
18 
19 #include "base/bits.h"
20 #include "base/containers/contains.h"
21 #include "base/debug/alias.h"
22 #include "base/debug/crash_logging.h"
23 #include "base/debug/dump_without_crashing.h"
24 #include "base/files/memory_mapped_file.h"
25 #include "base/logging.h"
26 #include "base/metrics/histogram_functions.h"
27 #include "base/metrics/persistent_histogram_allocator.h"
28 #include "base/metrics/sparse_histogram.h"
29 #include "base/notreached.h"
30 #include "base/numerics/checked_math.h"
31 #include "base/numerics/safe_conversions.h"
32 #include "base/strings/strcat.h"
33 #include "base/system/sys_info.h"
34 #include "base/threading/scoped_blocking_call.h"
35 #include "build/build_config.h"
36 
37 #if BUILDFLAG(IS_WIN)
38 #include <windows.h>
39 // Must be after <windows.h>
40 #include <winbase.h>
41 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
42 #include <sys/mman.h>
43 #if BUILDFLAG(IS_ANDROID)
44 #include <sys/prctl.h>
45 #endif
46 #endif
47 
48 namespace {
49 
50 // Limit of memory segment size. It has to fit in an unsigned 32-bit number
51 // and should be a power of 2 in order to accommodate almost any page size.
52 constexpr uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
53 
54 // A constant (random) value placed in the shared metadata to identify
55 // an already initialized memory segment.
56 constexpr uint32_t kGlobalCookie = 0x408305DC;
57 
58 // The current version of the metadata. If updates are made that change
59 // the metadata, the version number can be queried to operate in a backward-
60 // compatible manner until the memory segment is completely re-initialized.
61 // Note: If you update the metadata in a non-backwards compatible way, reset
62 // |kCompatibleVersions|. Otherwise, add the previous version.
63 constexpr uint32_t kGlobalVersion = 3;
64 static constexpr uint32_t kOldCompatibleVersions[] = {2};
65 
66 // Constant values placed in the block headers to indicate their state.
67 constexpr uint32_t kBlockCookieFree = 0;
68 constexpr uint32_t kBlockCookieQueue = 1;
69 constexpr uint32_t kBlockCookieWasted = 0x4B594F52;
70 constexpr uint32_t kBlockCookieAllocated = 0xC8799269;
71 
72 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
73 // types rather than a combined bitfield.
74 
75 // Flags stored in the flags_ field of the SharedMetadata structure below.
76 constexpr uint32_t kFlagCorrupt = 1 << 0;
77 constexpr uint32_t kFlagFull = 1 << 1;
78 
79 // Errors that are logged in "errors" histogram.
80 // These values are persisted to logs. Entries should not be renumbered and
81 // numeric values should never be reused.
82 enum AllocatorError : int {
83   kMemoryIsCorrupt = 1,
84   kMaxValue = kMemoryIsCorrupt,
85 };
86 
87 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
88   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
89   return (loaded_flags & flag) != 0;
90 }
91 
92 void SetFlag(volatile std::atomic<uint32_t>* flags, uint32_t flag) {
93   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
94   for (;;) {
95     uint32_t new_flags = (loaded_flags & ~flag) | flag;
96     // In the failure case, the actual "flags" value is stored in
97     // loaded_flags. These accesses are "relaxed" because they are
98     // completely independent of all other values.
99     if (flags->compare_exchange_weak(loaded_flags, new_flags,
100                                      std::memory_order_relaxed,
101                                      std::memory_order_relaxed)) {
102       break;
103     }
104   }
105 }
106 
107 }  // namespace
108 
109 namespace base {
110 
111 // The block-header is placed at the top of every allocation within the
112 // segment to describe the data that follows it.
113 struct PersistentMemoryAllocator::BlockHeader {
114   uint32_t size;       // Number of bytes in this block, including header.
115   uint32_t cookie;     // Constant value indicating completed allocation.
116   std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
117   std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
118 };
119 
120 // The shared metadata exists once at the top of the memory segment to
121 // describe the state of the allocator to all processes. The size of this
122 // structure must be a multiple of 64-bits to ensure compatibility between
123 // architectures.
124 struct PersistentMemoryAllocator::SharedMetadata {
125   uint32_t cookie;     // Some value that indicates complete initialization.
126   uint32_t size;       // Total size of memory segment.
127   uint32_t page_size;  // Paging size within memory segment.
128   uint32_t version;    // Version code so upgrades don't break.
129   uint64_t id;         // Arbitrary ID number given by creator.
130   uint32_t name;       // Reference to stored name string.
131   uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.
132 
133   // Above is read-only after first construction. Below may be changed and
134   // so must be marked "volatile" to provide correct inter-process behavior.
135 
136   // State of the memory, plus some padding to keep alignment.
137   volatile std::atomic<uint8_t> memory_state;  // MemoryState enum values.
138   uint8_t padding2[3];
139 
140   // Bitfield of information flags. Access to this should be done through
141   // the CheckFlag() and SetFlag() methods defined above.
142   volatile std::atomic<uint32_t> flags;
143 
144   // Offset/reference to first free space in segment.
145   volatile std::atomic<uint32_t> freeptr;
146 
147   // The "iterable" queue is an M&S Queue as described here, append-only:
148   // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
149   // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
150   volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
151   volatile BlockHeader queue;   // Empty block for linked-list head/tail.
152 };
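// Rough layout of a segment as implied by the structures above (an
// illustrative sketch only; exact offsets depend on sizeof(SharedMetadata)
// and kAllocAlignment):
//
//   +----------------+--------------------+--------------------+--  --+
//   | SharedMetadata | BlockHeader + data | BlockHeader + data | free |
//   +----------------+--------------------+--------------------+--  --+
//   offset 0                                                   ^freeptr
//
// Every allocation begins with a BlockHeader, and "freeptr" always references
// the start of the unallocated tail of the segment.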
153 
154 // The "queue" block header is used to detect "last node" so that zero/null
155 // can be used to indicate that it hasn't been added at all. It is part of
156 // the SharedMetadata structure which itself is always located at offset zero.
157 const PersistentMemoryAllocator::Reference
158     PersistentMemoryAllocator::kReferenceQueue =
159         offsetof(SharedMetadata, queue);
160 
161 const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
162     FILE_PATH_LITERAL(".pma");
163 
164 
165 PersistentMemoryAllocator::Iterator::Iterator(
166     const PersistentMemoryAllocator* allocator)
167     : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
168 
169 PersistentMemoryAllocator::Iterator::Iterator(
170     const PersistentMemoryAllocator* allocator,
171     Reference starting_after)
172     : allocator_(allocator), last_record_(0), record_count_(0) {
173   Reset(starting_after);
174 }
175 
176 PersistentMemoryAllocator::Iterator::~Iterator() = default;
177 
178 void PersistentMemoryAllocator::Iterator::Reset() {
179   last_record_.store(kReferenceQueue, std::memory_order_relaxed);
180   record_count_.store(0, std::memory_order_relaxed);
181 }
182 
183 void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
184   if (starting_after == 0) {
185     Reset();
186     return;
187   }
188 
189   last_record_.store(starting_after, std::memory_order_relaxed);
190   record_count_.store(0, std::memory_order_relaxed);
191 
192   // Ensure that the starting point is a valid, iterable block (meaning it can
193   // be read and has a non-zero "next" pointer).
194   const volatile BlockHeader* block =
195       allocator_->GetBlock(starting_after, 0, 0, false, false);
196   if (!block || block->next.load(std::memory_order_relaxed) == 0) {
197     NOTREACHED();
198   }
199 }
200 
201 PersistentMemoryAllocator::Reference
202 PersistentMemoryAllocator::Iterator::GetLast() {
203   Reference last = last_record_.load(std::memory_order_relaxed);
204   if (last == kReferenceQueue)
205     return kReferenceNull;
206   return last;
207 }
208 
209 PersistentMemoryAllocator::Reference
210 PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
211   // Make a copy of the existing count of found-records, acquiring all changes
212   // made to the allocator, notably "freeptr" (see comment in loop for why
213   // the load of that value cannot be moved above here) that occurred during
214   // any previous runs of this method, including those by parallel threads
215   // that interrupted it. It pairs with the Release at the end of this method.
216   //
217   // Otherwise, if the compiler were to arrange the two loads such that
218   // "count" was fetched _after_ "freeptr" then it would be possible for
219   // this thread to be interrupted between them and other threads perform
220   // multiple allocations, make-iterables, and iterations (with the included
221   // increment of |record_count_|) culminating in the check at the bottom
222   // mistakenly determining that a loop exists. Isn't this stuff fun?
223   uint32_t count = record_count_.load(std::memory_order_acquire);
224 
225   Reference last = last_record_.load(std::memory_order_acquire);
226   Reference next;
227   while (true) {
228     const volatile BlockHeader* block =
229         allocator_->GetBlock(last, 0, 0, true, false);
230     if (!block)  // Invalid iterator state.
231       return kReferenceNull;
232 
233     // The compiler and CPU can freely reorder all memory accesses on which
234     // there are no dependencies. It could, for example, move the load of
235     // "freeptr" to above this point because there are no explicit dependencies
236     // between it and "next". If it did, however, then another block could
237     // be queued after that but before the following load meaning there is
238     // one more queued block than the future "detect loop by having more
239     // blocks that could fit before freeptr" will allow.
240     //
241     // By "acquiring" the "next" value here, it's synchronized to the enqueue
242     // of the node which in turn is synchronized to the allocation (which sets
243     // freeptr). Thus, the scenario above cannot happen.
244     next = block->next.load(std::memory_order_acquire);
245     if (next == kReferenceQueue)  // No next allocation in queue.
246       return kReferenceNull;
247     block = allocator_->GetBlock(next, 0, 0, false, false);
248     if (!block) {  // Memory is corrupt.
249       allocator_->SetCorrupt();
250       return kReferenceNull;
251     }
252 
253     // Update the "last_record" pointer to be the reference being returned.
254     // If it fails then another thread has already iterated past it so loop
255     // again. Failing will also load the existing value into "last" so there
256     // is no need to do another such load when the while-loop restarts. A
257     // "strong" compare-exchange is used because failing unnecessarily would
258     // mean repeating some fairly costly validations above.
259     if (last_record_.compare_exchange_strong(
260             last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
261       *type_return = block->type_id.load(std::memory_order_relaxed);
262       break;
263     }
264   }
265 
266   // Memory corruption could cause a loop in the list. Such must be detected
267   // so as to not cause an infinite loop in the caller. This is done by simply
268   // making sure it doesn't iterate more times than the absolute maximum
269   // number of allocations that could have been made. Callers are likely
270   // to loop multiple times before it is detected but at least it stops.
271   const uint32_t freeptr = std::min(
272       allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
273       allocator_->mem_size_);
274   const uint32_t max_records =
275       freeptr / (sizeof(BlockHeader) + kAllocAlignment);
276   if (count > max_records) {
277     allocator_->SetCorrupt();
278     return kReferenceNull;
279   }
280 
281   // Increment the count and release the changes made above. It pairs with
282   // the Acquire at the top of this method. Note that this operation is not
283   // strictly synchronized with fetching of the object to return, which would
284   // have to be done inside the loop and is somewhat complicated to achieve.
285   // It does not matter if it falls behind temporarily so long as it never
286   // gets ahead.
287   record_count_.fetch_add(1, std::memory_order_release);
288   return next;
289 }
290 
291 PersistentMemoryAllocator::Reference
292 PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
293   Reference ref;
294   uint32_t type_found;
295   while ((ref = GetNext(&type_found)) != 0) {
296     if (type_found == type_match)
297       return ref;
298   }
299   return kReferenceNull;
300 }
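// A minimal iteration sketch (illustrative only; |kMyObjectTypeId| is a
// hypothetical type identifier and error handling is omitted):
//
//   PersistentMemoryAllocator::Iterator iter(allocator);
//   PersistentMemoryAllocator::Reference ref;
//   uint32_t type_id;
//   while ((ref = iter.GetNext(&type_id)) != 0) {
//     if (type_id == kMyObjectTypeId) {
//       // Access the record's memory, e.g. via GetAsArray<char>() or the
//       // typed accessors declared in the header.
//     }
//   }
//
// Only allocations that were published with MakeIterable() are ever returned
// by GetNext().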
301 
302 
303 // static
304 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
305                                                    size_t size,
306                                                    size_t page_size,
307                                                    bool readonly) {
308   return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
309           (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
310           (size % kAllocAlignment == 0 || readonly) &&
311           (page_size == 0 || size % page_size == 0 || readonly));
312 }
313 
314 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
315                                                      size_t size,
316                                                      size_t page_size,
317                                                      uint64_t id,
318                                                      std::string_view name,
319                                                      AccessMode access_mode)
320     : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
321                                 size,
322                                 page_size,
323                                 id,
324                                 name,
325                                 access_mode) {}
326 
327 PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
328                                                      size_t size,
329                                                      size_t page_size,
330                                                      uint64_t id,
331                                                      std::string_view name,
332                                                      AccessMode access_mode)
333     : mem_base_(static_cast<char*>(memory.base)),
334       mem_type_(memory.type),
335       mem_size_(checked_cast<uint32_t>(size)),
336       mem_page_(checked_cast<uint32_t>((page_size ? page_size : size))),
337 #if BUILDFLAG(IS_NACL)
338       vm_page_size_(4096U),  // SysInfo is not built for NACL.
339 #else
340       vm_page_size_(SysInfo::VMAllocationGranularity()),
341 #endif
342       access_mode_(access_mode) {
343   // These asserts ensure that the structures are 32/64-bit agnostic and meet
344   // all the requirements of use within the allocator. They access private
345   // definitions and so cannot be moved to the global scope.
346   static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
347                 "struct is not portable across different natural word widths");
348   static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
349                 "struct is not portable across different natural word widths");
350 
351   static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
352                 "BlockHeader is not a multiple of kAllocAlignment");
353   static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
354                 "SharedMetadata is not a multiple of kAllocAlignment");
355   static_assert(kReferenceQueue % kAllocAlignment == 0,
356                 "\"queue\" is not aligned properly; must be at end of struct");
357 
358   // Ensure that memory segment is of acceptable size.
359   const bool readonly = access_mode == kReadOnly;
360   CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
361 
362   // These atomics operate inter-process and so must be lock-free.
363   DCHECK(SharedMetadata().freeptr.is_lock_free());
364   DCHECK(SharedMetadata().flags.is_lock_free());
365   DCHECK(BlockHeader().next.is_lock_free());
366   CHECK(corrupt_.is_lock_free());
367 
368   // When calling SetCorrupt() during initialization, don't write to the memory
369   // in kReadOnly and kReadWriteExisting modes.
370   const bool allow_write_for_set_corrupt = (access_mode == kReadWrite);
371   if (shared_meta()->cookie != kGlobalCookie) {
372     if (access_mode != kReadWrite) {
373       SetCorrupt(allow_write_for_set_corrupt);
374       return;
375     }
376 
377     // This block is only executed when a completely new memory segment is
378     // being initialized. It's unshared and single-threaded...
379     volatile BlockHeader* const first_block =
380         reinterpret_cast<volatile BlockHeader*>(mem_base_ +
381                                                 sizeof(SharedMetadata));
382     if (shared_meta()->cookie != 0 ||
383         shared_meta()->size != 0 ||
384         shared_meta()->version != 0 ||
385         shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
386         shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
387         shared_meta()->id != 0 ||
388         shared_meta()->name != 0 ||
389         shared_meta()->tailptr != 0 ||
390         shared_meta()->queue.cookie != 0 ||
391         shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
392         first_block->size != 0 ||
393         first_block->cookie != 0 ||
394         first_block->type_id.load(std::memory_order_relaxed) != 0 ||
395         first_block->next != 0) {
396       // ...or something malicious has been playing with the metadata.
397       CHECK(allow_write_for_set_corrupt);
398       SetCorrupt(allow_write_for_set_corrupt);
399     }
400 
401     // This is still safe to do even if corruption has been detected.
402     shared_meta()->cookie = kGlobalCookie;
403     shared_meta()->size = mem_size_;
404     shared_meta()->page_size = mem_page_;
405     shared_meta()->version = kGlobalVersion;
406     shared_meta()->id = id;
407     // Don't overwrite `freeptr` if it is set since we could have raced with
408     // another allocator. In such a case, `freeptr` would get "rewound", and
409     // new objects would be allocated on top of already allocated objects.
410     uint32_t empty_freeptr = 0;
411     shared_meta()->freeptr.compare_exchange_strong(
412         /*expected=*/empty_freeptr, /*desired=*/sizeof(SharedMetadata),
413         /*success=*/std::memory_order_release,
414         /*failure=*/std::memory_order_relaxed);
415 
416     // Set up the queue of iterable allocations.
417     shared_meta()->queue.size = sizeof(BlockHeader);
418     shared_meta()->queue.cookie = kBlockCookieQueue;
419     shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
420     shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
421 
422     // Allocate space for the name so other processes can learn it.
423     if (!name.empty()) {
424       const size_t name_length = name.length() + 1;
425       shared_meta()->name = Allocate(name_length, 0);
426       char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
427       if (name_cstr)
428         memcpy(name_cstr, name.data(), name.length());
429     }
430 
431     shared_meta()->memory_state.store(MEMORY_INITIALIZED,
432                                       std::memory_order_release);
433   } else {
434     if (shared_meta()->size == 0 ||
435         (shared_meta()->version != kGlobalVersion &&
436          !Contains(kOldCompatibleVersions, shared_meta()->version)) ||
437         shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
438         shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
439         shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
440       SetCorrupt(allow_write_for_set_corrupt);
441     }
442     if (!readonly) {
443       // The allocator is attaching to a previously initialized segment of
444       // memory. If the initialization parameters differ, make the best of it
445       // by reducing the local construction parameters to match those of the
446       // actual memory area. This ensures that the local object never tries to
447       // write outside of the original bounds.
448       // Because the fields are const to ensure that no code other than the
449       // constructor makes changes to them as well as to give optimization hints
450       // to the compiler, it's necessary to const-cast them for changes here.
451       if (shared_meta()->size < mem_size_)
452         *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
453       if (shared_meta()->page_size < mem_page_)
454         *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
455 
456       // Ensure that settings are still valid after the above adjustments.
457       if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly)) {
458         SetCorrupt(allow_write_for_set_corrupt);
459       }
460     }
461   }
462 }
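// Construction sketch (illustrative only; the sizes, id, and name below are
// arbitrary):
//
//   // Owns and zero-initializes its own memory (see
//   // LocalPersistentMemoryAllocator further down in this file).
//   LocalPersistentMemoryAllocator local(64 << 10, /*id=*/1234, "MyAllocator");
//
//   // Attaches to an existing, already-initialized mapping without ever
//   // writing to it.
//   PersistentMemoryAllocator attached(existing_base, existing_size,
//                                      /*page_size=*/0, /*id=*/0, /*name=*/"",
//                                      PersistentMemoryAllocator::kReadOnly);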
463 
464 PersistentMemoryAllocator::~PersistentMemoryAllocator() {
465   // It's strictly forbidden to do any memory access here in case there is
466   // some issue with the underlying memory segment. The "Local" allocator
467   // makes use of this to allow deletion of the segment on the heap from
468   // within its destructor.
469 }
470 
471 uint64_t PersistentMemoryAllocator::Id() const {
472   return shared_meta()->id;
473 }
474 
475 const char* PersistentMemoryAllocator::Name() const {
476   Reference name_ref = shared_meta()->name;
477   const char* name_cstr =
478       GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
479   if (!name_cstr)
480     return "";
481 
482   size_t name_length = GetAllocSize(name_ref);
483   if (name_cstr[name_length - 1] != '\0') {
484     NOTREACHED();
485   }
486 
487   return name_cstr;
488 }
489 
490 void PersistentMemoryAllocator::CreateTrackingHistograms(
491     std::string_view name) {
492   if (name.empty() || access_mode_ == kReadOnly) {
493     return;
494   }
495   std::string name_string(name);
496 
497   DCHECK(!used_histogram_);
498   used_histogram_ = LinearHistogram::FactoryGet(
499       "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
500       HistogramBase::kUmaTargetedHistogramFlag);
501 }
502 
503 void PersistentMemoryAllocator::Flush(bool sync) {
504   FlushPartial(used(), sync);
505 }
506 
507 void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
508   shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
509   FlushPartial(sizeof(SharedMetadata), false);
510 }
511 
512 uint8_t PersistentMemoryAllocator::GetMemoryState() const {
513   return shared_meta()->memory_state.load(std::memory_order_relaxed);
514 }
515 
516 size_t PersistentMemoryAllocator::used() const {
517   return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
518                   mem_size_);
519 }
520 
521 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
522     const void* memory,
523     uint32_t type_id) const {
524   uintptr_t address = reinterpret_cast<uintptr_t>(memory);
525   if (address < reinterpret_cast<uintptr_t>(mem_base_))
526     return kReferenceNull;
527 
528   uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
529   if (offset >= mem_size_ || offset < sizeof(BlockHeader))
530     return kReferenceNull;
531 
532   Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
533   if (!GetBlockData(ref, type_id, kSizeAny))
534     return kReferenceNull;
535 
536   return ref;
537 }
538 
539 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
540   const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
541   if (!block)
542     return 0;
543   uint32_t size = block->size;
544   // Header was verified by GetBlock() but a malicious actor could change
545   // the value between there and here. Check it again.
546   uint32_t total_size;
547   if (size <= sizeof(BlockHeader) ||
548       !base::CheckAdd(ref, size).AssignIfValid(&total_size) ||
549       total_size > mem_size_) {
550     SetCorrupt();
551     return 0;
552   }
553   return size - sizeof(BlockHeader);
554 }
555 
556 uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
557   const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
558   if (!block)
559     return 0;
560   return block->type_id.load(std::memory_order_relaxed);
561 }
562 
563 bool PersistentMemoryAllocator::ChangeType(Reference ref,
564                                            uint32_t to_type_id,
565                                            uint32_t from_type_id,
566                                            bool clear) {
567   DCHECK_NE(access_mode_, kReadOnly);
568   volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
569   if (!block)
570     return false;
571 
572   // "Strong" exchanges are used below because there is no loop that can retry
573   // in the wake of spurious failures possible with "weak" exchanges. It is,
574   // in aggregate, an "acquire-release" operation so no memory accesses can be
575   // reordered either before or after this method (since changes based on type
576   // could happen on either side).
577 
578   if (clear) {
579     // If clearing the memory, first change it to the "transitioning" type so
580     // there can be no confusion by other threads. After the memory is cleared,
581     // it can be changed to its final type.
582     if (!block->type_id.compare_exchange_strong(
583             from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
584             std::memory_order_acquire)) {
585       // Existing type wasn't what was expected: fail (with no changes)
586       return false;
587     }
588 
589     // Clear the memory in an atomic manner. Using "release" stores forces
590     // every write to be done after the ones before it. This is better than
591     // using memset because (a) it supports "volatile" and (b) it creates a
592     // reliable pattern upon which other threads may rely.
593     volatile std::atomic<int>* data =
594         reinterpret_cast<volatile std::atomic<int>*>(
595             reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
596     const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
597     DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
598     for (uint32_t i = 0; i < words; ++i) {
599       data->store(0, std::memory_order_release);
600       ++data;
601     }
602 
603     // If the destination type is "transitioning" then skip the final exchange.
604     if (to_type_id == kTypeIdTransitioning)
605       return true;
606 
607     // Finish the change to the desired type.
608     from_type_id = kTypeIdTransitioning;  // Exchange needs modifiable original.
609     bool success = block->type_id.compare_exchange_strong(
610         from_type_id, to_type_id, std::memory_order_release,
611         std::memory_order_relaxed);
612     DCHECK(success);  // Should never fail.
613     return success;
614   }
615 
616   // One step change to the new type. Will return false if the existing value
617   // doesn't match what is expected.
618   return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
619                                                 std::memory_order_acq_rel,
620                                                 std::memory_order_acquire);
621 }
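// A minimal "claim a record" sketch built on ChangeType() (illustrative only;
// the type ids are hypothetical):
//
//   // Atomically take ownership of a record currently published with
//   // kFreeRecordType, wiping its contents before re-publishing it.
//   if (allocator->ChangeType(ref, kInUseRecordType, kFreeRecordType,
//                             /*clear=*/true)) {
//     // This caller now owns the record exclusively.
//   }
//
// Because the exchange operates on the stored type id, only one caller can win
// a given transition, which is what makes such a claim race-free.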
622 
623 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
624     size_t req_size,
625     uint32_t type_id) {
626   return AllocateImpl(req_size, type_id);
627 }
628 
629 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
630     size_t req_size,
631     uint32_t type_id) {
632   DCHECK_NE(access_mode_, kReadOnly);
633 
634   // Validate req_size to ensure it won't overflow when used as 32-bit value.
635   if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
636     NOTREACHED();
637   }
638 
639   // Round up the requested size, plus header, to the next allocation alignment.
640   size_t size = bits::AlignUp(req_size + sizeof(BlockHeader), kAllocAlignment);
641   if (size <= sizeof(BlockHeader) || size > mem_page_) {
642     // This shouldn't be reached through normal means.
643     debug::DumpWithoutCrashing();
644     return kReferenceNull;
645   }
646 
647   // Get the current start of unallocated memory. Other threads may
648   // update this at any time and cause us to retry these operations.
649   // This value should be treated as "const" to avoid confusion through
650   // the code below but recognize that any failed compare-exchange operation
651   // involving it will cause it to be loaded with a more recent value. The
652   // code should either exit or restart the loop in that case.
653   /* const */ uint32_t freeptr =
654       shared_meta()->freeptr.load(std::memory_order_acquire);
655 
656   // Allocation is lockless so we do all our calculation and then, if saving
657   // indicates a change has occurred since we started, scrap everything and
658   // start over.
659   for (;;) {
660     if (IsCorrupt())
661       return kReferenceNull;
662 
663     if (freeptr + size > mem_size_) {
664       SetFlag(&shared_meta()->flags, kFlagFull);
665       return kReferenceNull;
666     }
667 
668     // Get pointer to the "free" block. If something has been allocated since
669     // the load of freeptr above, it is still safe as nothing will be written
670     // to that location until after the compare-exchange below.
671     volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
672     if (!block) {
673       SetCorrupt();
674       return kReferenceNull;
675     }
676 
677     // An allocation cannot cross page boundaries. If it would, create a
678     // "wasted" block and begin again at the top of the next page. This
679     // area could just be left empty but we fill in the block header just
680     // for completeness' sake.
681     const uint32_t page_free = mem_page_ - freeptr % mem_page_;
682     if (size > page_free) {
683       if (page_free <= sizeof(BlockHeader)) {
684         SetCorrupt();
685         return kReferenceNull;
686       }
687 
688 #if !BUILDFLAG(IS_NACL)
689       // In production, with the current state of the code, this code path
690       // should not be reached. However, crash reports have been hinting that it
691       // is. Add crash keys to investigate this.
692       // TODO(crbug.com/40064026): Remove them once done.
693       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "mem_size_",
694                               mem_size_);
695       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "mem_page_",
696                               mem_page_);
697       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "freeptr", freeptr);
698       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "page_free",
699                               page_free);
700       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size", size);
701       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "req_size",
702                               req_size);
703       SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_id", type_id);
704       std::string persistent_file_name = "N/A";
705       auto* allocator = GlobalHistogramAllocator::Get();
706       if (allocator && allocator->HasPersistentLocation()) {
707         persistent_file_name =
708             allocator->GetPersistentLocation().BaseName().AsUTF8Unsafe();
709       }
710       SCOPED_CRASH_KEY_STRING256("PersistentMemoryAllocator", "file_name",
711                                  persistent_file_name);
712       debug::DumpWithoutCrashing();
713 #endif  // !BUILDFLAG(IS_NACL)
714 
715       const uint32_t new_freeptr = freeptr + page_free;
716       if (shared_meta()->freeptr.compare_exchange_strong(
717               freeptr, new_freeptr, std::memory_order_acq_rel,
718               std::memory_order_acquire)) {
719         block->size = page_free;
720         block->cookie = kBlockCookieWasted;
721       }
722       continue;
723     }
724 
725     // Don't leave a slice at the end of a page too small for anything. This
726     // can result in an allocation up to two alignment-sizes greater than the
727     // minimum required by requested-size + header + alignment.
728     if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) {
729       size = page_free;
730       if (freeptr + size > mem_size_) {
731         SetCorrupt();
732         return kReferenceNull;
733       }
734     }
735 
736     // This cast is safe because (freeptr + size) <= mem_size_.
737     const uint32_t new_freeptr = static_cast<uint32_t>(freeptr + size);
738 
739     // Save our work. Try again if another thread has completed an allocation
740   // while we were processing. A "weak" exchange would be permissible here
741     // because the code will just loop and try again but the above processing
742     // is significant so make the extra effort of a "strong" exchange.
743     if (!shared_meta()->freeptr.compare_exchange_strong(
744             freeptr, new_freeptr, std::memory_order_acq_rel,
745             std::memory_order_acquire)) {
746       continue;
747     }
748 
749     // Given that all memory was zeroed before ever being given to an instance
750     // of this class and given that we only allocate in a monotonic fashion
751     // going forward, it must be that the newly allocated block is completely
752     // full of zeros. If we find anything in the block header that is NOT a
753     // zero then something must have previously run amok through memory,
754     // writing beyond the allocated space and into unallocated space.
755     if (block->size != 0 ||
756         block->cookie != kBlockCookieFree ||
757         block->type_id.load(std::memory_order_relaxed) != 0 ||
758         block->next.load(std::memory_order_relaxed) != 0) {
759       SetCorrupt();
760       return kReferenceNull;
761     }
762 
763     // Make sure the memory exists by writing to the first byte of every memory
764     // page it touches beyond the one containing the block header itself.
765     // As the underlying storage is often memory mapped from disk or shared
766     // space, sometimes things go wrong and those addresses don't actually exist
767     // leading to a SIGBUS (or Windows equivalent) at some arbitrary location
768     // in the code. This should concentrate all those failures into this
769     // location for easy tracking and, eventually, proper handling.
770     volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
771     volatile char* mem_begin = reinterpret_cast<volatile char*>(
772         (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
773          (vm_page_size_ - 1)) &
774         ~static_cast<uintptr_t>(vm_page_size_ - 1));
775     for (volatile char* memory = mem_begin; memory < mem_end;
776          memory += vm_page_size_) {
777       // It's required that a memory segment start as all zeros and thus the
778       // newly allocated block is all zeros at this point. Thus, writing a
779       // zero to it allows testing that the memory exists without actually
780       // changing its contents. The compiler doesn't know about the requirement
781       // and so cannot optimize-away these writes.
782       *memory = 0;
783     }
784 
785     // Load information into the block header. There is no "release" of the
786     // data here because this memory can, currently, be seen only by the thread
787     // performing the allocation. When it comes time to share this, the thread
788     // will call MakeIterable() which does the release operation.
789     // `size` is at most kSegmentMaxSize, so this cast is safe.
790     block->size = static_cast<uint32_t>(size);
791     block->cookie = kBlockCookieAllocated;
792     block->type_id.store(type_id, std::memory_order_relaxed);
793     return freeptr;
794   }
795 }
796 
797 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
798   uint32_t remaining = std::max(
799       mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
800       (uint32_t)sizeof(BlockHeader));
801   meminfo->total = mem_size_;
802   meminfo->free = remaining - sizeof(BlockHeader);
803 }
804 
805 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
806   DCHECK_NE(access_mode_, kReadOnly);
807   if (IsCorrupt())
808     return;
809   volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
810   if (!block)  // invalid reference
811     return;
812 
813   Reference empty_ref = 0;
814   if (!block->next.compare_exchange_strong(
815           /*expected=*/empty_ref, /*desired=*/kReferenceQueue,
816           /*success=*/std::memory_order_acq_rel,
817           /*failure=*/std::memory_order_acquire)) {
818     // Already iterable (or another thread is currently making this iterable).
819     return;
820   }
821 
822   // Try to add this block to the tail of the queue. May take multiple tries.
823   // If so, tail will be automatically updated with a more recent value during
824   // compare-exchange operations.
825   uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
826   for (;;) {
827     // Acquire the current tail-pointer released by previous call to this
828     // method and validate it.
829     block = GetBlock(tail, 0, 0, true, false);
830     if (!block) {
831       SetCorrupt();
832       return;
833     }
834 
835     // Try to insert the block at the tail of the queue. The tail node always
836     // has an existing value of kReferenceQueue; if that is somehow not the
837     // existing value then another thread has acted in the meantime. A "strong"
838     // exchange is necessary so the "else" block does not get executed when
839     // that is not actually the case (which can happen with a "weak" exchange).
840     uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
841     if (block->next.compare_exchange_strong(next, ref,
842                                             std::memory_order_acq_rel,
843                                             std::memory_order_acquire)) {
844       // Update the tail pointer to the new offset. If the "else" clause did
845       // not exist, then this could be a simple Release_Store to set the new
846       // value but because it does, it's possible that other threads could add
847       // one or more nodes at the tail before reaching this point. We don't
848       // have to check the return value because it either operates correctly
849       // or the exact same operation has already been done (by the "else"
850       // clause) on some other thread.
851       shared_meta()->tailptr.compare_exchange_strong(tail, ref,
852                                                      std::memory_order_release,
853                                                      std::memory_order_relaxed);
854       return;
855     }
856     // In the unlikely case that a thread crashed or was killed between the
857     // update of "next" and the update of "tailptr", it is necessary to
858     // perform the operation that would have been done. There's no explicit
859     // check for crash/kill which means that this operation may also happen
860     // even when the other thread is in perfect working order which is what
861     // necessitates the CompareAndSwap above.
862     shared_meta()->tailptr.compare_exchange_strong(
863         tail, next, std::memory_order_acq_rel, std::memory_order_acquire);
864   }
865 }
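// The typical allocate-then-publish pattern looks roughly like this
// (illustrative only; |MyStruct| and |kMyStructTypeId| are hypothetical):
//
//   PersistentMemoryAllocator::Reference ref =
//       allocator->Allocate(sizeof(MyStruct), kMyStructTypeId);
//   if (ref) {
//     // Fill in the data while it is visible only to this thread, then
//     // publish it so Iterator-based readers can find it.
//     allocator->MakeIterable(ref);
//   }
//
// MakeIterable() performs the "release" that pairs with the "acquire" in
// Iterator::GetNext(), so all writes made before the call are visible to any
// reader that discovers the reference through iteration.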
866 
867 // The "corrupted" state is held both locally and globally (shared). The
868 // shared flag can't be trusted since a malicious actor could overwrite it.
869 // Because corruption can be detected during read-only operations such as
870 // iteration, this method may be called by other "const" methods. In this
871 // case, it's safe to discard the constness and modify the local flag and
872 // maybe even the shared flag if the underlying data isn't actually read-only.
873 void PersistentMemoryAllocator::SetCorrupt(bool allow_write) const {
874   if (!corrupt_.load(std::memory_order_relaxed) &&
875       !CheckFlag(
876           const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
877           kFlagCorrupt)) {
878     LOG(ERROR) << "Corruption detected in shared-memory segment.";
879   }
880 
881   corrupt_.store(true, std::memory_order_relaxed);
882   if (allow_write && access_mode_ != kReadOnly) {
883     SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
884             kFlagCorrupt);
885   }
886 }
887 
888 bool PersistentMemoryAllocator::IsCorrupt() const {
889   if (corrupt_.load(std::memory_order_relaxed)) {
890     return true;
891   }
892   if (CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
893     // Set the local flag if we found the flag in the data.
894     SetCorrupt(/*allow_write=*/false);
895     return true;
896   }
897   return false;
898 }
899 
900 bool PersistentMemoryAllocator::IsFull() const {
901   return CheckFlag(&shared_meta()->flags, kFlagFull);
902 }
903 
904 // Dereference a block |ref| and ensure that it's valid for the desired
905 // |type_id| and |size|. |special| indicates that we may try to access block
906 // headers not available to callers but still accessed by this module. By
907 // having internal dereferences go through this same function, the allocator
908 // is hardened against corruption.
909 const volatile PersistentMemoryAllocator::BlockHeader*
910 PersistentMemoryAllocator::GetBlock(Reference ref,
911                                     uint32_t type_id,
912                                     size_t size,
913                                     bool queue_ok,
914                                     bool free_ok) const {
915   // Handle special cases.
916   if (ref == kReferenceQueue && queue_ok)
917     return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
918 
919   // Validation of parameters.
920   if (ref < sizeof(SharedMetadata))
921     return nullptr;
922   if (ref % kAllocAlignment != 0)
923     return nullptr;
924   size += sizeof(BlockHeader);
925   uint32_t total_size;
926   if (!base::CheckAdd(ref, size).AssignIfValid(&total_size)) {
927     return nullptr;
928   }
929   if (total_size > mem_size_) {
930     return nullptr;
931   }
932 
933   // Validation of referenced block-header.
934   if (!free_ok) {
935     const volatile BlockHeader* const block =
936         reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
937     if (block->cookie != kBlockCookieAllocated)
938       return nullptr;
939     if (block->size < size)
940       return nullptr;
941     uint32_t block_size;
942     if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) {
943       return nullptr;
944     }
945     if (block_size > mem_size_) {
946       return nullptr;
947     }
948     if (type_id != 0 &&
949         block->type_id.load(std::memory_order_relaxed) != type_id) {
950       return nullptr;
951     }
952   }
953 
954   // Return pointer to block data.
955   return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
956 }
957 
958 void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
959   // Generally there is nothing to do as every write is done through volatile
960   // memory with atomic instructions to guarantee consistency. This (virtual)
961   // method exists so that derived classes can do special things, such as tell
962   // the OS to write changes to disk now rather than when convenient.
963 }
964 
965 uint32_t PersistentMemoryAllocator::freeptr() const {
966   return shared_meta()->freeptr.load(std::memory_order_relaxed);
967 }
968 
969 uint32_t PersistentMemoryAllocator::version() const {
970   return shared_meta()->version;
971 }
972 
973 const volatile void* PersistentMemoryAllocator::GetBlockData(
974     Reference ref,
975     uint32_t type_id,
976     size_t size) const {
977   DCHECK(size > 0);
978   const volatile BlockHeader* block =
979       GetBlock(ref, type_id, size, false, false);
980   if (!block)
981     return nullptr;
982   return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
983 }
984 
985 void PersistentMemoryAllocator::UpdateTrackingHistograms() {
986   DCHECK_NE(access_mode_, kReadOnly);
987   if (used_histogram_) {
988     MemoryInfo meminfo;
989     GetMemoryInfo(&meminfo);
990     HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
991         ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
992     used_histogram_->Add(used_percent);
993   }
994 }
995 
996 
997 //----- LocalPersistentMemoryAllocator -----------------------------------------
998 
999 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
1000     size_t size,
1001     uint64_t id,
1002     std::string_view name)
1003     : PersistentMemoryAllocator(AllocateLocalMemory(size, name),
1004                                 size,
1005                                 0,
1006                                 id,
1007                                 name,
1008                                 kReadWrite) {}
1009 
1010 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
1011   DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
1012 }
1013 
1014 // static
1015 PersistentMemoryAllocator::Memory
1016 LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size,
1017                                                     std::string_view name) {
1018   void* address;
1019 
1020 #if BUILDFLAG(IS_WIN)
1021   address =
1022       ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1023   if (address)
1024     return Memory(address, MEM_VIRTUAL);
1025 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1026   // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
1027   // MAP_SHARED is not available on Linux <2.4 but required on Mac.
1028   address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
1029                    MAP_ANON | MAP_SHARED, -1, 0);
1030   if (address != MAP_FAILED) {
1031 #if BUILDFLAG(IS_ANDROID)
1032     // Allow the anonymous memory region allocated by mmap(MAP_ANON) to be
1033     // identified in /proc/$PID/smaps.  This helps improve visibility into
1034     // Chrome's memory usage on Android.
1035     const std::string arena_name = base::StrCat({"persistent:", name});
1036     prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, address, size, arena_name.c_str());
1037 #endif
1038     return Memory(address, MEM_VIRTUAL);
1039   }
1040 #else
1041 #error This architecture is not (yet) supported.
1042 #endif
1043 
1044   // As a last resort, just allocate the memory from the heap. This will
1045   // achieve the same basic result but the acquired memory has to be
1046   // explicitly zeroed and thus realized immediately (i.e. all pages are
1047   // added to the process now instead of only when first accessed).
1048   address = malloc(size);
1049   DPCHECK(address);
1050   memset(address, 0, size);
1051   return Memory(address, MEM_MALLOC);
1052 }
1053 
1054 // static
1055 void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
1056                                                            size_t size,
1057                                                            MemoryType type) {
1058   if (type == MEM_MALLOC) {
1059     free(memory);
1060     return;
1061   }
1062 
1063   DCHECK_EQ(MEM_VIRTUAL, type);
1064 #if BUILDFLAG(IS_WIN)
1065   BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
1066   DCHECK(success);
1067 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1068   int result = ::munmap(memory, size);
1069   DCHECK_EQ(0, result);
1070 #else
1071 #error This architecture is not (yet) supported.
1072 #endif
1073 }
1074 
1075 //----- WritableSharedPersistentMemoryAllocator --------------------------------
1076 
1077 WritableSharedPersistentMemoryAllocator::
1078     WritableSharedPersistentMemoryAllocator(
1079         base::WritableSharedMemoryMapping memory,
1080         uint64_t id,
1081         std::string_view name)
1082     : PersistentMemoryAllocator(Memory(memory.memory(), MEM_SHARED),
1083                                 memory.size(),
1084                                 0,
1085                                 id,
1086                                 name,
1087                                 kReadWrite),
1088       shared_memory_(std::move(memory)) {}
1089 
1090 WritableSharedPersistentMemoryAllocator::
1091     ~WritableSharedPersistentMemoryAllocator() = default;
1092 
1093 // static
1094 bool WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1095     const base::WritableSharedMemoryMapping& memory) {
1096   return IsMemoryAcceptable(memory.memory(), memory.size(), 0, false);
1097 }
1098 
1099 //----- ReadOnlySharedPersistentMemoryAllocator --------------------------------
1100 
1101 ReadOnlySharedPersistentMemoryAllocator::
1102     ReadOnlySharedPersistentMemoryAllocator(
1103         base::ReadOnlySharedMemoryMapping memory,
1104         uint64_t id,
1105         std::string_view name)
1106     : PersistentMemoryAllocator(
1107           Memory(const_cast<void*>(memory.memory()), MEM_SHARED),
1108           memory.size(),
1109           0,
1110           id,
1111           name,
1112           kReadOnly),
1113       shared_memory_(std::move(memory)) {}
1114 
1115 ReadOnlySharedPersistentMemoryAllocator::
1116     ~ReadOnlySharedPersistentMemoryAllocator() = default;
1117 
1118 // static
1119 bool ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1120     const base::ReadOnlySharedMemoryMapping& memory) {
1121   return IsMemoryAcceptable(memory.memory(), memory.size(), 0, true);
1122 }
1123 
1124 #if !BUILDFLAG(IS_NACL)
1125 //----- FilePersistentMemoryAllocator ------------------------------------------
1126 
1127 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
1128     std::unique_ptr<MemoryMappedFile> file,
1129     size_t max_size,
1130     uint64_t id,
1131     std::string_view name,
1132     AccessMode access_mode)
1133     : PersistentMemoryAllocator(
1134           Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
1135           max_size != 0 ? max_size : file->length(),
1136           0,
1137           id,
1138           name,
1139           access_mode),
1140       mapped_file_(std::move(file)) {}
1141 
1142 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
1143 
1144 // static
1145 bool FilePersistentMemoryAllocator::IsFileAcceptable(
1146     const MemoryMappedFile& file,
1147     bool readonly) {
1148   return IsMemoryAcceptable(file.data(), file.length(), 0, readonly);
1149 }
1150 
1151 void FilePersistentMemoryAllocator::Cache() {
1152   // Since this method is expected to load data from permanent storage
1153   // into memory, blocking I/O may occur.
1154   base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
1155                                                 base::BlockingType::MAY_BLOCK);
1156 
1157   // Calculate begin/end addresses so that the first byte of every page
1158   // in that range can be read. Keep within the used space. The |volatile|
1159   // keyword makes it so the compiler can't make assumptions about what is
1160   // in a given memory location and thus possibly avoid the read.
1161   const volatile char* mem_end = mem_base_ + used();
1162   const volatile char* mem_begin = mem_base_;
1163 
1164   // Iterate over the memory a page at a time, reading the first byte of
1165   // every page. The values are added to a |total| so that the compiler
1166   // can't omit the read.
1167   int total = 0;
1168   for (const volatile char* memory = mem_begin; memory < mem_end;
1169        memory += vm_page_size_) {
1170     total += *memory;
1171   }
1172 
1173   // Tell the compiler that |total| is used so that it can't optimize away
1174   // the memory accesses above.
1175   debug::Alias(&total);
1176 }
1177 
1178 void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
1179   if (IsReadonly())
1180     return;
1181 
1182   std::optional<base::ScopedBlockingCall> scoped_blocking_call;
1183   if (sync)
1184     scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
1185 
1186 #if BUILDFLAG(IS_WIN)
1187   // Windows doesn't support asynchronous flush.
1188   scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
1189   BOOL success = ::FlushViewOfFile(data(), length);
1190   DPCHECK(success);
1191 #elif BUILDFLAG(IS_APPLE)
1192   // On OSX, "invalidate" removes all cached pages, forcing a re-read from
1193   // disk. That's not applicable to "flush" so omit it.
1194   int result =
1195       ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
1196   DCHECK_NE(EINVAL, result);
1197 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1198   // On POSIX, "invalidate" forces _other_ processes to recognize what has
1199   // been written to disk and so is applicable to "flush".
1200   int result = ::msync(const_cast<void*>(data()), length,
1201                        MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
1202   DCHECK_NE(EINVAL, result);
1203 #else
1204 #error Unsupported OS.
1205 #endif
1206 }
1207 #endif  // !BUILDFLAG(IS_NACL)
1208 
1209 //----- DelayedPersistentAllocation --------------------------------------------
1210 
1211 DelayedPersistentAllocation::DelayedPersistentAllocation(
1212     PersistentMemoryAllocator* allocator,
1213     std::atomic<Reference>* ref,
1214     uint32_t type,
1215     size_t size,
1216     size_t offset)
1217     : allocator_(allocator),
1218       type_(type),
1219       size_(checked_cast<uint32_t>(size)),
1220       offset_(checked_cast<uint32_t>(offset)),
1221       reference_(ref) {
1222   DCHECK(allocator_);
1223   DCHECK_NE(0U, type_);
1224   DCHECK_LT(0U, size_);
1225   DCHECK(reference_);
1226 }
1227 
1228 DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
1229 
1230 span<uint8_t> DelayedPersistentAllocation::GetUntyped() const {
1231   // Relaxed operations are acceptable here because it's not protecting the
1232   // contents of the allocation in any way.
1233   Reference ref = reference_->load(std::memory_order_acquire);
1234 
1235 #if !BUILDFLAG(IS_NACL)
1236   // TODO(crbug.com/40064026): Remove these. They are used to investigate
1237   // unexpected failures.
1238   bool ref_found = (ref != 0);
1239   bool raced = false;
1240 #endif  // !BUILDFLAG(IS_NACL)
1241 
1242   if (!ref) {
1243     ref = allocator_->Allocate(size_, type_);
1244     if (!ref) {
1245       return span<uint8_t>();
1246     }
1247 
1248     // Store the new reference in its proper location using compare-and-swap.
1249     // Use a "strong" exchange to ensure no false-negatives since the operation
1250     // cannot be retried.
1251     Reference existing = 0;  // Must be mutable; receives actual value.
1252     if (!reference_->compare_exchange_strong(existing, ref,
1253                                              std::memory_order_release,
1254                                              std::memory_order_relaxed)) {
1255       // Failure indicates that something else has raced ahead, performed the
1256       // allocation, and stored its reference. Purge the allocation that was
1257       // just done and use the other one instead.
1258       DCHECK_EQ(type_, allocator_->GetType(existing));
1259       DCHECK_LE(size_, allocator_->GetAllocSize(existing));
1260       allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
1261       ref = existing;
1262 #if !BUILDFLAG(IS_NACL)
1263       raced = true;
1264 #endif  // !BUILDFLAG(IS_NACL)
1265     }
1266   }
1267 
1268   uint8_t* mem = allocator_->GetAsArray<uint8_t>(ref, type_, size_);
1269   if (!mem) {
1270 #if !BUILDFLAG(IS_NACL)
1271     // TODO(crbug.com/40064026): Remove these. They are used to investigate
1272     // unexpected failures.
1273     SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "full",
1274                           allocator_->IsFull());
1275     SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "corrupted",
1276                           allocator_->IsCorrupt());
1277     SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "freeptr",
1278                             allocator_->freeptr());
1279     // The allocator's cookie should always be `kGlobalCookie`. Add it to crash
1280     // keys to see if the file was corrupted externally, e.g. by a file
1281     // shredder. Cast to volatile to avoid compiler optimizations and ensure
1282     // that the actual value is read.
1283     SCOPED_CRASH_KEY_NUMBER(
1284         "PersistentMemoryAllocator", "cookie",
1285         static_cast<volatile PersistentMemoryAllocator::SharedMetadata*>(
1286             allocator_->shared_meta())
1287             ->cookie);
1288     SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "ref", ref);
1289     SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "ref_found", ref_found);
1290     SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "raced", raced);
1291     SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_", type_);
1292     SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_);
1293     if (ref == 0xC8799269) {
1294       // There are many crash reports containing the corrupted "0xC8799269"
1295       // value in |ref|. This value is actually a "magic" number to indicate
1296       // that a certain block in persistent memory was successfully allocated,
1297       // so it should not appear there. Include some extra crash keys to see if
1298       // the surrounding values were also corrupted. If so, the value before
1299       // would be the size of the allocated object, and the value after would be
1300       // the type id of the allocated object. If they are not corrupted, these
1301       // would contain |ranges_checksum| and the start of |samples_metadata|
1302       // respectively (see PersistentHistogramData struct). We do some pointer
1303       // arithmetic here -- it should theoretically be safe, unless something
1304       // went terribly wrong...
1305       SCOPED_CRASH_KEY_NUMBER(
1306           "PersistentMemoryAllocator", "ref_before",
1307           (reference_ - 1)->load(std::memory_order_relaxed));
1308       SCOPED_CRASH_KEY_NUMBER(
1309           "PersistentMemoryAllocator", "ref_after",
1310           (reference_ + 1)->load(std::memory_order_relaxed));
1311       DUMP_WILL_BE_NOTREACHED();
1312       return span<uint8_t>();
1313     }
1314 #endif  // !BUILDFLAG(IS_NACL)
1315     // This should never happen but be tolerant if it does as corruption from
1316     // the outside is something to guard against.
1317     DUMP_WILL_BE_NOTREACHED();
1318     return span<uint8_t>();
1319   }
1320   return span(mem + offset_, size_ - offset_);
1321 }
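// A minimal usage sketch for DelayedPersistentAllocation (illustrative only;
// |stored_ref| and |kMyDataTypeId| are hypothetical):
//
//   // Some long-lived structure holds the reference that will eventually
//   // point at the allocation.
//   std::atomic<PersistentMemoryAllocator::Reference> stored_ref{0};
//   DelayedPersistentAllocation delayed(allocator, &stored_ref,
//                                       /*type=*/kMyDataTypeId,
//                                       /*size=*/1024, /*offset=*/0);
//
//   // The allocation happens lazily on first access; later calls (even from
//   // racing threads) reuse the reference stored in |stored_ref|.
//   span<uint8_t> data = delayed.GetUntyped();
//   if (!data.empty()) {
//     // Use the persistent bytes.
//   }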
1322 
1323 }  // namespace base
1324