1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/persistent_memory_allocator.h"
6
7 #include <assert.h>
8
9 #include <algorithm>
10
11 #include "base/bits.h"
12 #include "base/debug/alias.h"
13 #include "base/debug/crash_logging.h"
14 #include "base/files/memory_mapped_file.h"
15 #include "base/logging.h"
16 #include "base/metrics/histogram_functions.h"
17 #include "base/metrics/sparse_histogram.h"
18 #include "base/notreached.h"
19 #include "base/numerics/checked_math.h"
20 #include "base/numerics/safe_conversions.h"
21 #include "base/strings/strcat.h"
22 #include "base/strings/string_piece.h"
23 #include "base/system/sys_info.h"
24 #include "base/threading/scoped_blocking_call.h"
25 #include "build/build_config.h"
26 #include "third_party/abseil-cpp/absl/types/optional.h"
27
28 #if BUILDFLAG(IS_WIN)
29 #include <windows.h>
30 // Must be after <windows.h>
31 #include <winbase.h>
32 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
33 #include <sys/mman.h>
34 #if BUILDFLAG(IS_ANDROID)
35 #include <sys/prctl.h>
36 #endif
37 #endif
38
39 namespace {
40
41 // Limit of memory segment size. It has to fit in an unsigned 32-bit number
42 // and should be a power of 2 in order to accommodate almost any page size.
43 constexpr uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB
44
45 // A constant (random) value placed in the shared metadata to identify
46 // an already initialized memory segment.
47 constexpr uint32_t kGlobalCookie = 0x408305DC;
48
49 // The current version of the metadata. If updates are made that change
50 // the metadata, the version number can be queried to operate in a backward-
51 // compatible manner until the memory segment is completely re-initialized.
52 constexpr uint32_t kGlobalVersion = 2;
53
54 // Constant values placed in the block headers to indicate its state.
55 constexpr uint32_t kBlockCookieFree = 0;
56 constexpr uint32_t kBlockCookieQueue = 1;
57 constexpr uint32_t kBlockCookieWasted = (uint32_t)-1;
58 constexpr uint32_t kBlockCookieAllocated = 0xC8799269;
59
60 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
61 // types rather than combined bitfield.
62
63 // Flags stored in the flags_ field of the SharedMetadata structure below.
64 constexpr uint32_t kFlagCorrupt = 1 << 0;
65 constexpr uint32_t kFlagFull = 1 << 1;
66
67 // Errors that are logged in "errors" histogram.
68 enum AllocatorError : int {
69 kMemoryIsCorrupt = 1,
70 };
71
72 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
73 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
74 return (loaded_flags & flag) != 0;
75 }
76
77 void SetFlag(volatile std::atomic<uint32_t>* flags, uint32_t flag) {
78 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
79 for (;;) {
80 uint32_t new_flags = (loaded_flags & ~flag) | flag;
81 // In the failure case, the actual "flags" value is stored in loaded_flags.
82 // These accesses are "relaxed" because they are completely independent
83 // of all other values.
84 if (flags->compare_exchange_weak(loaded_flags, new_flags,
85 std::memory_order_relaxed,
86 std::memory_order_relaxed)) {
87 break;
88 }
89 }
90 }
91
92 } // namespace
93
94 namespace base {
95
96 // The block-header is placed at the top of every allocation within the
97 // segment to describe the data that follows it.
98 struct PersistentMemoryAllocator::BlockHeader {
99 uint32_t size; // Number of bytes in this block, including header.
100 uint32_t cookie; // Constant value indicating completed allocation.
101 std::atomic<uint32_t> type_id; // Arbitrary number indicating data type.
102 std::atomic<uint32_t> next; // Pointer to the next block when iterating.
103 };
104
105 // The shared metadata exists once at the top of the memory segment to
106 // describe the state of the allocator to all processes. The size of this
107 // structure must be a multiple of 64-bits to ensure compatibility between
108 // architectures.
109 struct PersistentMemoryAllocator::SharedMetadata {
110 uint32_t cookie; // Some value that indicates complete initialization.
111 uint32_t size; // Total size of memory segment.
112 uint32_t page_size; // Paging size within memory segment.
113 uint32_t version; // Version code so upgrades don't break.
114 uint64_t id; // Arbitrary ID number given by creator.
115 uint32_t name; // Reference to stored name string.
116 uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
117
118 // Above is read-only after first construction. Below may be changed and
119 // so must be marked "volatile" to provide correct inter-process behavior.
120
121 // State of the memory, plus some padding to keep alignment.
122 volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
123 uint8_t padding2[3];
124
125 // Bitfield of information flags. Access to this should be done through
126 // the CheckFlag() and SetFlag() methods defined above.
127 volatile std::atomic<uint32_t> flags;
128
129 // Offset/reference to first free space in segment.
130 volatile std::atomic<uint32_t> freeptr;
131
132 // The "iterable" queue is an M&S Queue as described here, append-only:
133 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
134 // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
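  // Nodes are appended by MakeIterable() below: a block's |next| is first set
  // to kReferenceQueue (marking it as the new tail sentinel) and then the old
  // tail's |next| is swung from kReferenceQueue to the new block's reference.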
135 volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
136 volatile BlockHeader queue; // Empty block for linked-list head/tail.
137 };
138
139 // The "queue" block header is used to detect "last node" so that zero/null
140 // can be used to indicate that it hasn't been added at all. It is part of
141 // the SharedMetadata structure which itself is always located at offset zero.
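// With the explicitly padded 64-byte layout above, this works out to offset
// 48: 16 bytes of 32-bit header fields, 8 for |id|, 8 for |name| plus
// |padding1|, 4 for |memory_state| plus |padding2|, and 4 each for |flags|,
// |freeptr| and |tailptr| (assuming the compiler adds no padding beyond the
// explicit pad fields).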
142 const PersistentMemoryAllocator::Reference
143 PersistentMemoryAllocator::kReferenceQueue =
144 offsetof(SharedMetadata, queue);
145
146 const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
147 FILE_PATH_LITERAL(".pma");
148
149
150 PersistentMemoryAllocator::Iterator::Iterator(
151 const PersistentMemoryAllocator* allocator)
152 : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
153
154 PersistentMemoryAllocator::Iterator::Iterator(
155 const PersistentMemoryAllocator* allocator,
156 Reference starting_after)
157 : allocator_(allocator), last_record_(0), record_count_(0) {
158 Reset(starting_after);
159 }
160
161 PersistentMemoryAllocator::Iterator::~Iterator() = default;
162
163 void PersistentMemoryAllocator::Iterator::Reset() {
164 last_record_.store(kReferenceQueue, std::memory_order_relaxed);
165 record_count_.store(0, std::memory_order_relaxed);
166 }
167
168 void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
169 if (starting_after == 0) {
170 Reset();
171 return;
172 }
173
174 last_record_.store(starting_after, std::memory_order_relaxed);
175 record_count_.store(0, std::memory_order_relaxed);
176
177 // Ensure that the starting point is a valid, iterable block (meaning it can
178 // be read and has a non-zero "next" pointer).
179 const volatile BlockHeader* block =
180 allocator_->GetBlock(starting_after, 0, 0, false, false);
181 if (!block || block->next.load(std::memory_order_relaxed) == 0) {
182 NOTREACHED();
183 last_record_.store(kReferenceQueue, std::memory_order_release);
184 }
185 }
186
187 PersistentMemoryAllocator::Reference
188 PersistentMemoryAllocator::Iterator::GetLast() {
189 Reference last = last_record_.load(std::memory_order_relaxed);
190 if (last == kReferenceQueue)
191 return kReferenceNull;
192 return last;
193 }
194
195 PersistentMemoryAllocator::Reference
196 PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
197 // Make a copy of the existing count of found-records, acquiring all changes
198 // made to the allocator, notably "freeptr" (see comment in loop for why
199 // the load of that value cannot be moved above here) that occurred during
200 // any previous runs of this method, including those by parallel threads
201 // that interrupted it. It pairs with the Release at the end of this method.
202 //
203 // Otherwise, if the compiler were to arrange the two loads such that
204 // "count" was fetched _after_ "freeptr" then it would be possible for
205 // this thread to be interrupted between them and for other threads to perform
206 // multiple allocations, make-iterables, and iterations (with the included
207 // increment of |record_count_|) culminating in the check at the bottom
208 // mistakenly determining that a loop exists. Isn't this stuff fun?
209 uint32_t count = record_count_.load(std::memory_order_acquire);
210
211 Reference last = last_record_.load(std::memory_order_acquire);
212 Reference next;
213 while (true) {
214 const volatile BlockHeader* block =
215 allocator_->GetBlock(last, 0, 0, true, false);
216 if (!block) // Invalid iterator state.
217 return kReferenceNull;
218
219 // The compiler and CPU can freely reorder all memory accesses on which
220 // there are no dependencies. It could, for example, move the load of
221 // "freeptr" to above this point because there are no explicit dependencies
222 // between it and "next". If it did, however, then another block could
223 // be queued after that but before the following load meaning there is
224 // one more queued block than the future "detect loop by having more
225 // blocks that could fit before freeptr" will allow.
226 //
227 // By "acquiring" the "next" value here, it's synchronized to the enqueue
228 // of the node which in turn is synchronized to the allocation (which sets
229 // freeptr). Thus, the scenario above cannot happen.
230 next = block->next.load(std::memory_order_acquire);
231 if (next == kReferenceQueue) // No next allocation in queue.
232 return kReferenceNull;
233 block = allocator_->GetBlock(next, 0, 0, false, false);
234 if (!block) { // Memory is corrupt.
235 allocator_->SetCorrupt();
236 return kReferenceNull;
237 }
238
239 // Update the "last_record" pointer to be the reference being returned.
240 // If it fails then another thread has already iterated past it so loop
241 // again. Failing will also load the existing value into "last" so there
242 // is no need to do another such load when the while-loop restarts. A
243 // "strong" compare-exchange is used because failing unnecessarily would
244 // mean repeating some fairly costly validations above.
245 if (last_record_.compare_exchange_strong(
246 last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
247 *type_return = block->type_id.load(std::memory_order_relaxed);
248 break;
249 }
250 }
251
252 // Memory corruption could cause a loop in the list. Such must be detected
253 // so as to not cause an infinite loop in the caller. This is done by simply
254 // making sure it doesn't iterate more times than the absolute maximum
255 // number of allocations that could have been made. Callers are likely
256 // to loop multiple times before it is detected but at least it stops.
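  // As a rough worked example (assuming kAllocAlignment is 8, per the
  // header), every record consumes at least sizeof(BlockHeader) + 8 = 24
  // bytes, so a freeptr of 64 KiB bounds the record count at about 2730.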
257 const uint32_t freeptr = std::min(
258 allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
259 allocator_->mem_size_);
260 const uint32_t max_records =
261 freeptr / (sizeof(BlockHeader) + kAllocAlignment);
262 if (count > max_records) {
263 allocator_->SetCorrupt();
264 return kReferenceNull;
265 }
266
267 // Increment the count and release the changes made above. It pairs with
268 // the Acquire at the top of this method. Note that this operation is not
269 // strictly synchronized with fetching of the object to return, which would
270 // have to be done inside the loop and is somewhat complicated to achieve.
271 // It does not matter if it falls behind temporarily so long as it never
272 // gets ahead.
273 record_count_.fetch_add(1, std::memory_order_release);
274 return next;
275 }
276
277 PersistentMemoryAllocator::Reference
278 PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
279 Reference ref;
280 uint32_t type_found;
281 while ((ref = GetNext(&type_found)) != 0) {
282 if (type_found == type_match)
283 return ref;
284 }
285 return kReferenceNull;
286 }
287
288
289 // static
290 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
291 size_t size,
292 size_t page_size,
293 bool readonly) {
294 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
295 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
296 (size % kAllocAlignment == 0 || readonly) &&
297 (page_size == 0 || size % page_size == 0 || readonly));
298 }
299
300 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
301 size_t size,
302 size_t page_size,
303 uint64_t id,
304 base::StringPiece name,
305 bool readonly)
306 : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
307 size,
308 page_size,
309 id,
310 name,
311 readonly) {}
312
313 PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
314 size_t size,
315 size_t page_size,
316 uint64_t id,
317 base::StringPiece name,
318 bool readonly)
319 : mem_base_(static_cast<char*>(memory.base)),
320 mem_type_(memory.type),
321 mem_size_(checked_cast<uint32_t>(size)),
322 mem_page_(checked_cast<uint32_t>((page_size ? page_size : size))),
323 #if BUILDFLAG(IS_NACL)
324 vm_page_size_(4096U), // SysInfo is not built for NACL.
325 #else
326 vm_page_size_(SysInfo::VMAllocationGranularity()),
327 #endif
328 readonly_(readonly),
329 corrupt_(false),
330 allocs_histogram_(nullptr),
331 used_histogram_(nullptr),
332 errors_histogram_(nullptr) {
333 // These asserts ensure that the structures are 32/64-bit agnostic and meet
334 // all the requirements of use within the allocator. They access private
335 // definitions and so cannot be moved to the global scope.
336 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
337 "struct is not portable across different natural word widths");
338 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
339 "struct is not portable across different natural word widths");
340
341 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
342 "BlockHeader is not a multiple of kAllocAlignment");
343 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
344 "SharedMetadata is not a multiple of kAllocAlignment");
345 static_assert(kReferenceQueue % kAllocAlignment == 0,
346 "\"queue\" is not aligned properly; must be at end of struct");
347
348 // Ensure that memory segment is of acceptable size.
349 CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
350
351 // These atomics operate inter-process and so must be lock-free.
352 DCHECK(SharedMetadata().freeptr.is_lock_free());
353 DCHECK(SharedMetadata().flags.is_lock_free());
354 DCHECK(BlockHeader().next.is_lock_free());
355 CHECK(corrupt_.is_lock_free());
356
357 if (shared_meta()->cookie != kGlobalCookie) {
358 if (readonly) {
359 SetCorrupt();
360 return;
361 }
362
363 // This block is only executed when a completely new memory segment is
364 // being initialized. It's unshared and single-threaded...
365 volatile BlockHeader* const first_block =
366 reinterpret_cast<volatile BlockHeader*>(mem_base_ +
367 sizeof(SharedMetadata));
368 if (shared_meta()->cookie != 0 ||
369 shared_meta()->size != 0 ||
370 shared_meta()->version != 0 ||
371 shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
372 shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
373 shared_meta()->id != 0 ||
374 shared_meta()->name != 0 ||
375 shared_meta()->tailptr != 0 ||
376 shared_meta()->queue.cookie != 0 ||
377 shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
378 first_block->size != 0 ||
379 first_block->cookie != 0 ||
380 first_block->type_id.load(std::memory_order_relaxed) != 0 ||
381 first_block->next != 0) {
382 // ...or something malicious has been playing with the metadata.
383 SetCorrupt();
384 }
385
386 // This is still safe to do even if corruption has been detected.
387 shared_meta()->cookie = kGlobalCookie;
388 shared_meta()->size = mem_size_;
389 shared_meta()->page_size = mem_page_;
390 shared_meta()->version = kGlobalVersion;
391 shared_meta()->id = id;
392 shared_meta()->freeptr.store(sizeof(SharedMetadata),
393 std::memory_order_release);
394
395 // Set up the queue of iterable allocations.
396 shared_meta()->queue.size = sizeof(BlockHeader);
397 shared_meta()->queue.cookie = kBlockCookieQueue;
398 shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
399 shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
400
401 // Allocate space for the name so other processes can learn it.
402 if (!name.empty()) {
403 const size_t name_length = name.length() + 1;
404 shared_meta()->name = Allocate(name_length, 0);
405 char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
406 if (name_cstr)
407 memcpy(name_cstr, name.data(), name.length());
408 }
409
410 shared_meta()->memory_state.store(MEMORY_INITIALIZED,
411 std::memory_order_release);
412 } else {
413 if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
414 shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
415 shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
416 shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
417 SetCorrupt();
418 }
419 if (!readonly) {
420 // The allocator is attaching to a previously initialized segment of
421 // memory. If the initialization parameters differ, make the best of it
422 // by reducing the local construction parameters to match those of
423 // the actual memory area. This ensures that the local object never
424 // tries to write outside of the original bounds.
425 // Because the fields are const to ensure that no code other than the
426 // constructor makes changes to them as well as to give optimization
427 // hints to the compiler, it's necessary to const-cast them for changes
428 // here.
429 if (shared_meta()->size < mem_size_)
430 *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
431 if (shared_meta()->page_size < mem_page_)
432 *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
433
434 // Ensure that settings are still valid after the above adjustments.
435 if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
436 SetCorrupt();
437 }
438 }
439 }
440
441 PersistentMemoryAllocator::~PersistentMemoryAllocator() {
442 // It's strictly forbidden to do any memory access here in case there is
443 // some issue with the underlying memory segment. The "Local" allocator
444 // makes use of this to allow deletion of the segment on the heap from
445 // within its destructor.
446 }
447
448 uint64_t PersistentMemoryAllocator::Id() const {
449 return shared_meta()->id;
450 }
451
452 const char* PersistentMemoryAllocator::Name() const {
453 Reference name_ref = shared_meta()->name;
454 const char* name_cstr =
455 GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
456 if (!name_cstr)
457 return "";
458
459 size_t name_length = GetAllocSize(name_ref);
460 if (name_cstr[name_length - 1] != '\0') {
461 NOTREACHED();
462 SetCorrupt();
463 return "";
464 }
465
466 return name_cstr;
467 }
468
469 void PersistentMemoryAllocator::CreateTrackingHistograms(
470 base::StringPiece name) {
471 if (name.empty() || readonly_)
472 return;
473 std::string name_string(name);
474
475 #if 0
476 // This histogram wasn't being used so has been disabled. It is left here
477 // in case development of a new use of the allocator could benefit from
478 // recording (temporarily and locally) the allocation sizes.
479 DCHECK(!allocs_histogram_);
480 allocs_histogram_ = Histogram::FactoryGet(
481 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
482 HistogramBase::kUmaTargetedHistogramFlag);
483 #endif
484
485 DCHECK(!used_histogram_);
486 used_histogram_ = LinearHistogram::FactoryGet(
487 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
488 HistogramBase::kUmaTargetedHistogramFlag);
489
490 DCHECK(!errors_histogram_);
491 errors_histogram_ = SparseHistogram::FactoryGet(
492 "UMA.PersistentAllocator." + name_string + ".Errors",
493 HistogramBase::kUmaTargetedHistogramFlag);
494 }
495
496 void PersistentMemoryAllocator::Flush(bool sync) {
497 FlushPartial(used(), sync);
498 }
499
500 void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
501 shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
502 FlushPartial(sizeof(SharedMetadata), false);
503 }
504
505 uint8_t PersistentMemoryAllocator::GetMemoryState() const {
506 return shared_meta()->memory_state.load(std::memory_order_relaxed);
507 }
508
509 size_t PersistentMemoryAllocator::used() const {
510 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
511 mem_size_);
512 }
513
514 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
515 const void* memory,
516 uint32_t type_id) const {
517 uintptr_t address = reinterpret_cast<uintptr_t>(memory);
518 if (address < reinterpret_cast<uintptr_t>(mem_base_))
519 return kReferenceNull;
520
521 uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
522 if (offset >= mem_size_ || offset < sizeof(BlockHeader))
523 return kReferenceNull;
524
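  // Callers hold pointers to a block's data payload; the Reference identifies
  // the block itself, which begins sizeof(BlockHeader) bytes earlier.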
525 Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
526 if (!GetBlockData(ref, type_id, kSizeAny))
527 return kReferenceNull;
528
529 return ref;
530 }
531
532 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
533 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
534 if (!block)
535 return 0;
536 uint32_t size = block->size;
537 // Header was verified by GetBlock() but a malicious actor could change
538 // the value between there and here. Check it again.
539 uint32_t total_size;
540 if (size <= sizeof(BlockHeader) ||
541 !base::CheckAdd(ref, size).AssignIfValid(&total_size) ||
542 total_size > mem_size_) {
543 SetCorrupt();
544 return 0;
545 }
546 return size - sizeof(BlockHeader);
547 }
548
549 uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
550 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
551 if (!block)
552 return 0;
553 return block->type_id.load(std::memory_order_relaxed);
554 }
555
556 bool PersistentMemoryAllocator::ChangeType(Reference ref,
557 uint32_t to_type_id,
558 uint32_t from_type_id,
559 bool clear) {
560 DCHECK(!readonly_);
561 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
562 if (!block)
563 return false;
564
565 // "Strong" exchanges are used below because there is no loop that can retry
566 // in the wake of spurious failures possible with "weak" exchanges. It is,
567 // in aggregate, an "acquire-release" operation so no memory accesses can be
568 // reordered either before or after this method (since changes based on type
569 // could happen on either side).
570
571 if (clear) {
572 // If clearing the memory, first change it to the "transitioning" type so
573 // there can be no confusion by other threads. After the memory is cleared,
574 // it can be changed to its final type.
575 if (!block->type_id.compare_exchange_strong(
576 from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
577 std::memory_order_acquire)) {
578 // Existing type wasn't what was expected: fail (with no changes)
579 return false;
580 }
581
582 // Clear the memory in an atomic manner. Using "release" stores force
583 // every write to be done after the ones before it. This is better than
584 // using memset because (a) it supports "volatile" and (b) it creates a
585 // reliable pattern upon which other threads may rely.
586 volatile std::atomic<int>* data =
587 reinterpret_cast<volatile std::atomic<int>*>(
588 reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
589 const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
590 DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
591 for (uint32_t i = 0; i < words; ++i) {
592 data->store(0, std::memory_order_release);
593 ++data;
594 }
595
596 // If the destination type is "transitioning" then skip the final exchange.
597 if (to_type_id == kTypeIdTransitioning)
598 return true;
599
600 // Finish the change to the desired type.
601 from_type_id = kTypeIdTransitioning; // Exchange needs modifiable original.
602 bool success = block->type_id.compare_exchange_strong(
603 from_type_id, to_type_id, std::memory_order_release,
604 std::memory_order_relaxed);
605 DCHECK(success); // Should never fail.
606 return success;
607 }
608
609 // One step change to the new type. Will return false if the existing value
610 // doesn't match what is expected.
611 return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
612 std::memory_order_acq_rel,
613 std::memory_order_acquire);
614 }
615
616 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
617 size_t req_size,
618 uint32_t type_id) {
619 Reference ref = AllocateImpl(req_size, type_id);
620 if (ref) {
621 // Success: Record this allocation in usage stats (if active).
622 if (allocs_histogram_)
623 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
624 } else {
625 // Failure: Record an allocation of zero for tracking.
626 if (allocs_histogram_)
627 allocs_histogram_->Add(0);
628 }
629 return ref;
630 }
631
632 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
633 size_t req_size,
634 uint32_t type_id) {
635 DCHECK(!readonly_);
636
637 // Validate req_size to ensure it won't overflow when used as 32-bit value.
638 if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
639 NOTREACHED();
640 return kReferenceNull;
641 }
642
643 // Round up the requested size, plus header, to the next allocation alignment.
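  // For example, assuming kAllocAlignment is 8, a 20-byte request becomes
  // AlignUp(20 + 16, 8) == 40 bytes including the header.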
644 size_t size = bits::AlignUp(req_size + sizeof(BlockHeader), kAllocAlignment);
645 if (size <= sizeof(BlockHeader) || size > mem_page_) {
646 NOTREACHED();
647 return kReferenceNull;
648 }
649
650 // Get the current start of unallocated memory. Other threads may
651 // update this at any time and cause us to retry these operations.
652 // This value should be treated as "const" to avoid confusion through
653 // the code below but recognize that any failed compare-exchange operation
654 // involving it will cause it to be loaded with a more recent value. The
655 // code should either exit or restart the loop in that case.
656 /* const */ uint32_t freeptr =
657 shared_meta()->freeptr.load(std::memory_order_acquire);
658
659 // Allocation is lockless so we do all our calculation and then, if saving
660 // indicates a change has occurred since we started, scrap everything and
661 // start over.
662 for (;;) {
663 if (IsCorrupt())
664 return kReferenceNull;
665
666 if (freeptr + size > mem_size_) {
667 SetFlag(&shared_meta()->flags, kFlagFull);
668 return kReferenceNull;
669 }
670
671 // Get pointer to the "free" block. If something has been allocated since
672 // the load of freeptr above, it is still safe as nothing will be written
673 // to that location until after the compare-exchange below.
674 volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
675 if (!block) {
676 SetCorrupt();
677 return kReferenceNull;
678 }
679
680 // An allocation cannot cross page boundaries. If it would, create a
681 // "wasted" block and begin again at the top of the next page. This
682 // area could just be left empty but we fill in the block header just
683 // for completeness' sake.
684 const uint32_t page_free = mem_page_ - freeptr % mem_page_;
685 if (size > page_free) {
686 if (page_free <= sizeof(BlockHeader)) {
687 SetCorrupt();
688 return kReferenceNull;
689 }
690 const uint32_t new_freeptr = freeptr + page_free;
691 if (shared_meta()->freeptr.compare_exchange_strong(
692 freeptr, new_freeptr, std::memory_order_acq_rel,
693 std::memory_order_acquire)) {
694 block->size = page_free;
695 block->cookie = kBlockCookieWasted;
696 }
697 continue;
698 }
699
700 // Don't leave a slice at the end of a page too small for anything. This
701 // can result in an allocation up to two alignment-sizes greater than the
702 // minimum required by requested-size + header + alignment.
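  // For example, if 48 bytes remain in the page and the rounded request is
  // 40, the 8 leftover bytes could never hold another header plus aligned
  // data, so this allocation absorbs the full 48 bytes instead.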
703 if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) {
704 size = page_free;
705 if (freeptr + size > mem_size_) {
706 SetCorrupt();
707 return kReferenceNull;
708 }
709 }
710
711 // This cast is safe because (freeptr + size) <= mem_size_.
712 const uint32_t new_freeptr = static_cast<uint32_t>(freeptr + size);
713
714 // Save our work. Try again if another thread has completed an allocation
715 // while we were processing. A "weak" exchange would be permissible here
716 // because the code will just loop and try again but the above processing
717 // is significant so make the extra effort of a "strong" exchange.
718 if (!shared_meta()->freeptr.compare_exchange_strong(
719 freeptr, new_freeptr, std::memory_order_acq_rel,
720 std::memory_order_acquire)) {
721 continue;
722 }
723
724 // Given that all memory was zeroed before ever being given to an instance
725 // of this class and given that we only allocate in a monotonic fashion
726 // going forward, it must be that the newly allocated block is completely
727 // full of zeros. If we find anything in the block header that is NOT a
728 // zero then something must have previously run amuck through memory,
729 // writing beyond the allocated space and into unallocated space.
730 if (block->size != 0 ||
731 block->cookie != kBlockCookieFree ||
732 block->type_id.load(std::memory_order_relaxed) != 0 ||
733 block->next.load(std::memory_order_relaxed) != 0) {
734 SetCorrupt();
735 return kReferenceNull;
736 }
737
738 // Make sure the memory exists by writing to the first byte of every memory
739 // page it touches beyond the one containing the block header itself.
740 // As the underlying storage is often memory mapped from disk or shared
741 // space, sometimes things go wrong and those addresses don't actually exist
742 // leading to a SIGBUS (or Windows equivalent) at some arbitrary location
743 // in the code. This should concentrate all those failures into this
744 // location for easy tracking and, eventually, proper handling.
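  // |mem_begin| rounds the first byte past the block header up to the next
  // VM-page boundary via (addr + vm_page_size_ - 1) & ~(vm_page_size_ - 1),
  // which assumes vm_page_size_ is a power of two.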
745 volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
746 volatile char* mem_begin = reinterpret_cast<volatile char*>(
747 (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
748 (vm_page_size_ - 1)) &
749 ~static_cast<uintptr_t>(vm_page_size_ - 1));
750 for (volatile char* memory = mem_begin; memory < mem_end;
751 memory += vm_page_size_) {
752 // It's required that a memory segment start as all zeros and thus the
753 // newly allocated block is all zeros at this point. Thus, writing a
754 // zero to it allows testing that the memory exists without actually
755 // changing its contents. The compiler doesn't know about the requirement
756 // and so cannot optimize-away these writes.
757 *memory = 0;
758 }
759
760 // Load information into the block header. There is no "release" of the
761 // data here because this memory can, currently, be seen only by the thread
762 // performing the allocation. When it comes time to share this, the thread
763 // will call MakeIterable() which does the release operation.
764 // `size` is at most kSegmentMaxSize, so this cast is safe.
765 block->size = static_cast<uint32_t>(size);
766 block->cookie = kBlockCookieAllocated;
767 block->type_id.store(type_id, std::memory_order_relaxed);
768 return freeptr;
769 }
770 }
771
772 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
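  // Clamping |remaining| to sizeof(BlockHeader) makes a completely full
  // segment report zero free bytes rather than underflowing the subtraction
  // below.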
773 uint32_t remaining = std::max(
774 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
775 (uint32_t)sizeof(BlockHeader));
776 meminfo->total = mem_size_;
777 meminfo->free = remaining - sizeof(BlockHeader);
778 }
779
780 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
781 DCHECK(!readonly_);
782 if (IsCorrupt())
783 return;
784 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
785 if (!block) // invalid reference
786 return;
787 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable.
788 return;
789 block->next.store(kReferenceQueue, std::memory_order_release); // New tail.
790
791 // Try to add this block to the tail of the queue. May take multiple tries.
792 // If so, tail will be automatically updated with a more recent value during
793 // compare-exchange operations.
794 uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
795 for (;;) {
796 // Acquire the current tail-pointer released by previous call to this
797 // method and validate it.
798 block = GetBlock(tail, 0, 0, true, false);
799 if (!block) {
800 SetCorrupt();
801 return;
802 }
803
804 // Try to insert the block at the tail of the queue. The tail node always
805 // has an existing value of kReferenceQueue; if that is somehow not the
806 // existing value then another thread has acted in the meantime. A "strong"
807 // exchange is necessary so the "else" block does not get executed when
808 // that is not actually the case (which can happen with a "weak" exchange).
809 uint32_t next = kReferenceQueue; // Will get replaced with existing value.
810 if (block->next.compare_exchange_strong(next, ref,
811 std::memory_order_acq_rel,
812 std::memory_order_acquire)) {
813 // Update the tail pointer to the new offset. If the "else" clause did
814 // not exist, then this could be a simple Release_Store to set the new
815 // value but because it does, it's possible that other threads could add
816 // one or more nodes at the tail before reaching this point. We don't
817 // have to check the return value because it either operates correctly
818 // or the exact same operation has already been done (by the "else"
819 // clause) on some other thread.
820 shared_meta()->tailptr.compare_exchange_strong(tail, ref,
821 std::memory_order_release,
822 std::memory_order_relaxed);
823 return;
824 }
825 // In the unlikely case that a thread crashed or was killed between the
826 // update of "next" and the update of "tailptr", it is necessary to
827 // perform the operation that would have been done. There's no explicit
828 // check for crash/kill which means that this operation may also happen
829 // even when the other thread is in perfect working order which is what
830 // necessitates the CompareAndSwap above.
831 shared_meta()->tailptr.compare_exchange_strong(
832 tail, next, std::memory_order_acq_rel, std::memory_order_acquire);
833 }
834 }
835
836 // The "corrupted" state is held both locally and globally (shared). The
837 // shared flag can't be trusted since a malicious actor could overwrite it.
838 // Because corruption can be detected during read-only operations such as
839 // iteration, this method may be called by other "const" methods. In this
840 // case, it's safe to discard the constness and modify the local flag and
841 // maybe even the shared flag if the underlying data isn't actually read-only.
842 void PersistentMemoryAllocator::SetCorrupt() const {
843 if (!corrupt_.load(std::memory_order_relaxed) &&
844 !CheckFlag(
845 const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
846 kFlagCorrupt)) {
847 LOG(ERROR) << "Corruption detected in shared-memory segment.";
848 RecordError(kMemoryIsCorrupt);
849 }
850
851 corrupt_.store(true, std::memory_order_relaxed);
852 if (!readonly_) {
853 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
854 kFlagCorrupt);
855 }
856 }
857
858 bool PersistentMemoryAllocator::IsCorrupt() const {
859 if (corrupt_.load(std::memory_order_relaxed) ||
860 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
861 SetCorrupt(); // Make sure all indicators are set.
862 return true;
863 }
864 return false;
865 }
866
867 bool PersistentMemoryAllocator::IsFull() const {
868 return CheckFlag(&shared_meta()->flags, kFlagFull);
869 }
870
871 // Dereference a block |ref| and ensure that it's valid for the desired
872 // |type_id| and |size|. |queue_ok| and |free_ok| indicate that we may try to
873 // access block headers not available to callers but still accessed by this
874 // module. By having internal dereferences go through this same function, the
875 // allocator is hardened against corruption.
876 const volatile PersistentMemoryAllocator::BlockHeader*
877 PersistentMemoryAllocator::GetBlock(Reference ref,
878 uint32_t type_id,
879 size_t size,
880 bool queue_ok,
881 bool free_ok) const {
882 // Handle special cases.
883 if (ref == kReferenceQueue && queue_ok)
884 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
885
886 // Validation of parameters.
887 if (ref < sizeof(SharedMetadata))
888 return nullptr;
889 if (ref % kAllocAlignment != 0)
890 return nullptr;
891 size += sizeof(BlockHeader);
892 uint32_t total_size;
893 if (!base::CheckAdd(ref, size).AssignIfValid(&total_size)) {
894 return nullptr;
895 }
896 if (total_size > mem_size_) {
897 return nullptr;
898 }
899
900 // Validation of referenced block-header.
901 if (!free_ok) {
902 const volatile BlockHeader* const block =
903 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
904 if (block->cookie != kBlockCookieAllocated)
905 return nullptr;
906 if (block->size < size)
907 return nullptr;
908 uint32_t block_size;
909 if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) {
910 return nullptr;
911 }
912 if (block_size > mem_size_) {
913 return nullptr;
914 }
915 if (type_id != 0 &&
916 block->type_id.load(std::memory_order_relaxed) != type_id) {
917 return nullptr;
918 }
919 }
920
921 // Return pointer to block data.
922 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
923 }
924
925 void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
926 // Generally there is nothing to do as every write is done through volatile
927 // memory with atomic instructions to guarantee consistency. This (virtual)
928 // method exists so that derived classes can do special things, such as
929 // tell the OS to write changes to disk now rather than when convenient.
930 }
931
932 void PersistentMemoryAllocator::RecordError(int error) const {
933 if (errors_histogram_)
934 errors_histogram_->Add(error);
935 }
936
937 const volatile void* PersistentMemoryAllocator::GetBlockData(
938 Reference ref,
939 uint32_t type_id,
940 size_t size) const {
941 DCHECK(size > 0);
942 const volatile BlockHeader* block =
943 GetBlock(ref, type_id, size, false, false);
944 if (!block)
945 return nullptr;
946 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
947 }
948
949 void PersistentMemoryAllocator::UpdateTrackingHistograms() {
950 DCHECK(!readonly_);
951 if (used_histogram_) {
952 MemoryInfo meminfo;
953 GetMemoryInfo(&meminfo);
954 HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
955 ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
956 used_histogram_->Add(used_percent);
957 }
958 }
959
960
961 //----- LocalPersistentMemoryAllocator -----------------------------------------
962
963 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
964 size_t size,
965 uint64_t id,
966 base::StringPiece name)
967 : PersistentMemoryAllocator(AllocateLocalMemory(size, name),
968 size,
969 0,
970 id,
971 name,
972 false) {}
973
974 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
975 DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
976 }
977
978 // static
979 PersistentMemoryAllocator::Memory
980 LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size,
981 base::StringPiece name) {
982 void* address;
983
984 #if BUILDFLAG(IS_WIN)
985 address =
986 ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
987 if (address)
988 return Memory(address, MEM_VIRTUAL);
989 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
990 // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
991 // MAP_SHARED is not available on Linux <2.4 but required on Mac.
992 address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
993 MAP_ANON | MAP_SHARED, -1, 0);
994 if (address != MAP_FAILED) {
995 #if BUILDFLAG(IS_ANDROID)
996 // Allow the anonymous memory region allocated by mmap(MAP_ANON) to be
997 // identified in /proc/$PID/smaps. This helps improve visibility into
998 // Chrome's memory usage on Android.
999 const std::string arena_name = base::StrCat({"persistent:", name});
1000 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, address, size, arena_name.c_str());
1001 #endif
1002 return Memory(address, MEM_VIRTUAL);
1003 }
1004 #else
1005 #error This architecture is not (yet) supported.
1006 #endif
1007
1008 // As a last resort, just allocate the memory from the heap. This will
1009 // achieve the same basic result but the acquired memory has to be
1010 // explicitly zeroed and thus realized immediately (i.e. all pages are
1011 // added to the process now instead of only when first accessed).
1012 address = malloc(size);
1013 DPCHECK(address);
1014 memset(address, 0, size);
1015 return Memory(address, MEM_MALLOC);
1016 }
1017
1018 // static
1019 void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
1020 size_t size,
1021 MemoryType type) {
1022 if (type == MEM_MALLOC) {
1023 free(memory);
1024 return;
1025 }
1026
1027 DCHECK_EQ(MEM_VIRTUAL, type);
1028 #if BUILDFLAG(IS_WIN)
1029 BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
1030 DCHECK(success);
1031 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1032 int result = ::munmap(memory, size);
1033 DCHECK_EQ(0, result);
1034 #else
1035 #error This architecture is not (yet) supported.
1036 #endif
1037 }
1038
1039 //----- WritableSharedPersistentMemoryAllocator --------------------------------
1040
1041 WritableSharedPersistentMemoryAllocator::
1042 WritableSharedPersistentMemoryAllocator(
1043 base::WritableSharedMemoryMapping memory,
1044 uint64_t id,
1045 base::StringPiece name)
1046 : PersistentMemoryAllocator(Memory(memory.memory(), MEM_SHARED),
1047 memory.size(),
1048 0,
1049 id,
1050 name,
1051 false),
1052 shared_memory_(std::move(memory)) {}
1053
1054 WritableSharedPersistentMemoryAllocator::
1055 ~WritableSharedPersistentMemoryAllocator() = default;
1056
1057 // static
1058 bool WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1059 const base::WritableSharedMemoryMapping& memory) {
1060 return IsMemoryAcceptable(memory.memory(), memory.size(), 0, false);
1061 }
1062
1063 //----- ReadOnlySharedPersistentMemoryAllocator --------------------------------
1064
1065 ReadOnlySharedPersistentMemoryAllocator::
1066 ReadOnlySharedPersistentMemoryAllocator(
1067 base::ReadOnlySharedMemoryMapping memory,
1068 uint64_t id,
1069 base::StringPiece name)
1070 : PersistentMemoryAllocator(
1071 Memory(const_cast<void*>(memory.memory()), MEM_SHARED),
1072 memory.size(),
1073 0,
1074 id,
1075 name,
1076 true),
1077 shared_memory_(std::move(memory)) {}
1078
1079 ReadOnlySharedPersistentMemoryAllocator::
1080 ~ReadOnlySharedPersistentMemoryAllocator() = default;
1081
1082 // static
1083 bool ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1084 const base::ReadOnlySharedMemoryMapping& memory) {
1085 return IsMemoryAcceptable(memory.memory(), memory.size(), 0, true);
1086 }
1087
1088 #if !BUILDFLAG(IS_NACL)
1089 //----- FilePersistentMemoryAllocator ------------------------------------------
1090
1091 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
1092 std::unique_ptr<MemoryMappedFile> file,
1093 size_t max_size,
1094 uint64_t id,
1095 base::StringPiece name,
1096 bool read_only)
1097 : PersistentMemoryAllocator(
1098 Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
1099 max_size != 0 ? max_size : file->length(),
1100 0,
1101 id,
1102 name,
1103 read_only),
1104 mapped_file_(std::move(file)) {}
1105
1106 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
1107
1108 // static
1109 bool FilePersistentMemoryAllocator::IsFileAcceptable(
1110 const MemoryMappedFile& file,
1111 bool read_only) {
1112 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
1113 }
1114
1115 void FilePersistentMemoryAllocator::Cache() {
1116 // Since this method is expected to load data from permanent storage
1117 // into memory, blocking I/O may occur.
1118 base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
1119 base::BlockingType::MAY_BLOCK);
1120
1121 // Calculate begin/end addresses so that the first byte of every page
1122 // in that range can be read. Keep within the used space. The |volatile|
1123 // keyword makes it so the compiler can't make assumptions about what is
1124 // in a given memory location and thus possibly avoid the read.
1125 const volatile char* mem_end = mem_base_ + used();
1126 const volatile char* mem_begin = mem_base_;
1127
1128 // Iterate over the memory a page at a time, reading the first byte of
1129 // every page. The values are added to a |total| so that the compiler
1130 // can't omit the read.
1131 int total = 0;
1132 for (const volatile char* memory = mem_begin; memory < mem_end;
1133 memory += vm_page_size_) {
1134 total += *memory;
1135 }
1136
1137 // Tell the compiler that |total| is used so that it can't optimize away
1138 // the memory accesses above.
1139 debug::Alias(&total);
1140 }
1141
1142 void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
1143 if (IsReadonly())
1144 return;
1145
1146 absl::optional<base::ScopedBlockingCall> scoped_blocking_call;
1147 if (sync)
1148 scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
1149
1150 #if BUILDFLAG(IS_WIN)
1151 // Windows doesn't support asynchronous flush.
1152 scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
1153 BOOL success = ::FlushViewOfFile(data(), length);
1154 DPCHECK(success);
1155 #elif BUILDFLAG(IS_APPLE)
1156 // On OSX, "invalidate" removes all cached pages, forcing a re-read from
1157 // disk. That's not applicable to "flush" so omit it.
1158 int result =
1159 ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
1160 DCHECK_NE(EINVAL, result);
1161 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1162 // On POSIX, "invalidate" forces _other_ processes to recognize what has
1163 // been written to disk and so is applicable to "flush".
1164 int result = ::msync(const_cast<void*>(data()), length,
1165 MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
1166 DCHECK_NE(EINVAL, result);
1167 #else
1168 #error Unsupported OS.
1169 #endif
1170 }
1171 #endif // !BUILDFLAG(IS_NACL)
1172
1173 //----- DelayedPersistentAllocation --------------------------------------------
1174
1175 DelayedPersistentAllocation::DelayedPersistentAllocation(
1176 PersistentMemoryAllocator* allocator,
1177 std::atomic<Reference>* ref,
1178 uint32_t type,
1179 size_t size,
1180 size_t offset)
1181 : allocator_(allocator),
1182 type_(type),
1183 size_(checked_cast<uint32_t>(size)),
1184 offset_(checked_cast<uint32_t>(offset)),
1185 reference_(ref) {
1186 DCHECK(allocator_);
1187 DCHECK_NE(0U, type_);
1188 DCHECK_LT(0U, size_);
1189 DCHECK(reference_);
1190 }
1191
1192 DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
1193
1194 void* DelayedPersistentAllocation::Get() const {
1195 // Relaxed operations are acceptable here because it's not protecting the
1196 // contents of the allocation in any way.
1197 Reference ref = reference_->load(std::memory_order_acquire);
1198
1199 #if !BUILDFLAG(IS_NACL)
1200 // TODO(crbug/1432981): Remove these. They are used to investigate unexpected
1201 // failures.
1202 bool ref_found = (ref != 0);
1203 bool raced = false;
1204 #endif // !BUILDFLAG(IS_NACL)
1205
1206 if (!ref) {
1207 ref = allocator_->Allocate(size_, type_);
1208 if (!ref)
1209 return nullptr;
1210
1211 // Store the new reference in its proper location using compare-and-swap.
1212 // Use a "strong" exchange to ensure no false-negatives since the operation
1213 // cannot be retried.
1214 Reference existing = 0; // Must be mutable; receives actual value.
1215 if (!reference_->compare_exchange_strong(existing, ref,
1216 std::memory_order_release,
1217 std::memory_order_relaxed)) {
1218 // Failure indicates that something else has raced ahead, performed the
1219 // allocation, and stored its reference. Purge the allocation that was
1220 // just done and use the other one instead.
1221 DCHECK_EQ(type_, allocator_->GetType(existing));
1222 DCHECK_LE(size_, allocator_->GetAllocSize(existing));
1223 allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
1224 ref = existing;
1225 #if !BUILDFLAG(IS_NACL)
1226 raced = true;
1227 #endif // !BUILDFLAG(IS_NACL)
1228 }
1229 }
1230
1231 char* mem = allocator_->GetAsArray<char>(ref, type_, size_);
1232 if (!mem) {
1233 #if !BUILDFLAG(IS_NACL)
1234 // TODO(crbug/1432981): Remove these. They are used to investigate
1235 // unexpected failures.
1236 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "full",
1237 allocator_->IsFull());
1238 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "corrupted",
1239 allocator_->IsCorrupt());
1240 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "ref", ref);
1241 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "ref_found", ref_found);
1242 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "raced", raced);
1243 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_", type_);
1244 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_);
1245 #endif // !BUILDFLAG(IS_NACL)
1246 // This should never happen but be tolerant if it does as corruption from
1247 // the outside is something to guard against.
1248 NOTREACHED();
1249 return nullptr;
1250 }
1251 return mem + offset_;
1252 }
1253
1254 } // namespace base
1255