1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/persistent_memory_allocator.h"
6
7 #include <assert.h>
8
9 #include <algorithm>
10 #include <atomic>
11
12 #include "base/bits.h"
13 #include "base/containers/contains.h"
14 #include "base/debug/alias.h"
15 #include "base/debug/crash_logging.h"
16 #include "base/debug/dump_without_crashing.h"
17 #include "base/files/memory_mapped_file.h"
18 #include "base/logging.h"
19 #include "base/metrics/histogram_functions.h"
20 #include "base/metrics/persistent_histogram_allocator.h"
21 #include "base/metrics/sparse_histogram.h"
22 #include "base/notreached.h"
23 #include "base/numerics/checked_math.h"
24 #include "base/numerics/safe_conversions.h"
25 #include "base/strings/strcat.h"
26 #include "base/strings/string_piece.h"
27 #include "base/system/sys_info.h"
28 #include "base/threading/scoped_blocking_call.h"
29 #include "build/build_config.h"
30 #include "third_party/abseil-cpp/absl/types/optional.h"
31
32 #if BUILDFLAG(IS_WIN)
33 #include <windows.h>
34 // Must be after <windows.h>
35 #include <winbase.h>
36 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
37 #include <sys/mman.h>
38 #if BUILDFLAG(IS_ANDROID)
39 #include <sys/prctl.h>
40 #endif
41 #endif
42
43 namespace {
44
45 // Limit of memory segment size. It has to fit in an unsigned 32-bit number
46 // and should be a power of 2 in order to accommodate almost any page size.
47 constexpr uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB
48
49 // A constant (random) value placed in the shared metadata to identify
50 // an already initialized memory segment.
51 constexpr uint32_t kGlobalCookie = 0x408305DC;
52
53 // The current version of the metadata. If updates are made that change
54 // the metadata, the version number can be queried to operate in a backward-
// compatible manner until the memory segment is completely re-initialized.
56 // Note: If you update the metadata in a non-backwards compatible way, reset
// |kOldCompatibleVersions|. Otherwise, add the previous version.
58 constexpr uint32_t kGlobalVersion = 3;
59 static constexpr uint32_t kOldCompatibleVersions[] = {2};
60
61 // Constant values placed in the block headers to indicate its state.
62 constexpr uint32_t kBlockCookieFree = 0;
63 constexpr uint32_t kBlockCookieQueue = 1;
64 constexpr uint32_t kBlockCookieWasted = 0x4B594F52;
65 constexpr uint32_t kBlockCookieAllocated = 0xC8799269;
66
67 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
68 // types rather than combined bitfield.
69
70 // Flags stored in the flags_ field of the SharedMetadata structure below.
71 constexpr uint32_t kFlagCorrupt = 1 << 0;
72 constexpr uint32_t kFlagFull = 1 << 1;
73
74 // Errors that are logged in "errors" histogram.
75 // These values are persisted to logs. Entries should not be renumbered and
76 // numeric values should never be reused.
77 enum AllocatorError : int {
78 kMemoryIsCorrupt = 1,
79 kMaxValue = kMemoryIsCorrupt,
80 };
81
bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
83 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
84 return (loaded_flags & flag) != 0;
85 }
86
void SetFlag(volatile std::atomic<uint32_t>* flags, uint32_t flag) {
88 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
89 for (;;) {
90 uint32_t new_flags = (loaded_flags & ~flag) | flag;
// In the failure case, the actual "flags" value is stored in loaded_flags.
// These accesses are "relaxed" because they are completely independent
93 // of all other values.
94 if (flags->compare_exchange_weak(loaded_flags, new_flags,
95 std::memory_order_relaxed,
96 std::memory_order_relaxed)) {
97 break;
98 }
99 }
100 }
101
102 } // namespace
103
104 namespace base {
105
106 // The block-header is placed at the top of every allocation within the
107 // segment to describe the data that follows it.
108 struct PersistentMemoryAllocator::BlockHeader {
109 uint32_t size; // Number of bytes in this block, including header.
110 uint32_t cookie; // Constant value indicating completed allocation.
111 std::atomic<uint32_t> type_id; // Arbitrary number indicating data type.
112 std::atomic<uint32_t> next; // Pointer to the next block when iterating.
113 };
114
115 // The shared metadata exists once at the top of the memory segment to
116 // describe the state of the allocator to all processes. The size of this
117 // structure must be a multiple of 64-bits to ensure compatibility between
118 // architectures.
119 struct PersistentMemoryAllocator::SharedMetadata {
120 uint32_t cookie; // Some value that indicates complete initialization.
121 uint32_t size; // Total size of memory segment.
122 uint32_t page_size; // Paging size within memory segment.
123 uint32_t version; // Version code so upgrades don't break.
124 uint64_t id; // Arbitrary ID number given by creator.
125 uint32_t name; // Reference to stored name string.
126 uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
127
128 // Above is read-only after first construction. Below may be changed and
129 // so must be marked "volatile" to provide correct inter-process behavior.
130
131 // State of the memory, plus some padding to keep alignment.
132 volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
133 uint8_t padding2[3];
134
135 // Bitfield of information flags. Access to this should be done through
136 // the CheckFlag() and SetFlag() methods defined above.
137 volatile std::atomic<uint32_t> flags;
138
139 // Offset/reference to first free space in segment.
140 volatile std::atomic<uint32_t> freeptr;
141
142 // The "iterable" queue is an M&S Queue as described here, append-only:
143 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
144 // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
145 volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
146 volatile BlockHeader queue; // Empty block for linked-list head/tail.
147 };
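
// For orientation, a sketch of the segment layout implied by the structures
// above (offsets follow the 64-byte metadata size asserted in the
// constructor; illustrative only):
//
//   offset 0        : SharedMetadata, ending with the "queue" BlockHeader.
//   offset 64       : first BlockHeader followed by its payload.
//   ...             : further blocks, each aligned to kAllocAlignment.
//   freeptr .. size : unallocated space; freeptr only ever grows.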
148
149 // The "queue" block header is used to detect "last node" so that zero/null
150 // can be used to indicate that it hasn't been added at all. It is part of
151 // the SharedMetadata structure which itself is always located at offset zero.
152 const PersistentMemoryAllocator::Reference
153 PersistentMemoryAllocator::kReferenceQueue =
154 offsetof(SharedMetadata, queue);
155
156 const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
157 FILE_PATH_LITERAL(".pma");
158
159
PersistentMemoryAllocator::Iterator::Iterator(
161 const PersistentMemoryAllocator* allocator)
162 : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
163
PersistentMemoryAllocator::Iterator::Iterator(
165 const PersistentMemoryAllocator* allocator,
166 Reference starting_after)
167 : allocator_(allocator), last_record_(0), record_count_(0) {
168 Reset(starting_after);
169 }
170
171 PersistentMemoryAllocator::Iterator::~Iterator() = default;
172
void PersistentMemoryAllocator::Iterator::Reset() {
174 last_record_.store(kReferenceQueue, std::memory_order_relaxed);
175 record_count_.store(0, std::memory_order_relaxed);
176 }
177
void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
179 if (starting_after == 0) {
180 Reset();
181 return;
182 }
183
184 last_record_.store(starting_after, std::memory_order_relaxed);
185 record_count_.store(0, std::memory_order_relaxed);
186
187 // Ensure that the starting point is a valid, iterable block (meaning it can
188 // be read and has a non-zero "next" pointer).
189 const volatile BlockHeader* block =
190 allocator_->GetBlock(starting_after, 0, 0, false, false);
191 if (!block || block->next.load(std::memory_order_relaxed) == 0) {
192 NOTREACHED();
193 last_record_.store(kReferenceQueue, std::memory_order_release);
194 }
195 }
196
197 PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetLast() {
199 Reference last = last_record_.load(std::memory_order_relaxed);
200 if (last == kReferenceQueue)
201 return kReferenceNull;
202 return last;
203 }
204
205 PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
207 // Make a copy of the existing count of found-records, acquiring all changes
208 // made to the allocator, notably "freeptr" (see comment in loop for why
209 // the load of that value cannot be moved above here) that occurred during
210 // any previous runs of this method, including those by parallel threads
211 // that interrupted it. It pairs with the Release at the end of this method.
212 //
213 // Otherwise, if the compiler were to arrange the two loads such that
214 // "count" was fetched _after_ "freeptr" then it would be possible for
215 // this thread to be interrupted between them and other threads perform
216 // multiple allocations, make-iterables, and iterations (with the included
217 // increment of |record_count_|) culminating in the check at the bottom
218 // mistakenly determining that a loop exists. Isn't this stuff fun?
219 uint32_t count = record_count_.load(std::memory_order_acquire);
220
221 Reference last = last_record_.load(std::memory_order_acquire);
222 Reference next;
223 while (true) {
224 const volatile BlockHeader* block =
225 allocator_->GetBlock(last, 0, 0, true, false);
226 if (!block) // Invalid iterator state.
227 return kReferenceNull;
228
229 // The compiler and CPU can freely reorder all memory accesses on which
230 // there are no dependencies. It could, for example, move the load of
231 // "freeptr" to above this point because there are no explicit dependencies
232 // between it and "next". If it did, however, then another block could
233 // be queued after that but before the following load meaning there is
234 // one more queued block than the future "detect loop by having more
// blocks than could fit before freeptr" will allow.
236 //
237 // By "acquiring" the "next" value here, it's synchronized to the enqueue
238 // of the node which in turn is synchronized to the allocation (which sets
239 // freeptr). Thus, the scenario above cannot happen.
240 next = block->next.load(std::memory_order_acquire);
241 if (next == kReferenceQueue) // No next allocation in queue.
242 return kReferenceNull;
243 block = allocator_->GetBlock(next, 0, 0, false, false);
244 if (!block) { // Memory is corrupt.
245 allocator_->SetCorrupt();
246 return kReferenceNull;
247 }
248
249 // Update the "last_record" pointer to be the reference being returned.
250 // If it fails then another thread has already iterated past it so loop
251 // again. Failing will also load the existing value into "last" so there
252 // is no need to do another such load when the while-loop restarts. A
253 // "strong" compare-exchange is used because failing unnecessarily would
254 // mean repeating some fairly costly validations above.
255 if (last_record_.compare_exchange_strong(
256 last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
257 *type_return = block->type_id.load(std::memory_order_relaxed);
258 break;
259 }
260 }
261
262 // Memory corruption could cause a loop in the list. Such must be detected
263 // so as to not cause an infinite loop in the caller. This is done by simply
264 // making sure it doesn't iterate more times than the absolute maximum
265 // number of allocations that could have been made. Callers are likely
266 // to loop multiple times before it is detected but at least it stops.
267 const uint32_t freeptr = std::min(
268 allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
269 allocator_->mem_size_);
270 const uint32_t max_records =
271 freeptr / (sizeof(BlockHeader) + kAllocAlignment);
272 if (count > max_records) {
273 allocator_->SetCorrupt();
274 return kReferenceNull;
275 }
276
277 // Increment the count and release the changes made above. It pairs with
278 // the Acquire at the top of this method. Note that this operation is not
// strictly synchronized with fetching of the object to return, which would
280 // have to be done inside the loop and is somewhat complicated to achieve.
281 // It does not matter if it falls behind temporarily so long as it never
282 // gets ahead.
283 record_count_.fetch_add(1, std::memory_order_release);
284 return next;
285 }
286
287 PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
289 Reference ref;
290 uint32_t type_found;
291 while ((ref = GetNext(&type_found)) != 0) {
292 if (type_found == type_match)
293 return ref;
294 }
295 return kReferenceNull;
296 }
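
// A minimal iteration sketch (hedged example; `allocator` is assumed to be a
// valid PersistentMemoryAllocator* and the payload is treated as raw bytes):
//
//   PersistentMemoryAllocator::Iterator iter(allocator);
//   uint32_t type_id;
//   for (PersistentMemoryAllocator::Reference ref = iter.GetNext(&type_id);
//        ref != PersistentMemoryAllocator::kReferenceNull;
//        ref = iter.GetNext(&type_id)) {
//     // References, not pointers, are what should be shared across
//     // processes; convert to a pointer only locally.
//     const char* data = allocator->GetAsArray<char>(
//         ref, type_id, PersistentMemoryAllocator::kSizeAny);
//     if (data) {
//       // ... inspect the record ...
//     }
//   }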
297
298
299 // static
bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
301 size_t size,
302 size_t page_size,
303 bool readonly) {
304 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
305 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
306 (size % kAllocAlignment == 0 || readonly) &&
307 (page_size == 0 || size % page_size == 0 || readonly));
308 }
309
PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
311 size_t size,
312 size_t page_size,
313 uint64_t id,
314 base::StringPiece name,
315 AccessMode access_mode)
316 : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
317 size,
318 page_size,
319 id,
320 name,
321 access_mode) {}
322
PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
324 size_t size,
325 size_t page_size,
326 uint64_t id,
327 base::StringPiece name,
328 AccessMode access_mode)
329 : mem_base_(static_cast<char*>(memory.base)),
330 mem_type_(memory.type),
331 mem_size_(checked_cast<uint32_t>(size)),
332 mem_page_(checked_cast<uint32_t>((page_size ? page_size : size))),
333 #if BUILDFLAG(IS_NACL)
334 vm_page_size_(4096U), // SysInfo is not built for NACL.
335 #else
336 vm_page_size_(SysInfo::VMAllocationGranularity()),
337 #endif
338 access_mode_(access_mode) {
339 // These asserts ensure that the structures are 32/64-bit agnostic and meet
340 // all the requirements of use within the allocator. They access private
341 // definitions and so cannot be moved to the global scope.
342 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
343 "struct is not portable across different natural word widths");
344 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
345 "struct is not portable across different natural word widths");
346
347 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
348 "BlockHeader is not a multiple of kAllocAlignment");
349 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
350 "SharedMetadata is not a multiple of kAllocAlignment");
351 static_assert(kReferenceQueue % kAllocAlignment == 0,
352 "\"queue\" is not aligned properly; must be at end of struct");
353
354 // Ensure that memory segment is of acceptable size.
355 const bool readonly = access_mode == kReadOnly;
356 CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
357
358 // These atomics operate inter-process and so must be lock-free.
359 DCHECK(SharedMetadata().freeptr.is_lock_free());
360 DCHECK(SharedMetadata().flags.is_lock_free());
361 DCHECK(BlockHeader().next.is_lock_free());
362 CHECK(corrupt_.is_lock_free());
363
364 // When calling SetCorrupt() during initialization, don't write to the memory
365 // in kReadOnly and kReadWriteExisting modes.
366 const bool allow_write_for_set_corrupt = (access_mode == kReadWrite);
367 if (shared_meta()->cookie != kGlobalCookie) {
368 if (access_mode != kReadWrite) {
369 SetCorrupt(allow_write_for_set_corrupt);
370 return;
371 }
372
373 // This block is only executed when a completely new memory segment is
374 // being initialized. It's unshared and single-threaded...
375 volatile BlockHeader* const first_block =
376 reinterpret_cast<volatile BlockHeader*>(mem_base_ +
377 sizeof(SharedMetadata));
378 if (shared_meta()->cookie != 0 ||
379 shared_meta()->size != 0 ||
380 shared_meta()->version != 0 ||
381 shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
382 shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
383 shared_meta()->id != 0 ||
384 shared_meta()->name != 0 ||
385 shared_meta()->tailptr != 0 ||
386 shared_meta()->queue.cookie != 0 ||
387 shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
388 first_block->size != 0 ||
389 first_block->cookie != 0 ||
390 first_block->type_id.load(std::memory_order_relaxed) != 0 ||
391 first_block->next != 0) {
392 // ...or something malicious has been playing with the metadata.
393 CHECK(allow_write_for_set_corrupt);
394 SetCorrupt(allow_write_for_set_corrupt);
395 }
396
397 // This is still safe to do even if corruption has been detected.
398 shared_meta()->cookie = kGlobalCookie;
399 shared_meta()->size = mem_size_;
400 shared_meta()->page_size = mem_page_;
401 shared_meta()->version = kGlobalVersion;
402 shared_meta()->id = id;
403 shared_meta()->freeptr.store(sizeof(SharedMetadata),
404 std::memory_order_release);
405
406 // Set up the queue of iterable allocations.
407 shared_meta()->queue.size = sizeof(BlockHeader);
408 shared_meta()->queue.cookie = kBlockCookieQueue;
409 shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
410 shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
411
412 // Allocate space for the name so other processes can learn it.
413 if (!name.empty()) {
414 const size_t name_length = name.length() + 1;
415 shared_meta()->name = Allocate(name_length, 0);
416 char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
417 if (name_cstr)
418 memcpy(name_cstr, name.data(), name.length());
419 }
420
421 shared_meta()->memory_state.store(MEMORY_INITIALIZED,
422 std::memory_order_release);
423 } else {
424 if (shared_meta()->size == 0 ||
425 (shared_meta()->version != kGlobalVersion &&
426 !Contains(kOldCompatibleVersions, shared_meta()->version)) ||
427 shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
428 shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
429 shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
430 SetCorrupt(allow_write_for_set_corrupt);
431 }
432 if (!readonly) {
433 // The allocator is attaching to a previously initialized segment of
434 // memory. If the initialization parameters differ, make the best of it
435 // by reducing the local construction parameters to match those of the
436 // actual memory area. This ensures that the local object never tries to
437 // write outside of the original bounds.
438 // Because the fields are const to ensure that no code other than the
439 // constructor makes changes to them as well as to give optimization hints
440 // to the compiler, it's necessary to const-cast them for changes here.
441 if (shared_meta()->size < mem_size_)
442 *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
443 if (shared_meta()->page_size < mem_page_)
444 *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
445
446 // Ensure that settings are still valid after the above adjustments.
447 if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly)) {
448 SetCorrupt(allow_write_for_set_corrupt);
449 }
450 }
451 }
452 }
453
PersistentMemoryAllocator::~PersistentMemoryAllocator() {
455 // It's strictly forbidden to do any memory access here in case there is
456 // some issue with the underlying memory segment. The "Local" allocator
457 // makes use of this to allow deletion of the segment on the heap from
458 // within its destructor.
459 }
460
uint64_t PersistentMemoryAllocator::Id() const {
462 return shared_meta()->id;
463 }
464
const char* PersistentMemoryAllocator::Name() const {
466 Reference name_ref = shared_meta()->name;
467 const char* name_cstr =
468 GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
469 if (!name_cstr)
470 return "";
471
472 size_t name_length = GetAllocSize(name_ref);
473 if (name_cstr[name_length - 1] != '\0') {
474 NOTREACHED();
475 SetCorrupt();
476 return "";
477 }
478
479 return name_cstr;
480 }
481
void PersistentMemoryAllocator::CreateTrackingHistograms(
483 base::StringPiece name) {
484 if (name.empty() || access_mode_ == kReadOnly) {
485 return;
486 }
487 std::string name_string(name);
488
489 #if 0
490 // This histogram wasn't being used so has been disabled. It is left here
491 // in case development of a new use of the allocator could benefit from
492 // recording (temporarily and locally) the allocation sizes.
493 DCHECK(!allocs_histogram_);
494 allocs_histogram_ = Histogram::FactoryGet(
495 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
496 HistogramBase::kUmaTargetedHistogramFlag);
497 #endif
498
499 DCHECK(!used_histogram_);
500 used_histogram_ = LinearHistogram::FactoryGet(
501 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
502 HistogramBase::kUmaTargetedHistogramFlag);
503
504 DCHECK(!errors_histogram_);
505 errors_histogram_ = LinearHistogram::FactoryGet(
506 "UMA.PersistentAllocator." + name_string + ".Errors", 1,
507 AllocatorError::kMaxValue + 1, AllocatorError::kMaxValue + 2,
508 HistogramBase::kUmaTargetedHistogramFlag);
509 }
510
void PersistentMemoryAllocator::Flush(bool sync) {
512 FlushPartial(used(), sync);
513 }
514
void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
516 shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
517 FlushPartial(sizeof(SharedMetadata), false);
518 }
519
uint8_t PersistentMemoryAllocator::GetMemoryState() const {
521 return shared_meta()->memory_state.load(std::memory_order_relaxed);
522 }
523
size_t PersistentMemoryAllocator::used() const {
525 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
526 mem_size_);
527 }
528
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
530 const void* memory,
531 uint32_t type_id) const {
532 uintptr_t address = reinterpret_cast<uintptr_t>(memory);
533 if (address < reinterpret_cast<uintptr_t>(mem_base_))
534 return kReferenceNull;
535
536 uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
537 if (offset >= mem_size_ || offset < sizeof(BlockHeader))
538 return kReferenceNull;
539
540 Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
541 if (!GetBlockData(ref, type_id, kSizeAny))
542 return kReferenceNull;
543
544 return ref;
545 }
546
size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
548 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
549 if (!block)
550 return 0;
551 uint32_t size = block->size;
552 // Header was verified by GetBlock() but a malicious actor could change
553 // the value between there and here. Check it again.
554 uint32_t total_size;
555 if (size <= sizeof(BlockHeader) ||
556 !base::CheckAdd(ref, size).AssignIfValid(&total_size) ||
557 total_size > mem_size_) {
558 SetCorrupt();
559 return 0;
560 }
561 return size - sizeof(BlockHeader);
562 }
563
uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
565 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
566 if (!block)
567 return 0;
568 return block->type_id.load(std::memory_order_relaxed);
569 }
570
bool PersistentMemoryAllocator::ChangeType(Reference ref,
572 uint32_t to_type_id,
573 uint32_t from_type_id,
574 bool clear) {
575 DCHECK_NE(access_mode_, kReadOnly);
576 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
577 if (!block)
578 return false;
579
580 // "Strong" exchanges are used below because there is no loop that can retry
581 // in the wake of spurious failures possible with "weak" exchanges. It is,
582 // in aggregate, an "acquire-release" operation so no memory accesses can be
583 // reordered either before or after this method (since changes based on type
584 // could happen on either side).
585
586 if (clear) {
587 // If clearing the memory, first change it to the "transitioning" type so
588 // there can be no confusion by other threads. After the memory is cleared,
589 // it can be changed to its final type.
590 if (!block->type_id.compare_exchange_strong(
591 from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
592 std::memory_order_acquire)) {
593 // Existing type wasn't what was expected: fail (with no changes)
594 return false;
595 }
596
// Clear the memory in an atomic manner. Using "release" stores forces
598 // every write to be done after the ones before it. This is better than
599 // using memset because (a) it supports "volatile" and (b) it creates a
600 // reliable pattern upon which other threads may rely.
601 volatile std::atomic<int>* data =
602 reinterpret_cast<volatile std::atomic<int>*>(
603 reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
604 const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
605 DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
606 for (uint32_t i = 0; i < words; ++i) {
607 data->store(0, std::memory_order_release);
608 ++data;
609 }
610
611 // If the destination type is "transitioning" then skip the final exchange.
612 if (to_type_id == kTypeIdTransitioning)
613 return true;
614
615 // Finish the change to the desired type.
616 from_type_id = kTypeIdTransitioning; // Exchange needs modifiable original.
617 bool success = block->type_id.compare_exchange_strong(
618 from_type_id, to_type_id, std::memory_order_release,
619 std::memory_order_relaxed);
620 DCHECK(success); // Should never fail.
621 return success;
622 }
623
624 // One step change to the new type. Will return false if the existing value
625 // doesn't match what is expected.
626 return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
627 std::memory_order_acq_rel,
628 std::memory_order_acquire);
629 }
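
// A small caller-side sketch of the transition above (the type ids are
// hypothetical; `allocator` and `ref` are assumed to be valid):
//
//   constexpr uint32_t kTypeIdOld = 0x11111111;
//   constexpr uint32_t kTypeIdNew = 0x22222222;
//   // Claim the block only if it still holds the old type, zero its payload,
//   // then publish it under the new type id. Returns false if another thread
//   // changed the type first.
//   bool converted =
//       allocator->ChangeType(ref, kTypeIdNew, kTypeIdOld, /*clear=*/true);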
630
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
632 size_t req_size,
633 uint32_t type_id) {
634 Reference ref = AllocateImpl(req_size, type_id);
635 if (ref) {
636 // Success: Record this allocation in usage stats (if active).
637 if (allocs_histogram_)
638 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
639 } else {
640 // Failure: Record an allocation of zero for tracking.
641 if (allocs_histogram_)
642 allocs_histogram_->Add(0);
643 }
644 return ref;
645 }
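
// A minimal allocate-then-publish sketch (the type id and size are
// assumptions for the example; real callers often use GetAsObject<> with a
// type that declares its own persistent type id):
//
//   constexpr uint32_t kExampleType = 0x600DDA7A;  // Hypothetical.
//   constexpr size_t kExampleSize = 64;
//   PersistentMemoryAllocator::Reference ref =
//       allocator->Allocate(kExampleSize, kExampleType);
//   if (ref) {
//     char* data = allocator->GetAsArray<char>(ref, kExampleType,
//                                              kExampleSize);
//     // ... fill in `data` while it is still private to this thread ...
//     allocator->MakeIterable(ref);  // Publish to other threads/processes.
//   }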
646
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
648 size_t req_size,
649 uint32_t type_id) {
650 DCHECK_NE(access_mode_, kReadOnly);
651
652 // Validate req_size to ensure it won't overflow when used as 32-bit value.
653 if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
654 NOTREACHED();
655 return kReferenceNull;
656 }
657
658 // Round up the requested size, plus header, to the next allocation alignment.
659 size_t size = bits::AlignUp(req_size + sizeof(BlockHeader), kAllocAlignment);
660 if (size <= sizeof(BlockHeader) || size > mem_page_) {
661 NOTREACHED();
662 return kReferenceNull;
663 }
664
665 // Get the current start of unallocated memory. Other threads may
666 // update this at any time and cause us to retry these operations.
667 // This value should be treated as "const" to avoid confusion through
668 // the code below but recognize that any failed compare-exchange operation
669 // involving it will cause it to be loaded with a more recent value. The
670 // code should either exit or restart the loop in that case.
671 /* const */ uint32_t freeptr =
672 shared_meta()->freeptr.load(std::memory_order_acquire);
673
// Allocation is lockless so we do all our calculations and then, if saving
675 // indicates a change has occurred since we started, scrap everything and
676 // start over.
677 for (;;) {
678 if (IsCorrupt())
679 return kReferenceNull;
680
681 if (freeptr + size > mem_size_) {
682 SetFlag(&shared_meta()->flags, kFlagFull);
683 return kReferenceNull;
684 }
685
686 // Get pointer to the "free" block. If something has been allocated since
687 // the load of freeptr above, it is still safe as nothing will be written
688 // to that location until after the compare-exchange below.
689 volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
690 if (!block) {
691 SetCorrupt();
692 return kReferenceNull;
693 }
694
695 // An allocation cannot cross page boundaries. If it would, create a
696 // "wasted" block and begin again at the top of the next page. This
697 // area could just be left empty but we fill in the block header just
// for completeness' sake.
699 const uint32_t page_free = mem_page_ - freeptr % mem_page_;
700 if (size > page_free) {
701 if (page_free <= sizeof(BlockHeader)) {
702 SetCorrupt();
703 return kReferenceNull;
704 }
705
706 #if !BUILDFLAG(IS_NACL)
707 // In production, with the current state of the code, this code path
708 // should not be reached. However, crash reports have been hinting that it
709 // is. Add crash keys to investigate this.
710 // TODO(crbug.com/1432981): Remove them once done.
711 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "mem_size_",
712 mem_size_);
713 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "mem_page_",
714 mem_page_);
715 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "freeptr", freeptr);
716 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "page_free",
717 page_free);
718 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size", size);
719 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "req_size",
720 req_size);
721 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_id", type_id);
722 std::string persistent_file_name = "N/A";
723 auto* allocator = GlobalHistogramAllocator::Get();
724 if (allocator && allocator->HasPersistentLocation()) {
725 persistent_file_name =
726 allocator->GetPersistentLocation().BaseName().AsUTF8Unsafe();
727 }
728 SCOPED_CRASH_KEY_STRING256("PersistentMemoryAllocator", "file_name",
729 persistent_file_name);
730 debug::DumpWithoutCrashing();
731 #endif // !BUILDFLAG(IS_NACL)
732
733 const uint32_t new_freeptr = freeptr + page_free;
734 if (shared_meta()->freeptr.compare_exchange_strong(
735 freeptr, new_freeptr, std::memory_order_acq_rel,
736 std::memory_order_acquire)) {
737 block->size = page_free;
738 block->cookie = kBlockCookieWasted;
739 }
740 continue;
741 }
742
743 // Don't leave a slice at the end of a page too small for anything. This
744 // can result in an allocation up to two alignment-sizes greater than the
745 // minimum required by requested-size + header + alignment.
746 if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) {
747 size = page_free;
748 if (freeptr + size > mem_size_) {
749 SetCorrupt();
750 return kReferenceNull;
751 }
752 }
753
754 // This cast is safe because (freeptr + size) <= mem_size_.
755 const uint32_t new_freeptr = static_cast<uint32_t>(freeptr + size);
756
757 // Save our work. Try again if another thread has completed an allocation
// while we were processing. A "weak" exchange would be permissible here
759 // because the code will just loop and try again but the above processing
760 // is significant so make the extra effort of a "strong" exchange.
761 if (!shared_meta()->freeptr.compare_exchange_strong(
762 freeptr, new_freeptr, std::memory_order_acq_rel,
763 std::memory_order_acquire)) {
764 continue;
765 }
766
767 // Given that all memory was zeroed before ever being given to an instance
// of this class and given that we only allocate in a monotonic fashion
769 // going forward, it must be that the newly allocated block is completely
770 // full of zeros. If we find anything in the block header that is NOT a
// zero then something must have previously run amok through memory,
772 // writing beyond the allocated space and into unallocated space.
773 if (block->size != 0 ||
774 block->cookie != kBlockCookieFree ||
775 block->type_id.load(std::memory_order_relaxed) != 0 ||
776 block->next.load(std::memory_order_relaxed) != 0) {
777 SetCorrupt();
778 return kReferenceNull;
779 }
780
781 // Make sure the memory exists by writing to the first byte of every memory
782 // page it touches beyond the one containing the block header itself.
783 // As the underlying storage is often memory mapped from disk or shared
// space, sometimes things go wrong and those addresses don't actually exist
785 // leading to a SIGBUS (or Windows equivalent) at some arbitrary location
786 // in the code. This should concentrate all those failures into this
787 // location for easy tracking and, eventually, proper handling.
788 volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
789 volatile char* mem_begin = reinterpret_cast<volatile char*>(
790 (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
791 (vm_page_size_ - 1)) &
792 ~static_cast<uintptr_t>(vm_page_size_ - 1));
793 for (volatile char* memory = mem_begin; memory < mem_end;
794 memory += vm_page_size_) {
795 // It's required that a memory segment start as all zeros and thus the
796 // newly allocated block is all zeros at this point. Thus, writing a
797 // zero to it allows testing that the memory exists without actually
798 // changing its contents. The compiler doesn't know about the requirement
799 // and so cannot optimize-away these writes.
800 *memory = 0;
801 }
802
803 // Load information into the block header. There is no "release" of the
804 // data here because this memory can, currently, be seen only by the thread
805 // performing the allocation. When it comes time to share this, the thread
806 // will call MakeIterable() which does the release operation.
807 // `size` is at most kSegmentMaxSize, so this cast is safe.
808 block->size = static_cast<uint32_t>(size);
809 block->cookie = kBlockCookieAllocated;
810 block->type_id.store(type_id, std::memory_order_relaxed);
811 return freeptr;
812 }
813 }
814
void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
816 uint32_t remaining = std::max(
817 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
818 (uint32_t)sizeof(BlockHeader));
819 meminfo->total = mem_size_;
820 meminfo->free = remaining - sizeof(BlockHeader);
821 }
822
void PersistentMemoryAllocator::MakeIterable(Reference ref) {
824 DCHECK_NE(access_mode_, kReadOnly);
825 if (IsCorrupt())
826 return;
827 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
828 if (!block) // invalid reference
829 return;
830 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable.
831 return;
832 block->next.store(kReferenceQueue, std::memory_order_release); // New tail.
833
834 // Try to add this block to the tail of the queue. May take multiple tries.
835 // If so, tail will be automatically updated with a more recent value during
836 // compare-exchange operations.
837 uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
838 for (;;) {
839 // Acquire the current tail-pointer released by previous call to this
840 // method and validate it.
841 block = GetBlock(tail, 0, 0, true, false);
842 if (!block) {
843 SetCorrupt();
844 return;
845 }
846
847 // Try to insert the block at the tail of the queue. The tail node always
848 // has an existing value of kReferenceQueue; if that is somehow not the
849 // existing value then another thread has acted in the meantime. A "strong"
850 // exchange is necessary so the "else" block does not get executed when
851 // that is not actually the case (which can happen with a "weak" exchange).
852 uint32_t next = kReferenceQueue; // Will get replaced with existing value.
853 if (block->next.compare_exchange_strong(next, ref,
854 std::memory_order_acq_rel,
855 std::memory_order_acquire)) {
856 // Update the tail pointer to the new offset. If the "else" clause did
857 // not exist, then this could be a simple Release_Store to set the new
858 // value but because it does, it's possible that other threads could add
859 // one or more nodes at the tail before reaching this point. We don't
860 // have to check the return value because it either operates correctly
861 // or the exact same operation has already been done (by the "else"
862 // clause) on some other thread.
863 shared_meta()->tailptr.compare_exchange_strong(tail, ref,
864 std::memory_order_release,
865 std::memory_order_relaxed);
866 return;
867 }
868 // In the unlikely case that a thread crashed or was killed between the
869 // update of "next" and the update of "tailptr", it is necessary to
870 // perform the operation that would have been done. There's no explicit
871 // check for crash/kill which means that this operation may also happen
872 // even when the other thread is in perfect working order which is what
873 // necessitates the CompareAndSwap above.
874 shared_meta()->tailptr.compare_exchange_strong(
875 tail, next, std::memory_order_acq_rel, std::memory_order_acquire);
876 }
877 }
878
879 // The "corrupted" state is held both locally and globally (shared). The
880 // shared flag can't be trusted since a malicious actor could overwrite it.
881 // Because corruption can be detected during read-only operations such as
882 // iteration, this method may be called by other "const" methods. In this
883 // case, it's safe to discard the constness and modify the local flag and
884 // maybe even the shared flag if the underlying data isn't actually read-only.
void PersistentMemoryAllocator::SetCorrupt(bool allow_write) const {
886 if (!corrupt_.load(std::memory_order_relaxed) &&
887 !CheckFlag(
888 const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
889 kFlagCorrupt)) {
890 LOG(ERROR) << "Corruption detected in shared-memory segment.";
891 RecordError(kMemoryIsCorrupt);
892 }
893
894 corrupt_.store(true, std::memory_order_relaxed);
895 if (allow_write && access_mode_ != kReadOnly) {
896 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
897 kFlagCorrupt);
898 }
899 }
900
bool PersistentMemoryAllocator::IsCorrupt() const {
902 if (corrupt_.load(std::memory_order_relaxed)) {
903 return true;
904 }
905 if (CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
906 // Set the local flag if we found the flag in the data.
907 SetCorrupt(/*allow_write=*/false);
908 return true;
909 }
910 return false;
911 }
912
bool PersistentMemoryAllocator::IsFull() const {
914 return CheckFlag(&shared_meta()->flags, kFlagFull);
915 }
916
917 // Dereference a block |ref| and ensure that it's valid for the desired
918 // |type_id| and |size|. |special| indicates that we may try to access block
919 // headers not available to callers but still accessed by this module. By
920 // having internal dereferences go through this same function, the allocator
921 // is hardened against corruption.
922 const volatile PersistentMemoryAllocator::BlockHeader*
PersistentMemoryAllocator::GetBlock(Reference ref,
924 uint32_t type_id,
925 size_t size,
926 bool queue_ok,
927 bool free_ok) const {
928 // Handle special cases.
929 if (ref == kReferenceQueue && queue_ok)
930 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
931
932 // Validation of parameters.
933 if (ref < sizeof(SharedMetadata))
934 return nullptr;
935 if (ref % kAllocAlignment != 0)
936 return nullptr;
937 size += sizeof(BlockHeader);
938 uint32_t total_size;
939 if (!base::CheckAdd(ref, size).AssignIfValid(&total_size)) {
940 return nullptr;
941 }
942 if (total_size > mem_size_) {
943 return nullptr;
944 }
945
946 // Validation of referenced block-header.
947 if (!free_ok) {
948 const volatile BlockHeader* const block =
949 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
950 if (block->cookie != kBlockCookieAllocated)
951 return nullptr;
952 if (block->size < size)
953 return nullptr;
954 uint32_t block_size;
955 if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) {
956 return nullptr;
957 }
958 if (block_size > mem_size_) {
959 return nullptr;
960 }
961 if (type_id != 0 &&
962 block->type_id.load(std::memory_order_relaxed) != type_id) {
963 return nullptr;
964 }
965 }
966
967 // Return pointer to block data.
968 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
969 }
970
void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
972 // Generally there is nothing to do as every write is done through volatile
973 // memory with atomic instructions to guarantee consistency. This (virtual)
974 // method exists so that derived classes can do special things, such as tell
975 // the OS to write changes to disk now rather than when convenient.
976 }
977
void PersistentMemoryAllocator::RecordError(int error) const {
979 if (errors_histogram_)
980 errors_histogram_->Add(error);
981 }
982
uint32_t PersistentMemoryAllocator::freeptr() const {
984 return shared_meta()->freeptr.load(std::memory_order_relaxed);
985 }
986
uint32_t PersistentMemoryAllocator::version() const {
988 return shared_meta()->version;
989 }
990
const volatile void* PersistentMemoryAllocator::GetBlockData(
992 Reference ref,
993 uint32_t type_id,
994 size_t size) const {
995 DCHECK(size > 0);
996 const volatile BlockHeader* block =
997 GetBlock(ref, type_id, size, false, false);
998 if (!block)
999 return nullptr;
1000 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
1001 }
1002
void PersistentMemoryAllocator::UpdateTrackingHistograms() {
1004 DCHECK_NE(access_mode_, kReadOnly);
1005 if (used_histogram_) {
1006 MemoryInfo meminfo;
1007 GetMemoryInfo(&meminfo);
1008 HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
1009 ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
1010 used_histogram_->Add(used_percent);
1011 }
1012 }
1013
1014
1015 //----- LocalPersistentMemoryAllocator -----------------------------------------
1016
LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
1018 size_t size,
1019 uint64_t id,
1020 base::StringPiece name)
1021 : PersistentMemoryAllocator(AllocateLocalMemory(size, name),
1022 size,
1023 0,
1024 id,
1025 name,
1026 kReadWrite) {}
1027
LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
1029 DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
1030 }
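
// A minimal construction sketch (the size, id, and name are illustrative):
//
//   LocalPersistentMemoryAllocator local(/*size=*/64 << 10, /*id=*/0x1234,
//                                        "ExampleAllocator");
//   DCHECK(!local.IsCorrupt());
//   // Only the shared metadata is in use until the first Allocate() call.
//   const size_t initially_used = local.used();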
1031
1032 // static
1033 PersistentMemoryAllocator::Memory
LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size,
1035 base::StringPiece name) {
1036 void* address;
1037
1038 #if BUILDFLAG(IS_WIN)
1039 address =
1040 ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1041 if (address)
1042 return Memory(address, MEM_VIRTUAL);
1043 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1044 // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
1045 // MAP_SHARED is not available on Linux <2.4 but required on Mac.
1046 address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
1047 MAP_ANON | MAP_SHARED, -1, 0);
1048 if (address != MAP_FAILED) {
1049 #if BUILDFLAG(IS_ANDROID)
1050 // Allow the anonymous memory region allocated by mmap(MAP_ANON) to be
1051 // identified in /proc/$PID/smaps. This helps improve visibility into
1052 // Chrome's memory usage on Android.
1053 const std::string arena_name = base::StrCat({"persistent:", name});
1054 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, address, size, arena_name.c_str());
1055 #endif
1056 return Memory(address, MEM_VIRTUAL);
1057 }
1058 #else
1059 #error This architecture is not (yet) supported.
1060 #endif
1061
1062 // As a last resort, just allocate the memory from the heap. This will
1063 // achieve the same basic result but the acquired memory has to be
1064 // explicitly zeroed and thus realized immediately (i.e. all pages are
// added to the process now instead of only when first accessed).
1066 address = malloc(size);
1067 DPCHECK(address);
1068 memset(address, 0, size);
1069 return Memory(address, MEM_MALLOC);
1070 }
1071
1072 // static
void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
1074 size_t size,
1075 MemoryType type) {
1076 if (type == MEM_MALLOC) {
1077 free(memory);
1078 return;
1079 }
1080
1081 DCHECK_EQ(MEM_VIRTUAL, type);
1082 #if BUILDFLAG(IS_WIN)
1083 BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
1084 DCHECK(success);
1085 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1086 int result = ::munmap(memory, size);
1087 DCHECK_EQ(0, result);
1088 #else
1089 #error This architecture is not (yet) supported.
1090 #endif
1091 }
1092
1093 //----- WritableSharedPersistentMemoryAllocator --------------------------------
1094
1095 WritableSharedPersistentMemoryAllocator::
    WritableSharedPersistentMemoryAllocator(
1097 base::WritableSharedMemoryMapping memory,
1098 uint64_t id,
1099 base::StringPiece name)
1100 : PersistentMemoryAllocator(Memory(memory.memory(), MEM_SHARED),
1101 memory.size(),
1102 0,
1103 id,
1104 name,
1105 kReadWrite),
1106 shared_memory_(std::move(memory)) {}
1107
1108 WritableSharedPersistentMemoryAllocator::
1109 ~WritableSharedPersistentMemoryAllocator() = default;
1110
1111 // static
bool WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1113 const base::WritableSharedMemoryMapping& memory) {
1114 return IsMemoryAcceptable(memory.memory(), memory.size(), 0, false);
1115 }
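
// A hedged sketch of wrapping a freshly created shared-memory mapping
// (the region size and id are illustrative; error handling is omitted):
//
//   base::WritableSharedMemoryRegion region =
//       base::WritableSharedMemoryRegion::Create(64 << 10);
//   base::WritableSharedMemoryMapping mapping = region.Map();
//   if (mapping.IsValid() &&
//       WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
//           mapping)) {
//     WritableSharedPersistentMemoryAllocator allocator(
//         std::move(mapping), /*id=*/0x5678, "SharedExample");
//   }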
1116
1117 //----- ReadOnlySharedPersistentMemoryAllocator --------------------------------
1118
1119 ReadOnlySharedPersistentMemoryAllocator::
    ReadOnlySharedPersistentMemoryAllocator(
1121 base::ReadOnlySharedMemoryMapping memory,
1122 uint64_t id,
1123 base::StringPiece name)
1124 : PersistentMemoryAllocator(
1125 Memory(const_cast<void*>(memory.memory()), MEM_SHARED),
1126 memory.size(),
1127 0,
1128 id,
1129 name,
1130 kReadOnly),
1131 shared_memory_(std::move(memory)) {}
1132
1133 ReadOnlySharedPersistentMemoryAllocator::
1134 ~ReadOnlySharedPersistentMemoryAllocator() = default;
1135
1136 // static
bool ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
1138 const base::ReadOnlySharedMemoryMapping& memory) {
1139 return IsMemoryAcceptable(memory.memory(), memory.size(), 0, true);
1140 }
1141
1142 #if !BUILDFLAG(IS_NACL)
1143 //----- FilePersistentMemoryAllocator ------------------------------------------
1144
FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
1146 std::unique_ptr<MemoryMappedFile> file,
1147 size_t max_size,
1148 uint64_t id,
1149 base::StringPiece name,
1150 AccessMode access_mode)
1151 : PersistentMemoryAllocator(
1152 Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
1153 max_size != 0 ? max_size : file->length(),
1154 0,
1155 id,
1156 name,
1157 access_mode),
1158 mapped_file_(std::move(file)) {}
1159
1160 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;
1161
1162 // static
bool FilePersistentMemoryAllocator::IsFileAcceptable(
1164 const MemoryMappedFile& file,
1165 bool readonly) {
1166 return IsMemoryAcceptable(file.data(), file.length(), 0, readonly);
1167 }
1168
void FilePersistentMemoryAllocator::Cache() {
1170 // Since this method is expected to load data from permanent storage
1171 // into memory, blocking I/O may occur.
1172 base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
1173 base::BlockingType::MAY_BLOCK);
1174
1175 // Calculate begin/end addresses so that the first byte of every page
1176 // in that range can be read. Keep within the used space. The |volatile|
1177 // keyword makes it so the compiler can't make assumptions about what is
1178 // in a given memory location and thus possibly avoid the read.
1179 const volatile char* mem_end = mem_base_ + used();
1180 const volatile char* mem_begin = mem_base_;
1181
1182 // Iterate over the memory a page at a time, reading the first byte of
1183 // every page. The values are added to a |total| so that the compiler
1184 // can't omit the read.
1185 int total = 0;
1186 for (const volatile char* memory = mem_begin; memory < mem_end;
1187 memory += vm_page_size_) {
1188 total += *memory;
1189 }
1190
1191 // Tell the compiler that |total| is used so that it can't optimize away
1192 // the memory accesses above.
1193 debug::Alias(&total);
1194 }
1195
void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
1197 if (IsReadonly())
1198 return;
1199
1200 absl::optional<base::ScopedBlockingCall> scoped_blocking_call;
1201 if (sync)
1202 scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
1203
1204 #if BUILDFLAG(IS_WIN)
1205 // Windows doesn't support asynchronous flush.
1206 scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
1207 BOOL success = ::FlushViewOfFile(data(), length);
1208 DPCHECK(success);
1209 #elif BUILDFLAG(IS_APPLE)
1210 // On OSX, "invalidate" removes all cached pages, forcing a re-read from
1211 // disk. That's not applicable to "flush" so omit it.
1212 int result =
1213 ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
1214 DCHECK_NE(EINVAL, result);
1215 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
1216 // On POSIX, "invalidate" forces _other_ processes to recognize what has
1217 // been written to disk and so is applicable to "flush".
1218 int result = ::msync(const_cast<void*>(data()), length,
1219 MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
1220 DCHECK_NE(EINVAL, result);
1221 #else
1222 #error Unsupported OS.
1223 #endif
1224 }
1225 #endif // !BUILDFLAG(IS_NACL)
1226
1227 //----- DelayedPersistentAllocation --------------------------------------------
1228
DelayedPersistentAllocation::DelayedPersistentAllocation(
1230 PersistentMemoryAllocator* allocator,
1231 std::atomic<Reference>* ref,
1232 uint32_t type,
1233 size_t size,
1234 size_t offset)
1235 : allocator_(allocator),
1236 type_(type),
1237 size_(checked_cast<uint32_t>(size)),
1238 offset_(checked_cast<uint32_t>(offset)),
1239 reference_(ref) {
1240 DCHECK(allocator_);
1241 DCHECK_NE(0U, type_);
1242 DCHECK_LT(0U, size_);
1243 DCHECK(reference_);
1244 }
1245
1246 DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
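
// A minimal usage sketch; the atomic reference would normally live inside
// some other persistent structure, and the type/size/offset values here are
// hypothetical:
//
//   std::atomic<PersistentMemoryAllocator::Reference> stored_ref{0};
//   DelayedPersistentAllocation delayed(allocator, &stored_ref,
//                                       /*type=*/0xABCD1234, /*size=*/256,
//                                       /*offset=*/0);
//   // No persistent memory is consumed until Get() is first called.
//   void* data = delayed.Get();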
1247
void* DelayedPersistentAllocation::Get() const {
1249 // Relaxed operations are acceptable here because it's not protecting the
1250 // contents of the allocation in any way.
1251 Reference ref = reference_->load(std::memory_order_acquire);
1252
1253 #if !BUILDFLAG(IS_NACL)
1254 // TODO(crbug/1432981): Remove these. They are used to investigate unexpected
1255 // failures.
1256 bool ref_found = (ref != 0);
1257 bool raced = false;
1258 #endif // !BUILDFLAG(IS_NACL)
1259
1260 if (!ref) {
1261 ref = allocator_->Allocate(size_, type_);
1262 if (!ref)
1263 return nullptr;
1264
1265 // Store the new reference in its proper location using compare-and-swap.
1266 // Use a "strong" exchange to ensure no false-negatives since the operation
1267 // cannot be retried.
1268 Reference existing = 0; // Must be mutable; receives actual value.
1269 if (!reference_->compare_exchange_strong(existing, ref,
1270 std::memory_order_release,
1271 std::memory_order_relaxed)) {
1272 // Failure indicates that something else has raced ahead, performed the
1273 // allocation, and stored its reference. Purge the allocation that was
1274 // just done and use the other one instead.
1275 DCHECK_EQ(type_, allocator_->GetType(existing));
1276 DCHECK_LE(size_, allocator_->GetAllocSize(existing));
1277 allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
1278 ref = existing;
1279 #if !BUILDFLAG(IS_NACL)
1280 raced = true;
1281 #endif // !BUILDFLAG(IS_NACL)
1282 }
1283 }
1284
1285 char* mem = allocator_->GetAsArray<char>(ref, type_, size_);
1286 if (!mem) {
1287 #if !BUILDFLAG(IS_NACL)
1288 // TODO(crbug/1432981): Remove these. They are used to investigate
1289 // unexpected failures.
1290 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "full",
1291 allocator_->IsFull());
1292 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "corrupted",
1293 allocator_->IsCorrupt());
1294 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "ref", ref);
1295 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "ref_found", ref_found);
1296 SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "raced", raced);
1297 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_", type_);
1298 SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_);
1299 if (ref == 0xC8799269) {
1300 // There are many crash reports containing the corrupted "0xC8799269"
1301 // value in |ref|. This value is actually a "magic" number to indicate
1302 // that a certain block in persistent memory was successfully allocated,
1303 // so it should not appear there. Include some extra crash keys to see if
1304 // the surrounding values were also corrupted. If so, the value before
1305 // would be the size of the allocated object, and the value after would be
1306 // the type id of the allocated object. If they are not corrupted, these
1307 // would contain |ranges_checksum| and the start of |samples_metadata|
1308 // respectively (see PersistentHistogramData struct). We do some pointer
1309 // arithmetic here -- it should theoretically be safe, unless something
1310 // went terribly wrong...
1311 SCOPED_CRASH_KEY_NUMBER(
1312 "PersistentMemoryAllocator", "ref_before",
1313 (reference_ - 1)->load(std::memory_order_relaxed));
1314 SCOPED_CRASH_KEY_NUMBER(
1315 "PersistentMemoryAllocator", "ref_after",
1316 (reference_ + 1)->load(std::memory_order_relaxed));
1317 DUMP_WILL_BE_NOTREACHED_NORETURN();
1318 return nullptr;
1319 }
1320 #endif // !BUILDFLAG(IS_NACL)
1321 // This should never happen but be tolerant if it does as corruption from
1322 // the outside is something to guard against.
1323 DUMP_WILL_BE_NOTREACHED_NORETURN();
1324 return nullptr;
1325 }
1326 return mem + offset_;
1327 }
1328
1329 } // namespace base
1330