• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
6 #define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
7 
8 #include <stdint.h>
9 
10 #include <atomic>
11 #include <memory>
12 #include <type_traits>
13 
14 #include "base/atomicops.h"
15 #include "base/base_export.h"
16 #include "base/files/file_path.h"
17 #include "base/gtest_prod_util.h"
18 #include "base/macros.h"
19 #include "base/strings/string_piece.h"
20 
21 namespace base {
22 
23 class HistogramBase;
24 class MemoryMappedFile;
25 class SharedMemory;
26 
27 // Simple allocator for pieces of a memory block that may be persistent
28 // to some storage or shared across multiple processes. This class resides
29 // under base/metrics because it was written for that purpose. It is,
30 // however, fully general-purpose and can be freely moved to base/memory
31 // if other uses are found.
32 //
33 // This class provides for thread-secure (i.e. safe against other threads
34 // or processes that may be compromised and thus have malicious intent)
35 // allocation of memory within a designated block and also a mechanism by
36 // which other threads can learn of these allocations.
37 //
38 // There is (currently) no way to release an allocated block of data because
39 // doing so would risk invalidating pointers held by other processes and
40 // greatly complicate the allocation algorithm.
41 //
42 // Construction of this object can accept new, clean (i.e. zeroed) memory
43 // or previously initialized memory. In the first case, construction must
44 // be allowed to complete before letting other allocators attach to the same
45 // segment. In other words, don't share the segment until at least one
46 // allocator has been attached to it.
47 //
48 // Note that memory not in active use is not accessed so it is possible to
49 // use virtual memory, including memory-mapped files, as backing storage with
50 // the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
51 //
52 // OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
53 // character arrays and manipulating that memory manually, the better way is
54 // generally to use the "object" methods to create and manage allocations. In
55 // this way the sizing, type-checking, and construction are all automatic. For
56 // this to work, however, every type of stored object must define two public
57 // "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
58 //
59 // struct MyPersistentObjectType {
60 //     // SHA1(MyPersistentObjectType): Increment this if structure changes!
61 //     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
62 //
63 //     // Expected size for 32/64-bit check. Update this if structure changes!
64 //     static constexpr size_t kExpectedInstanceSize = 20;
65 //
66 //     ...
67 // };
68 //
69 // kPersistentTypeId: This value is an arbitrary identifier that allows the
70 //   identification of these objects in the allocator, including the ability
71 //   to find them via iteration. The number is arbitrary but using the first
72 //   four bytes of the SHA1 hash of the type name means that there shouldn't
73 //   be any conflicts with other types that may also be stored in the memory.
74 //   The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
75 //   be used to generate the hash if the type name seems common. Use a command
76 //   like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
77 //   If the structure layout changes, ALWAYS increment this number so that
78 //   newer versions of the code don't try to interpret persistent data written
79 //   by older versions with a different layout.
80 //
81 // kExpectedInstanceSize: This value is the hard-coded number that matches
82 //   what sizeof(T) would return. By providing it explicitly, the allocator can
83 //   verify that the structure is compatible between both 32-bit and 64-bit
84 //   versions of the code.
85 //
86 // Using New manages the memory and then calls the default constructor for the
87 // object. Given that objects are persistent, no destructor is ever called
88 // automatically though a caller can explicitly call Delete to destruct it and
89 // change the type to something indicating it is no longer in use.
90 //
91 // Though persistent memory segments are transferrable between programs built
92 // for different natural word widths, they CANNOT be exchanged between CPUs
93 // of different endianness. Attempts to do so will simply see the existing data
94 // as corrupt and refuse to access any of it.
95 class BASE_EXPORT PersistentMemoryAllocator {
96  public:
97   typedef uint32_t Reference;
98 
  // These states are used to indicate the overall condition of the memory
  // segment irrespective of what is stored within it. Because the data is
  // often persistent and thus needs to be readable by different versions of
  // a program, these values are fixed and can never change. Set and read
  // via SetMemoryState()/GetMemoryState() below.
  enum MemoryState : uint8_t {
    // Persistent memory starts all zeros and so shows "uninitialized".
    MEMORY_UNINITIALIZED = 0,

    // The header has been written and the memory is ready for use.
    MEMORY_INITIALIZED = 1,

    // The data should be considered deleted. This would be set when the
    // allocator is being cleaned up. If file-backed, the file is likely
    // to be deleted but since deletion can fail for a variety of reasons,
    // having this extra status means a future reader can realize what
    // should have happened.
    MEMORY_DELETED = 2,

    // Outside code can create states starting with this number; these too
    // must also never change between code versions.
    MEMORY_USER_DEFINED = 100,
  };
121 
122   // Iterator for going through all iterable memory records in an allocator.
123   // Like the allocator itself, iterators are lock-free and thread-secure.
124   // That means that multiple threads can share an iterator and the same
125   // reference will not be returned twice.
126   //
127   // The order of the items returned by an iterator matches the order in which
128   // MakeIterable() was called on them. Once an allocation is made iterable,
129   // it is always such so the only possible difference between successive
130   // iterations is for more to be added to the end.
131   //
132   // Iteration, in general, is tolerant of corrupted memory. It will return
133   // what it can and stop only when corruption forces it to. Bad corruption
134   // could cause the same object to be returned many times but it will
135   // eventually quit.
136   class BASE_EXPORT Iterator {
137    public:
138     // Constructs an iterator on a given |allocator|, starting at the beginning.
139     // The allocator must live beyond the lifetime of the iterator. This class
140     // has read-only access to the allocator (hence "const") but the returned
141     // references can be used on a read/write version, too.
142     explicit Iterator(const PersistentMemoryAllocator* allocator);
143 
144     // As above but resuming from the |starting_after| reference. The first call
145     // to GetNext() will return the next object found after that reference. The
146     // reference must be to an "iterable" object; references to non-iterable
147     // objects (those that never had MakeIterable() called for them) will cause
148     // a run-time error.
149     Iterator(const PersistentMemoryAllocator* allocator,
150              Reference starting_after);
151 
152     // Resets the iterator back to the beginning.
153     void Reset();
154 
155     // Resets the iterator, resuming from the |starting_after| reference.
156     void Reset(Reference starting_after);
157 
158     // Returns the previously retrieved reference, or kReferenceNull if none.
159     // If constructor or reset with a starting_after location, this will return
160     // that value.
161     Reference GetLast();
162 
163     // Gets the next iterable, storing that type in |type_return|. The actual
164     // return value is a reference to the allocation inside the allocator or
165     // zero if there are no more. GetNext() may still be called again at a
166     // later time to retrieve any new allocations that have been added.
167     Reference GetNext(uint32_t* type_return);
168 
169     // Similar to above but gets the next iterable of a specific |type_match|.
170     // This should not be mixed with calls to GetNext() because any allocations
171     // skipped here due to a type mis-match will never be returned by later
172     // calls to GetNext() meaning it's possible to completely miss entries.
173     Reference GetNextOfType(uint32_t type_match);
174 
175     // As above but works using object type.
176     template <typename T>
GetNextOfType()177     Reference GetNextOfType() {
178       return GetNextOfType(T::kPersistentTypeId);
179     }
180 
181     // As above but works using objects and returns null if not found.
182     template <typename T>
GetNextOfObject()183     const T* GetNextOfObject() {
184       return GetAsObject<T>(GetNextOfType<T>());
185     }
186 
187     // Converts references to objects. This is a convenience method so that
188     // users of the iterator don't need to also have their own pointer to the
189     // allocator over which the iterator runs in order to retrieve objects.
190     // Because the iterator is not read/write, only "const" objects can be
191     // fetched. Non-const objects can be fetched using the reference on a
192     // non-const (external) pointer to the same allocator (or use const_cast
193     // to remove the qualifier).
194     template <typename T>
GetAsObject(Reference ref)195     const T* GetAsObject(Reference ref) const {
196       return allocator_->GetAsObject<T>(ref);
197     }
198 
199     // Similar to GetAsObject() but converts references to arrays of things.
200     template <typename T>
GetAsArray(Reference ref,uint32_t type_id,size_t count)201     const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
202       return allocator_->GetAsArray<T>(ref, type_id, count);
203     }
204 
205     // Convert a generic pointer back into a reference. A null reference will
206     // be returned if |memory| is not inside the persistent segment or does not
207     // point to an object of the specified |type_id|.
GetAsReference(const void * memory,uint32_t type_id)208     Reference GetAsReference(const void* memory, uint32_t type_id) const {
209       return allocator_->GetAsReference(memory, type_id);
210     }
211 
212     // As above but convert an object back into a reference.
213     template <typename T>
GetAsReference(const T * obj)214     Reference GetAsReference(const T* obj) const {
215       return allocator_->GetAsReference(obj);
216     }
217 
218    private:
219     // Weak-pointer to memory allocator being iterated over.
220     const PersistentMemoryAllocator* allocator_;
221 
222     // The last record that was returned.
223     std::atomic<Reference> last_record_;
224 
225     // The number of records found; used for detecting loops.
226     std::atomic<uint32_t> record_count_;
227 
228     DISALLOW_COPY_AND_ASSIGN(Iterator);
229   };
230 
  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;  // Total size of the underlying memory segment, in bytes.
    size_t free;   // Approximate bytes still available for allocation.
  };
236 
  // Well-known Reference values.
  enum : Reference {
    // A common "null" reference value; Allocate() returns it on failure.
    kReferenceNull = 0,
  };
241 
  // Well-known type-ID values.
  enum : uint32_t {
    // A value that will match any type when doing lookups.
    kTypeIdAny = 0x00000000,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kTypeIdTransitioning = 0xFFFFFFFF,
  };
250 
  // Special |count| values for the GetAsArray() accessors.
  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };
254 
255   // This is the standard file extension (suitable for being passed to the
256   // AddExtension() method of base::FilePath) for dumps of persistent memory.
257   static const base::FilePath::CharType kFileExtension[];
258 
259   // The allocator operates on any arbitrary block of memory. Creation and
260   // persisting or sharing of that block with another process is the
261   // responsibility of the caller. The allocator needs to know only the
262   // block's |base| address, the total |size| of the block, and any internal
263   // |page| size (zero if not paged) across which allocations should not span.
264   // The |id| is an arbitrary value the caller can use to identify a
265   // particular memory segment. It will only be loaded during the initial
266   // creation of the segment and can be checked by the caller for consistency.
267   // The |name|, if provided, is used to distinguish histograms for this
268   // allocator. Only the primary owner of the segment should define this value;
269   // other processes can learn it from the shared state. If the underlying
270   // memory is |readonly| then no changes will be made to it. The resulting
271   // object should be stored as a "const" pointer.
272   //
273   // PersistentMemoryAllocator does NOT take ownership of the memory block.
274   // The caller must manage it and ensure it stays available throughout the
275   // lifetime of this object.
276   //
277   // Memory segments for sharing must have had an allocator attached to them
278   // before actually being shared. If the memory segment was just created, it
279   // should be zeroed before being passed here. If it was an existing segment,
280   // the values here will be compared to copies stored in the shared segment
281   // as a guard against corruption.
282   //
283   // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
284   // method below) before construction if the definition of the segment can
285   // vary in any way at run-time. Invalid memory segments will cause a crash.
286   PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
287                             uint64_t id, base::StringPiece name,
288                             bool readonly);
289   virtual ~PersistentMemoryAllocator();
290 
291   // Check if memory segment is acceptable for creation of an Allocator. This
292   // doesn't do any analysis of the data and so doesn't guarantee that the
293 // contents are valid, just that the parameters won't cause the program to
294   // abort. The IsCorrupt() method will report detection of data problems
295   // found during construction and general operation.
296   static bool IsMemoryAcceptable(const void* data, size_t size,
297                                  size_t page_size, bool readonly);
298 
299   // Get the internal identifier for this persistent memory segment.
300   uint64_t Id() const;
301 
302   // Get the internal name of this allocator (possibly an empty string).
303   const char* Name() const;
304 
305   // Is this segment open only for read?
IsReadonly()306   bool IsReadonly() const { return readonly_; }
307 
308   // Manage the saved state of the memory.
309   void SetMemoryState(uint8_t memory_state);
310   uint8_t GetMemoryState() const;
311 
312   // Create internal histograms for tracking memory use and allocation sizes
313   // for allocator of |name| (which can simply be the result of Name()). This
314 // is done separately from construction for situations such as when the
315   // histograms will be backed by memory provided by this very allocator.
316   //
317   // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
318   // with the following histograms:
319   //    UMA.PersistentAllocator.name.Errors
320   //    UMA.PersistentAllocator.name.UsedPct
321   void CreateTrackingHistograms(base::StringPiece name);
322 
323   // Flushes the persistent memory to any backing store. This typically does
324   // nothing but is used by the FilePersistentMemoryAllocator to inform the
325   // OS that all the data should be sent to the disk immediately. This is
326   // useful in the rare case where something has just been stored that needs
327   // to survive a hard shutdown of the machine like from a power failure.
328   // The |sync| parameter indicates if this call should block until the flush
329   // is complete but is only advisory and may or may not have an effect
330   // depending on the capabilities of the OS. Synchronous flushes are allowed
331 // only from threads that are allowed to do I/O but since |sync| is only
332   // advisory, all flushes should be done on IO-capable threads.
333   void Flush(bool sync);
334 
335   // Direct access to underlying memory segment. If the segment is shared
336   // across threads or processes, reading data through these values does
337   // not guarantee consistency. Use with care. Do not write.
data()338   const void* data() const { return const_cast<const char*>(mem_base_); }
length()339   size_t length() const { return mem_size_; }
size()340   size_t size() const { return mem_size_; }
341   size_t used() const;
342 
343   // Get an object referenced by a |ref|. For safety reasons, the |type_id|
344   // code and size-of(|T|) are compared to ensure the reference is valid
345   // and cannot return an object outside of the memory segment. A |type_id| of
346   // kTypeIdAny (zero) will match any though the size is still checked. NULL is
347   // returned if any problem is detected, such as corrupted storage or incorrect
348   // parameters. Callers MUST check that the returned value is not-null EVERY
349   // TIME before accessing it or risk crashing! Once dereferenced, the pointer
350   // is safe to reuse forever.
351   //
352   // It is essential that the object be of a fixed size. All fields must be of
353   // a defined type that does not change based on the compiler or the CPU
354   // natural word size. Acceptable are char, float, double, and (u)intXX_t.
355   // Unacceptable are int, bool, and wchar_t which are implementation defined
356   // with regards to their size.
357   //
358   // Alignment must also be consistent. A uint64_t after a uint32_t will pad
359   // differently between 32 and 64 bit architectures. Either put the bigger
360   // elements first, group smaller elements into blocks the size of larger
361   // elements, or manually insert padding fields as appropriate for the
362   // largest architecture, including at the end.
363   //
364 // To protect against mistakes, all objects must have the attribute
365 // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
366   // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
367   // instance size is not fixed, at least one build will fail.
368   //
369   // If the size of a structure changes, the type-ID used to recognize it
370   // should also change so later versions of the code don't try to read
371   // incompatible structures from earlier versions.
372   //
373   // NOTE: Though this method will guarantee that an object of the specified
374   // type can be accessed without going outside the bounds of the memory
375   // segment, it makes no guarantees of the validity of the data within the
376   // object itself. If it is expected that the contents of the segment could
377   // be compromised with malicious intent, the object must be hardened as well.
378   //
379   // Though the persistent data may be "volatile" if it is shared with
380   // other processes, such is not necessarily the case. The internal
381   // "volatile" designation is discarded so as to not propagate the viral
382   // nature of that keyword to the caller. It can add it back, if necessary,
383   // based on knowledge of how the allocator is being used.
384   template <typename T>
GetAsObject(Reference ref)385   T* GetAsObject(Reference ref) {
386     static_assert(std::is_standard_layout<T>::value, "only standard objects");
387     static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
388     static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
389     return const_cast<T*>(reinterpret_cast<volatile T*>(
390         GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
391   }
392   template <typename T>
GetAsObject(Reference ref)393   const T* GetAsObject(Reference ref) const {
394     static_assert(std::is_standard_layout<T>::value, "only standard objects");
395     static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
396     static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
397     return const_cast<const T*>(reinterpret_cast<const volatile T*>(
398         GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
399   }
400 
401   // Like GetAsObject but get an array of simple, fixed-size types.
402   //
403   // Use a |count| of the required number of array elements, or kSizeAny.
404   // GetAllocSize() can be used to calculate the upper bound but isn't reliable
405   // because padding can make space for extra elements that were not written.
406   //
407   // Remember that an array of char is a string but may not be NUL terminated.
408   //
409   // There are no compile-time or run-time checks to ensure 32/64-bit size
410 // compatibility when using these accessors. Only use fixed-size types such
411   // as char, float, double, or (u)intXX_t.
412   template <typename T>
GetAsArray(Reference ref,uint32_t type_id,size_t count)413   T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
414     static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
415     return const_cast<T*>(reinterpret_cast<volatile T*>(
416         GetBlockData(ref, type_id, count * sizeof(T))));
417   }
418   template <typename T>
GetAsArray(Reference ref,uint32_t type_id,size_t count)419   const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
420     static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
421     return const_cast<const char*>(reinterpret_cast<const volatile T*>(
422         GetBlockData(ref, type_id, count * sizeof(T))));
423   }
424 
425   // Get the corresponding reference for an object held in persistent memory.
426   // If the |memory| is not valid or the type does not match, a kReferenceNull
427   // result will be returned.
428   Reference GetAsReference(const void* memory, uint32_t type_id) const;
429 
430   // Get the number of bytes allocated to a block. This is useful when storing
431   // arrays in order to validate the ending boundary. The returned value will
432   // include any padding added to achieve the required alignment and so could
433   // be larger than given in the original Allocate() request.
434   size_t GetAllocSize(Reference ref) const;
435 
436   // Access the internal "type" of an object. This generally isn't necessary
437   // but can be used to "clear" the type and so effectively mark it as deleted
438   // even though the memory stays valid and allocated. Changing the type is
439   // an atomic compare/exchange and so requires knowing the existing value.
440   // It will return false if the existing type is not what is expected.
441   //
442   // Changing the type doesn't mean the data is compatible with the new type.
443   // Passing true for |clear| will zero the memory after the type has been
444   // changed away from |from_type_id| but before it becomes |to_type_id| meaning
445   // that it is done in a manner that is thread-safe. Memory is guaranteed to
446   // be zeroed atomically by machine-word in a monotonically increasing order.
447   //
448   // It will likely be necessary to reconstruct the type before it can be used.
449   // Changing the type WILL NOT invalidate existing pointers to the data, either
450   // in this process or others, so changing the data structure could have
451 // unpredictable results. USE WITH CARE!
452   uint32_t GetType(Reference ref) const;
453   bool ChangeType(Reference ref,
454                   uint32_t to_type_id,
455                   uint32_t from_type_id,
456                   bool clear);
457 
458   // Allocated objects can be added to an internal list that can then be
459   // iterated over by other processes. If an allocated object can be found
460   // another way, such as by having its reference within a different object
461   // that will be made iterable, then this call is not necessary. This always
462   // succeeds unless corruption is detected; check IsCorrupted() to find out.
463   // Once an object is made iterable, its position in iteration can never
464   // change; new iterable objects will always be added after it in the series.
465   // Changing the type does not alter its "iterable" status.
466   void MakeIterable(Reference ref);
467 
468   // Get the information about the amount of free space in the allocator. The
469   // amount of free space should be treated as approximate due to extras from
470   // alignment and metadata. Concurrent allocations from other threads will
471   // also make the true amount less than what is reported.
472   void GetMemoryInfo(MemoryInfo* meminfo) const;
473 
474   // If there is some indication that the memory has become corrupted,
475   // calling this will attempt to prevent further damage by indicating to
476   // all processes that something is not as expected.
477   void SetCorrupt() const;
478 
479   // This can be called to determine if corruption has been detected in the
480 // segment, possibly by a malicious actor. Once detected, future allocations
481   // will fail and iteration may not locate all objects.
482   bool IsCorrupt() const;
483 
484   // Flag set if an allocation has failed because the memory segment was full.
485   bool IsFull() const;
486 
487   // Update those "tracking" histograms which do not get updates during regular
488   // operation, such as how much memory is currently used. This should be
489   // called before such information is to be displayed or uploaded.
490   void UpdateTrackingHistograms();
491 
492   // While the above works much like malloc & free, these next methods provide
493   // an "object" interface similar to new and delete.
494 
495   // Reserve space in the memory segment of the desired |size| and |type_id|.
496   // A return value of zero indicates the allocation failed, otherwise the
497   // returned reference can be used by any process to get a real pointer via
498   // the GetAsObject() or GetAsArray calls. The actual allocated size may be
499   // larger and will always be a multiple of 8 bytes (64 bits).
500   Reference Allocate(size_t size, uint32_t type_id);
501 
502   // Allocate and construct an object in persistent memory. The type must have
503   // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
504   // static constexpr fields that are used to ensure compatibility between
505   // software versions. An optional size parameter can be specified to force
506   // the allocation to be bigger than the size of the object; this is useful
507   // when the last field is actually variable length.
508   template <typename T>
New(size_t size)509   T* New(size_t size) {
510     if (size < sizeof(T))
511       size = sizeof(T);
512     Reference ref = Allocate(size, T::kPersistentTypeId);
513     void* mem =
514         const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
515     if (!mem)
516       return nullptr;
517     DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
518     return new (mem) T();
519   }
520   template <typename T>
New()521   T* New() {
522     return New<T>(sizeof(T));
523   }
524 
525   // Similar to New, above, but construct the object out of an existing memory
526   // block and of an expected type. If |clear| is true, memory will be zeroed
527   // before construction. Though this is not standard object behavior, it
528   // is present to match with new allocations that always come from zeroed
529   // memory. Anything previously present simply ceases to exist; no destructor
530   // is called for it so explicitly Delete() the old object first if need be.
531   // Calling this will not invalidate existing pointers to the object, either
532   // in this process or others, so changing the object could have unpredictable
533   // results. USE WITH CARE!
534   template <typename T>
New(Reference ref,uint32_t from_type_id,bool clear)535   T* New(Reference ref, uint32_t from_type_id, bool clear) {
536     DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
537     // Make sure the memory is appropriate. This won't be used until after
538     // the type is changed but checking first avoids the possibility of having
539     // to change the type back.
540     void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
541     if (!mem)
542       return nullptr;
543     // Ensure the allocator's internal alignment is sufficient for this object.
544     // This protects against coding errors in the allocator.
545     DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
546     // Change the type, clearing the memory if so desired. The new type is
547     // "transitioning" so that there is no race condition with the construction
548     // of the object should another thread be simultaneously iterating over
549     // data. This will "acquire" the memory so no changes get reordered before
550     // it.
551     if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
552       return nullptr;
553     // Construct an object of the desired type on this memory, just as if
554     // New() had been called to create it.
555     T* obj = new (mem) T();
556     // Finally change the type to the desired one. This will "release" all of
557     // the changes above and so provide a consistent view to other threads.
558     bool success =
559         ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
560     DCHECK(success);
561     return obj;
562   }
563 
564   // Deletes an object by destructing it and then changing the type to a
565   // different value (default 0).
566   template <typename T>
Delete(T * obj,uint32_t new_type)567   void Delete(T* obj, uint32_t new_type) {
568     // Get the reference for the object.
569     Reference ref = GetAsReference<T>(obj);
570     // First change the type to "transitioning" so there is no race condition
571     // where another thread could find the object through iteration while it
572     // is been destructed. This will "acquire" the memory so no changes get
573     // reordered before it. It will fail if |ref| is invalid.
574     if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
575       return;
576     // Destruct the object.
577     obj->~T();
578     // Finally change the type to the desired value. This will "release" all
579     // the changes above.
580     bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
581     DCHECK(success);
582   }
583   template <typename T>
Delete(T * obj)584   void Delete(T* obj) {
585     Delete<T>(obj, 0);
586   }
587 
588   // As above but works with objects allocated from persistent memory.
589   template <typename T>
GetAsReference(const T * obj)590   Reference GetAsReference(const T* obj) const {
591     return GetAsReference(obj, T::kPersistentTypeId);
592   }
593 
594   // As above but works with an object allocated from persistent memory.
595   template <typename T>
MakeIterable(const T * obj)596   void MakeIterable(const T* obj) {
597     MakeIterable(GetAsReference<T>(obj));
598   }
599 
 protected:
  // The kind of backing store behind the allocator's memory segment. The
  // value is passed to deallocation routines (see DeallocateLocalMemory in
  // LocalPersistentMemoryAllocator) so the segment can be released the way
  // it was acquired.
  enum MemoryType {
    MEM_EXTERNAL,  // Memory managed outside of this allocator.
    MEM_MALLOC,    // Presumably heap-allocated memory -- see allocator impls.
    MEM_VIRTUAL,   // Presumably OS virtual-memory pages -- see allocator impls.
    MEM_SHARED,    // Backed by a shared-memory object.
    MEM_FILE,      // Backed by a memory-mapped file.
  };

  // A memory base address bundled with how that memory was obtained.
  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    void* base;       // Base address of the memory segment.
    MemoryType type;  // How |base| was acquired.
  };
615 
  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);

  // Implementation of Flush that accepts how much to flush.
  virtual void FlushPartial(size_t length, bool sync);

  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;      // Type of memory allocation.
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.

 private:
  // Internal on-segment layout structures; defined in the implementation file.
  struct SharedMetadata;
  struct BlockHeader;
  // Alignment and queue-head constants; values defined in the implementation.
  static const uint32_t kAllocAlignment;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
shared_meta()640   const SharedMetadata* shared_meta() const {
641     return reinterpret_cast<const SharedMetadata*>(
642         const_cast<const char*>(mem_base_));
643   }
shared_meta()644   SharedMetadata* shared_meta() {
645     return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
646   }
647 
  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference. |queue_ok| and
  // |free_ok| presumably relax validation for queue/free blocks -- see the
  // implementation for the exact checks performed.
  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
                                       uint32_t size, bool queue_ok,
                                       bool free_ok) const;
GetBlock(Reference ref,uint32_t type_id,uint32_t size,bool queue_ok,bool free_ok)655   volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
656                                  bool queue_ok, bool free_ok) {
657       return const_cast<volatile BlockHeader*>(
658           const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
659               ref, type_id, size, queue_ok, free_ok));
660   }
661 
  // Get the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
                                    uint32_t size) const;
GetBlockData(Reference ref,uint32_t type_id,uint32_t size)665   volatile void* GetBlockData(Reference ref, uint32_t type_id,
666                               uint32_t size) {
667       return const_cast<volatile void*>(
668           const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
669               ref, type_id, size));
670   }
671 
  // Record an error in the internal histogram.
  void RecordError(int error) const;

  const size_t vm_page_size_;          // The page size used by the OS.
  const bool readonly_;                // Indicates access to read-only memory.
  // Local version of the "corrupted" flag; mutable so it can be set from
  // const methods.
  mutable std::atomic<bool> corrupt_;

  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
  HistogramBase* used_histogram_;    // Histogram recording used space.
  HistogramBase* errors_histogram_;  // Histogram recording errors.

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
};
687 
688 
// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);
  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|. |type|
  // indicates how the memory was originally acquired.
  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);

  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
711 
712 
// This allocator takes a shared-memory object and performs allocation from
// it. The memory must be previously mapped via Map() or MapAt(). The allocator
// takes ownership of the memory object.
class BASE_EXPORT SharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
                                  uint64_t id,
                                  base::StringPiece name,
                                  bool read_only);
  ~SharedPersistentMemoryAllocator() override;

  // Returns the underlying shared-memory object; ownership stays with this
  // allocator.
  SharedMemory* shared_memory() { return shared_memory_.get(); }

  // Ensure that the memory isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);

 private:
  std::unique_ptr<SharedMemory> shared_memory_;  // Owned memory object.

  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
};
738 
739 
#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);
  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

 protected:
  // PersistentMemoryAllocator:
  // Overridden so changes can be pushed out to the backing file.
  void FlushPartial(size_t length, bool sync) override;

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;  // Owned mapped file.

  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
#endif  // !defined(OS_NACL)
772 
// An allocation that is defined but not executed until required at a later
// time. This allows for potential users of an allocation to be decoupled
// from the logic that defines it. In addition, there can be multiple users
// of the same allocation or any region thereof that are guaranteed to always
// use the same space. It's okay to copy/move these objects.
//
// This is a top-level class instead of an inner class of the PMA so that it
// can be forward-declared in other header files without the need to include
// the full contents of this file.
class BASE_EXPORT DelayedPersistentAllocation {
 public:
  using Reference = PersistentMemoryAllocator::Reference;

  // Creates a delayed allocation using the specified |allocator|. When
  // needed, the memory will be allocated using the specified |type| and
  // |size|. If |offset| is given, the returned pointer will be at that
  // offset into the segment; this allows combining allocations into a
  // single persistent segment to reduce overhead and means an "all or
  // nothing" request. Note that |size| is always the total memory size
  // and |offset| is just indicating the start of a block within it.  If
  // |make_iterable| was true, the allocation will made iterable when it
  // is created; already existing allocations are not changed.
  //
  // Once allocated, a reference to the segment will be stored at |ref|.
  // This shared location must be initialized to zero (0); it is checked
  // with every Get() request to see if the allocation has already been
  // done. If reading |ref| outside of this object, be sure to do an
  // "acquire" load. Don't write to it -- leave that to this object.
  //
  // For convenience, methods taking both Atomic32 and std::atomic<Reference>
  // are defined.
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              subtle::Atomic32* ref,
                              uint32_t type,
                              size_t size,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              subtle::Atomic32* ref,
                              uint32_t type,
                              size_t size,
                              size_t offset,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              size_t offset,
                              bool make_iterable);
  ~DelayedPersistentAllocation();

  // Gets a pointer to the defined allocation. This will realize the request
  // and update the reference provided during construction. The memory will
  // be zeroed the first time it is returned, after that it is shared with
  // all other Get() requests and so shows any changes made to it elsewhere.
  //
  // If the allocation fails for any reason, null will be returned. This works
  // even on "const" objects because the allocation is already defined, just
  // delayed.
  void* Get() const;

  // Gets the internal reference value. If this returns a non-zero value then
  // a subsequent call to Get() will do nothing but convert that reference into
  // a memory location -- useful for accessing an existing allocation without
  // creating one unnecessarily.
  // NOTE(review): this is a relaxed load, unlike the "acquire" load the class
  // comment prescribes for external readers -- presumably adequate for a mere
  // allocated/not-allocated check; confirm before relying on it for ordering.
  Reference reference() const {
    return reference_->load(std::memory_order_relaxed);
  }

 private:
  // The underlying object that does the actual allocation of memory. Its
  // lifetime must exceed that of all DelayedPersistentAllocation objects
  // that use it.
  PersistentMemoryAllocator* const allocator_;

  // The desired type and size of the allocated segment plus the offset
  // within it for the defined request. Stored as 32-bit values even though
  // the constructors accept size_t -- presumably range-checked at
  // construction; verify against the implementation.
  const uint32_t type_;
  const uint32_t size_;
  const uint32_t offset_;

  // Flag indicating if allocation should be made iterable when done.
  const bool make_iterable_;

  // The location at which a reference to the allocated segment is to be
  // stored once the allocation is complete. If multiple delayed allocations
  // share the same pointer then an allocation on one will amount to an
  // allocation for all.
  volatile std::atomic<Reference>* const reference_;

  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
};
869 
870 }  // namespace base
871 
872 #endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
873