// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"

namespace base {

class HistogramBase;
class MemoryMappedFile;
class SharedMemory;

// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
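  //
  // A typical iteration loop (an illustrative sketch; |allocator| is assumed
  // to be an existing PersistentMemoryAllocator* and |MyRecord| with type-id
  // |kMyRecordType| is a hypothetical caller-defined record type):
  //
  //   PersistentMemoryAllocator::Iterator iter(allocator);
  //   uint32_t type_id;
  //   PersistentMemoryAllocator::Reference ref;
  //   while ((ref = iter.GetNext(&type_id)) != 0) {
  //     if (type_id != kMyRecordType)
  //       continue;
  //     const MyRecord* record = iter.GetAsObject<MyRecord>(ref, type_id);
  //     if (!record)
  //       continue;  // Failed the size/type check; never trust blindly.
  //     // ... read fields of |record| ...
  //   }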
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given |allocator|, starting at the beginning.
    // The allocator must live beyond the lifetime of the iterator. This class
    // has read-only access to the allocator (hence "const") but the returned
    // references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the |starting_after| reference. The first call
    // to GetNext() will return the next object found after that reference. The
    // reference must be to an "iterable" object; references to non-iterable
    // objects (those that never had MakeIterable() called for them) will cause
    // a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    // Gets the next iterable, storing that type in |type_return|. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return);

    // Similar to above but gets the next iterable of a specific |type_match|.
    // This should not be mixed with calls to GetNext() because any allocations
    // skipped here due to a type mis-match will never be returned by later
    // calls to GetNext() meaning it's possible to completely miss entries.
    Reference GetNextOfType(uint32_t type_match);

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref, uint32_t type_id) const {
      return allocator_->GetAsObject<T>(ref, type_id);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    const PersistentMemoryAllocator* allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;

    DISALLOW_COPY_AND_ASSIGN(Iterator);
  };

  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    kReferenceNull = 0  // A common "null" reference value.
  };

  enum : uint32_t {
    kTypeIdAny = 0  // Match any type-id inside GetAsObject().
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];

  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's |base| address, the total |size| of the block, and any internal
  // |page| size (zero if not paged) across which allocations should not span.
  // The |id| is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The |name|, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this value;
  // other processes can learn it from the shared state. If the underlying
  // memory is |readonly| then no changes will be made to it. The resulting
  // object should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
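  //
  // A minimal construction sketch (assumptions: |base| points to a zeroed,
  // caller-managed 64 KiB block; the id and name are arbitrary example
  // values):
  //
  //   const size_t kSize = 64 << 10;
  //   void* base = ...;  // Caller-owned memory, zeroed if newly created.
  //   if (PersistentMemoryAllocator::IsMemoryAcceptable(base, kSize,
  //                                                     /*page_size=*/0,
  //                                                     /*readonly=*/false)) {
  //     PersistentMemoryAllocator allocator(base, kSize, /*page_size=*/0,
  //                                         /*id=*/0x1234, "ExampleAllocator",
  //                                         /*readonly=*/false);
  //     // ... use |allocator|; the memory must outlive it ...
  //   }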
  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);
  virtual ~PersistentMemoryAllocator();

  // Check if memory segment is acceptable for creation of an Allocator. This
  // doesn't do any analysis of the data and so doesn't guarantee that the
  // contents are valid, just that the parameters won't cause the program to
  // abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
  static bool IsMemoryAcceptable(const void* data, size_t size,
                                 size_t page_size, bool readonly);

  // Get the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Get the internal name of this allocator (possibly an empty string).
  const char* Name() const;

  // Is this segment open only for read?
  bool IsReadonly() { return readonly_; }

  // Create internal histograms for tracking memory use and allocation sizes
  // for allocator of |name| (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
  // with the following histograms:
  //    UMA.PersistentAllocator.name.Allocs
  //    UMA.PersistentAllocator.name.UsedPct
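  //
  // For example (a sketch; assumes the histogram names above have already
  // been added to histograms.xml):
  //
  //   allocator->CreateTrackingHistograms(allocator->Name());
  //   // ... later, before the data is displayed or uploaded ...
  //   allocator->UpdateTrackingHistograms();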
  void CreateTrackingHistograms(base::StringPiece name);

  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t used() const;

  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
  // code and size-of(|T|) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A |type_id| of
  // kTypeIdAny (zero) will match any type, though the size is still checked.
  // NULL is returned if any problem is detected, such as corrupted storage or
  // incorrect parameters. Callers MUST check that the returned value is
  // not-null EVERY TIME before accessing it or risk crashing! Once
  // dereferenced, the pointer is safe to reuse forever.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. The caller can add it back, if
  // necessary, based on knowledge of how the allocator is being used.
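  //
  // Example (an illustrative sketch; |MyRecord| and |kMyRecordType| are
  // hypothetical caller-defined names, not part of this API):
  //
  //   struct MyRecord {
  //     uint32_t value;
  //     char name[16];
  //   };
  //   const uint32_t kMyRecordType = 0x1;
  //
  //   MyRecord* record = allocator->GetAsObject<MyRecord>(ref, kMyRecordType);
  //   if (record)  // MUST be checked on every call.
  //     record->value = 42;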
  template <typename T>
  T* GetAsObject(Reference ref, uint32_t type_id) {
    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
    return const_cast<T*>(
        reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
  }
  template <typename T>
  const T* GetAsObject(Reference ref, uint32_t type_id) const {
    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
    return const_cast<const T*>(
        reinterpret_cast<const volatile T*>(GetBlockData(
            ref, type_id, sizeof(T))));
  }

  // Get the number of bytes allocated to a block. This is useful when storing
  // arrays in order to validate the ending boundary. The returned value will
  // include any padding added to achieve the required alignment and so could
  // be larger than given in the original Allocate() request.
  size_t GetAllocSize(Reference ref) const;

  // Access the internal "type" of an object. This generally isn't necessary
  // but can be used to "clear" the type and so effectively mark it as deleted
  // even though the memory stays valid and allocated. Changing the type is
  // an atomic compare/exchange and so requires knowing the existing value.
  // ChangeType() will return false if the existing type is not what is
  // expected.
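  //
  // For example, to mark an object as deleted without releasing its memory
  // (a sketch; |kMyRecordType| and |kDeletedType| are hypothetical type-ids):
  //
  //   if (!allocator->ChangeType(ref, kDeletedType, kMyRecordType)) {
  //     // The object was not of type |kMyRecordType| at the time of the
  //     // call, perhaps because another thread or process changed it first.
  //   }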
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);

  // Reserve space in the memory segment of the desired |size| and |type_id|.
  // A return value of zero indicates the allocation failed, otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() call.
  Reference Allocate(size_t size, uint32_t type_id);

  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
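  //
  // A typical allocate-initialize-publish sequence (a sketch; |MyRecord| and
  // |kMyRecordType| are hypothetical as above):
  //
  //   PersistentMemoryAllocator::Reference ref =
  //       allocator->Allocate(sizeof(MyRecord), kMyRecordType);
  //   if (ref) {
  //     MyRecord* record = allocator->GetAsObject<MyRecord>(ref, kMyRecordType);
  //     if (record) {
  //       record->value = 42;            // Initialize before publishing.
  //       allocator->MakeIterable(ref);  // Now visible to iterators.
  //     }
  //   }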
  void MakeIterable(Reference ref);

  // Get the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;

  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  void SetCorrupt() const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during regular
  // operation, such as how much memory is currently used. This should be
  // called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();

 protected:
  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.

 private:
  struct SharedMetadata;
  struct BlockHeader;
  static const uint32_t kAllocAlignment;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference.
  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
                                       uint32_t size, bool queue_ok,
                                       bool free_ok) const;
  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
                                 bool queue_ok, bool free_ok) {
      return const_cast<volatile BlockHeader*>(
          const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
              ref, type_id, size, queue_ok, free_ok));
  }

  // Get the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
                                    uint32_t size) const;
  volatile void* GetBlockData(Reference ref, uint32_t type_id,
                              uint32_t size) {
      return const_cast<volatile void*>(
          const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
              ref, type_id, size));
  }

  const bool readonly_;              // Indicates access to read-only memory.
  std::atomic<bool> corrupt_;        // Local version of "corrupted" flag.

  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
  HistogramBase* used_histogram_;    // Histogram recording used space.

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
};


// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
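//
// Example "death rattle" usage (a sketch; the size, id, and name are
// illustrative values and WriteSegmentToDisk() is a hypothetical helper,
// not part of this API):
//
//   LocalPersistentMemoryAllocator allocator(64 << 10, /*id=*/0x1234,
//                                            "ExampleLocal");
//   // ... allocate and fill records during normal operation ...
//   // At shutdown, persist the raw segment bytes:
//   WriteSegmentToDisk(allocator.data(), allocator.used());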
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);
  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static void* AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|.
  static void DeallocateLocalMemory(void* memory, size_t size);

  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};


// This allocator takes a shared-memory object and performs allocation from
// it. The memory must be previously mapped via Map() or MapAt(). The allocator
// takes ownership of the memory object.
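//
// For example (a sketch; the size, id, and name are illustrative values,
// CreateAndMapAnonymous() is one possible way to set up the segment, and
// error handling is omitted):
//
//   std::unique_ptr<SharedMemory> shm(new SharedMemory());
//   shm->CreateAndMapAnonymous(64 << 10);  // Create and map the segment.
//   if (SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
//     SharedPersistentMemoryAllocator allocator(std::move(shm), /*id=*/0x1234,
//                                               "ExampleShared",
//                                               /*read_only=*/false);
//     // ... use |allocator| ...
//   }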
class BASE_EXPORT SharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
                                  uint64_t id,
                                  base::StringPiece name,
                                  bool read_only);
  ~SharedPersistentMemoryAllocator() override;

  SharedMemory* shared_memory() { return shared_memory_.get(); }

  // Ensure that the memory isn't so invalid that it would crash when passed
  // to the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);

 private:
  std::unique_ptr<SharedMemory> shared_memory_;

  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
};


#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
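//
// For example, reopening a previously saved dump read-only (a sketch; |path|
// is an illustrative base::FilePath and error handling is omitted):
//
//   std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
//   mmfile->Initialize(path);  // Read-only mapping of the whole file.
//   if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile,
//                                                       /*read_only=*/true)) {
//     FilePersistentMemoryAllocator allocator(std::move(mmfile),
//                                             /*max_size=*/0, /*id=*/0, "",
//                                             /*read_only=*/true);
//     // ... iterate over the records in |allocator| ...
//   }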
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);
  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passed to
  // the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;

  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
#endif  // !defined(OS_NACL)

}  // namespace base

#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_