// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_

#include <stddef.h>
#include <stdint.h>

#include <limits>
#include <new>
#include <utility>

#include "base/bits.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/process/process_metrics.h"
#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "build/build_config.h"

namespace base {
namespace trace_event {

class AllocationRegisterTest;

namespace internal {

// Allocates a region of virtual address space of |size| rounded up to the
// system page size. The memory is zeroed by the system. A guard page is
// added after the end.
void* AllocateGuardedVirtualMemory(size_t size);

// Frees a region of virtual address space allocated by a call to
// |AllocateGuardedVirtualMemory|.
void FreeGuardedVirtualMemory(void* address, size_t allocated_size);

// Hash map that mmaps memory only once in the constructor. Its API is
// similar to std::unordered_map, except that an index (KVIndex) is used to
// address entries instead of iterators.
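//
// A minimal usage sketch (hypothetical instantiation, for illustration only;
// the template arguments and capacity below are not taken from real callers):
//
//   using ExampleMap = FixedHashMap<1 << 10, int, int, std::hash<int>>;
//   ExampleMap map(/* capacity */ 512);
//   auto result = map.Insert(42, 1);        // {index, true} on first insert
//   ExampleMap::KVIndex i = map.Find(42);   // same index as result.first
//   map.Get(i).second = 2;                  // values are mutable in place
//   map.Remove(i);                          // the cell goes to the free list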
template <size_t NumBuckets, class Key, class Value, class KeyHasher>
class FixedHashMap {
  // To keep things simple we don't call destructors.
  static_assert(is_trivially_destructible<Key>::value &&
                    is_trivially_destructible<Value>::value,
                "Key and Value shouldn't have destructors");

 public:
  using KVPair = std::pair<const Key, Value>;

  // For implementation simplicity the API uses an integer index instead
  // of iterators. Most operations (except Find) on KVIndex are O(1).
  using KVIndex = size_t;
  enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };

  // Capacity controls how many items this hash map can hold, and largely
  // affects memory footprint.
  explicit FixedHashMap(size_t capacity)
      : num_cells_(capacity),
        num_inserts_dropped_(0),
        cells_(static_cast<Cell*>(
            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
        buckets_(static_cast<Bucket*>(
            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
        free_list_(nullptr),
        next_unused_cell_(0) {}

  ~FixedHashMap() {
    FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
    FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
  }

  // Returns {kInvalidKVIndex, false} if the table is full.
  std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
    Cell** p_cell = Lookup(key);
    Cell* cell = *p_cell;
    if (cell) {
      return {static_cast<KVIndex>(cell - cells_), false};  // not inserted
    }

    // Get a free cell and link it.
    cell = GetFreeCell();
    if (!cell) {
      if (num_inserts_dropped_ <
          std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
        ++num_inserts_dropped_;
      }
      return {kInvalidKVIndex, false};
    }
    *p_cell = cell;
    cell->p_prev = p_cell;
    cell->next = nullptr;

    // Initialize key/value pair. Since key is 'const Key' this is the
    // only way to initialize it.
    new (&cell->kv) KVPair(key, value);

    return {static_cast<KVIndex>(cell - cells_), true};  // inserted
  }

  void Remove(KVIndex index) {
    DCHECK_LT(index, next_unused_cell_);

    Cell* cell = &cells_[index];

    // Unlink the cell.
    *cell->p_prev = cell->next;
    if (cell->next) {
      cell->next->p_prev = cell->p_prev;
    }
    cell->p_prev = nullptr;  // mark as free

    // Add it to the free list.
    cell->next = free_list_;
    free_list_ = cell;
  }

  KVIndex Find(const Key& key) const {
    Cell* cell = *Lookup(key);
    return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
  }

  KVPair& Get(KVIndex index) {
    return cells_[index].kv;
  }

  const KVPair& Get(KVIndex index) const {
    return cells_[index].kv;
  }

  // Finds the next index that has a KVPair associated with it. The search
  // starts at the specified index. Returns kInvalidKVIndex if nothing was
  // found. To find the first valid index, call this function with 0, then
  // continue calling with last_index + 1 until kInvalidKVIndex is returned.
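  //
  // For example, iterating over all live entries might look like this
  // (illustrative sketch only; |map| stands for any FixedHashMap instance):
  //
  //   for (auto i = map.Next(0); i != kInvalidKVIndex; i = map.Next(i + 1)) {
  //     const auto& kv = map.Get(i);
  //     // ... use kv.first / kv.second ...
  //   }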
  KVIndex Next(KVIndex index) const {
    for (; index < next_unused_cell_; ++index) {
      if (cells_[index].p_prev) {
        return index;
      }
    }
    return kInvalidKVIndex;
  }

  // Estimates number of bytes used in allocated memory regions.
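  //
  // For example, assuming (hypothetically) 4 KiB pages and sizeof(Cell) == 48,
  // 100 touched cells occupy 4800 bytes of cell storage, which rounds up to
  // 8192 bytes (two pages); the bucket array is accounted for the same way.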
  size_t EstimateUsedMemory() const {
    size_t page_size = base::GetPageSize();
    // |next_unused_cell_| is the first cell that wasn't touched, i.e.
    // it's the number of touched cells.
    return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
           bits::Align(sizeof(Bucket) * NumBuckets, page_size);
  }

  size_t num_inserts_dropped() const { return num_inserts_dropped_; }

 private:
  friend base::trace_event::AllocationRegisterTest;

  struct Cell {
    KVPair kv;
    Cell* next;

    // Conceptually this is |prev| in a doubly linked list. However, buckets
    // also participate in the bucket's cell list: they point to the list's
    // head and need to be linked / unlinked in the same way. To treat these
    // two cases uniformly, instead of |prev| we store a pointer to the Cell*
    // that points to this Cell. So |p_prev| points to a bucket for the first
    // cell in a list, and to |next| of the previous cell for any other cell.
    // With that, Lookup() is the only function that handles buckets and cells
    // differently.
    // If |p_prev| is nullptr, the cell is in the free list.
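    //
    // A sketch of one bucket's chain (illustrative only):
    //
    //   buckets_[h] -> cellA -next-> cellB -next-> nullptr
    //   cellA.p_prev == &buckets_[h]
    //   cellB.p_prev == &cellA.next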
    Cell** p_prev;
  };

  using Bucket = Cell*;

  // Returns a pointer to the cell that contains or should contain the entry
  // for |key|. The pointer may point at an element of |buckets_| or at the
  // |next| member of an element of |cells_|.
  Cell** Lookup(const Key& key) const {
    // The list head is in |buckets_| at the hash offset.
    Cell** p_cell = &buckets_[Hash(key)];

    // Chase down the list until the cell that holds |key| is found,
    // or until the list ends.
    while (*p_cell && (*p_cell)->kv.first != key) {
      p_cell = &(*p_cell)->next;
    }

    return p_cell;
  }

  // Returns a cell that is not being used to store an entry (either by
  // recycling from the free list or by taking a fresh cell). May return
  // nullptr if the hash table has run out of memory.
  Cell* GetFreeCell() {
    // First try to re-use a cell from the free list.
    if (free_list_) {
      Cell* cell = free_list_;
      free_list_ = cell->next;
      return cell;
    }

    // If the hash table has too little capacity (when too little address space
    // was reserved for |cells_|), return nullptr.
    if (next_unused_cell_ >= num_cells_) {
      return nullptr;
    }

    // Otherwise pick the next cell that has not been touched before.
    return &cells_[next_unused_cell_++];
  }

  // Returns a value in the range [0, NumBuckets - 1] (inclusive).
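  // NumBuckets & ~(NumBuckets - 1) isolates the lowest set bit of NumBuckets,
  // so it equals NumBuckets only when NumBuckets is a power of two. In that
  // case, e.g. NumBuckets == 8, the mask path computes hash & 7, which picks
  // the same bucket as hash % 8 but avoids the division.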
  size_t Hash(const Key& key) const {
    if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
      // NumBuckets is a power of 2.
      return KeyHasher()(key) & (NumBuckets - 1);
    } else {
      return KeyHasher()(key) % NumBuckets;
    }
  }

  // Number of cells.
  size_t const num_cells_;

  // Number of calls to Insert() that were lost because the hashtable was full.
  size_t num_inserts_dropped_;

  // The array of cells. This array is backed by mmapped memory. Lower indices
  // are accessed first, higher indices are accessed only when the |free_list_|
  // is empty. This is to minimize the amount of resident memory used.
  Cell* const cells_;

  // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
  // contain the pointer to the linked list of cells for |Hash(key)|.
  // This array is backed by mmapped memory.
  mutable Bucket* buckets_;

  // The head of the free list.
  Cell* free_list_;

  // The index of the first element of |cells_| that has not been used before.
  // If the free list is empty and a new cell is needed, the cell at this index
  // is used. This is the high water mark for the number of entries stored.
  size_t next_unused_cell_;

  DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
};

}  // namespace internal

class TraceEventMemoryOverhead;

// The allocation register keeps track of all allocations that have not been
// freed. Internally it has two hashtables: one for Backtraces and one for
// actual allocations. Sizes of both hashtables are fixed, and this class
// allocates (mmaps) only in its constructor.
//
// When either hash table hits max size, new inserts are dropped.
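//
// A minimal usage sketch (illustrative only; |ptr|, |size| and |ctx| are
// hypothetical values, with |ctx| standing for an AllocationContext captured
// elsewhere):
//
//   AllocationRegister reg;
//   reg.Insert(ptr, size, ctx);            // record a live allocation
//   AllocationRegister::Allocation allocation;
//   if (reg.Get(ptr, &allocation)) {
//     // inspect allocation.size / allocation.context
//   }
//   reg.Remove(ptr);                       // the allocation was freed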
class BASE_EXPORT AllocationRegister {
 public:
  // Details about an allocation.
  struct Allocation {
    const void* address;
    size_t size;
    AllocationContext context;
  };

  // An iterator that iterates entries in no particular order.
  class BASE_EXPORT ConstIterator {
   public:
    void operator++();
    bool operator!=(const ConstIterator& other) const;
    Allocation operator*() const;

   private:
    friend class AllocationRegister;
    using AllocationIndex = size_t;

    ConstIterator(const AllocationRegister& alloc_register,
                  AllocationIndex index);

    const AllocationRegister& register_;
    AllocationIndex index_;
  };

  AllocationRegister();
  AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);

  ~AllocationRegister();

  // Inserts allocation details into the table. If the address was present
  // already, its details are updated. |address| must not be null.
  //
  // Returns true if an insert occurred. Inserts may fail because the table
  // is full.
  bool Insert(const void* address,
              size_t size,
              const AllocationContext& context);

  // Removes the address from the table if it is present. It is ok to call this
  // with a null pointer.
  void Remove(const void* address);

  // Finds allocation for the address and fills |out_allocation|.
  bool Get(const void* address, Allocation* out_allocation) const;

  ConstIterator begin() const;
  ConstIterator end() const;

  // Estimates memory overhead including |sizeof(AllocationRegister)|.
  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;

 private:
  friend AllocationRegisterTest;

// Expect a lower number of allocations from mobile platforms. The load factor
// (capacity / bucket count) is kept below 10 for optimal hashing. The number
// of buckets should be changed together with AddressHasher.
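// For example, on desktop the load factor is 5000000 / (1 << 19) ~ 9.5, and
// on Android / iOS it is 1500000 / (1 << 18) ~ 5.7, both below the target of
// 10.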
#if defined(OS_ANDROID) || defined(OS_IOS)
  static const size_t kAllocationBuckets = 1 << 18;
  static const size_t kAllocationCapacity = 1500000;
#else
  static const size_t kAllocationBuckets = 1 << 19;
  static const size_t kAllocationCapacity = 5000000;
#endif

  // 2^16 works well with BacktraceHasher. When increasing this number, make
  // sure BacktraceHasher still produces a low number of collisions.
  static const size_t kBacktraceBuckets = 1 << 16;
#if defined(OS_ANDROID)
  static const size_t kBacktraceCapacity = 32000;  // 22K was observed
#else
  static const size_t kBacktraceCapacity = 55000;  // 45K was observed on Linux
#endif

  struct BacktraceHasher {
    size_t operator () (const Backtrace& backtrace) const;
  };

  using BacktraceMap = internal::FixedHashMap<
      kBacktraceBuckets,
      Backtrace,
      size_t, // Number of references to the backtrace (the key). Incremented
              // when an allocation that references the backtrace is inserted,
              // and decremented when the allocation is removed. When the
              // number drops to zero, the backtrace is removed from the map.
      BacktraceHasher>;

  struct AllocationInfo {
    size_t size;
    const char* type_name;
    BacktraceMap::KVIndex backtrace_index;
  };

  struct AddressHasher {
    size_t operator () (const void* address) const;
  };

  using AllocationMap = internal::FixedHashMap<
      kAllocationBuckets,
      const void*,
      AllocationInfo,
      AddressHasher>;

  BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
  void RemoveBacktrace(BacktraceMap::KVIndex index);

  Allocation GetAllocation(AllocationMap::KVIndex) const;

  AllocationMap allocations_;
  BacktraceMap backtraces_;

  // Sentinel used when the |backtraces_| table is full.
  //
  // This is a slight abstraction leak that allows for constant propagation. It
  // knows that the sentinel will be the first item inserted into the table
  // and that the first index returned will be 0. The constructor DCHECKs
  // this assumption.
  enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };

  DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};

}  // namespace trace_event
}  // namespace base

#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_