// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/shim/malloc_zone_functions_mac.h"

#include <atomic>
#include <type_traits>

#include "base/synchronization/lock.h"

namespace allocator_shim {

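// Function tables captured from each registered malloc zone. Entries are
// appended by StoreMallocZone() and may be read without holding the lock when
// dispatching to a zone.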
MallocZoneFunctions g_malloc_zones[kMaxZoneCount];
static_assert(std::is_pod<MallocZoneFunctions>::value,
              "MallocZoneFunctions must be POD");

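// Copies the function pointers of |zone| into |functions|. The six core
// allocation functions must be present; the remaining entries are optional
// and depend on the zone version.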
void StoreZoneFunctions(const ChromeMallocZone* zone,
                        MallocZoneFunctions* functions) {
  memset(functions, 0, sizeof(MallocZoneFunctions));
  functions->malloc = zone->malloc;
  functions->calloc = zone->calloc;
  functions->valloc = zone->valloc;
  functions->free = zone->free;
  functions->realloc = zone->realloc;
  functions->size = zone->size;
  CHECK(functions->malloc && functions->calloc && functions->valloc &&
        functions->free && functions->realloc && functions->size);

  // These functions might be nullptr.
  functions->batch_malloc = zone->batch_malloc;
  functions->batch_free = zone->batch_free;

  if (zone->version >= 5) {
    // Not all custom malloc zones have a memalign.
    functions->memalign = zone->memalign;
  }
  if (zone->version >= 6) {
    // This may be nullptr.
    functions->free_definite_size = zone->free_definite_size;
  }
  if (zone->version >= 10) {
    functions->claimed_address = zone->claimed_address;
  }
  if (zone->version >= 13) {
    functions->try_free_default = zone->try_free_default;
  }

  // Note that zone version 8 introduced a pressure relief callback, and
  // version 10 introduced a claimed address callback, but neither is an
  // allocation or deallocation callback and so isn't important to intercept.

  functions->context = zone;
}

namespace {

// All modifications to g_malloc_zones are gated behind this lock.
// Dispatch to a malloc zone does not need to acquire this lock.
base::Lock& GetLock() {
  static base::Lock* g_lock = new base::Lock;
  return *g_lock;
}

void EnsureMallocZonesInitializedLocked() {
  GetLock().AssertAcquired();
}

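// Number of zones currently stored in g_malloc_zones. Guarded by GetLock().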
int g_zone_count = 0;

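// Returns whether |zone| already has an entry in g_malloc_zones. The caller
// must hold GetLock().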
bool IsMallocZoneAlreadyStoredLocked(ChromeMallocZone* zone) {
  EnsureMallocZonesInitializedLocked();
  GetLock().AssertAcquired();
  for (int i = 0; i < g_zone_count; ++i) {
    if (g_malloc_zones[i].context == reinterpret_cast<void*>(zone))
      return true;
  }
  return false;
}

}  // namespace

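// Records the function pointers of |zone| in the next free slot of
// g_malloc_zones. Returns false if the zone was already stored or the table
// is full.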
bool StoreMallocZone(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  if (IsMallocZoneAlreadyStoredLocked(zone))
    return false;

  if (g_zone_count == kMaxZoneCount)
    return false;

  StoreZoneFunctions(zone, &g_malloc_zones[g_zone_count]);
  ++g_zone_count;

  // No other thread can possibly see these stores at this point. The code that
  // reads these values is triggered after this function returns, so we want to
  // guarantee that they are committed at this stage.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return true;
}
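// Thread-safe wrapper around IsMallocZoneAlreadyStoredLocked().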
bool IsMallocZoneAlreadyStored(ChromeMallocZone* zone) {
  base::AutoLock l(GetLock());
  return IsMallocZoneAlreadyStoredLocked(zone);
}

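// Returns true if |zone| was stored earlier but its current malloc pointer no
// longer matches |functions|, i.e. its function table needs to be replaced
// again.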
bool DoesMallocZoneNeedReplacing(ChromeMallocZone* zone,
                                 const MallocZoneFunctions* functions) {
  return IsMallocZoneAlreadyStored(zone) && zone->malloc != functions->malloc;
}

int GetMallocZoneCountForTesting() {
  base::AutoLock l(GetLock());
  return g_zone_count;
}

void ClearAllMallocZonesForTesting() {
  base::AutoLock l(GetLock());
  EnsureMallocZonesInitializedLocked();
  memset(g_malloc_zones, 0, kMaxZoneCount * sizeof(MallocZoneFunctions));
  g_zone_count = 0;
}

}  // namespace allocator_shim