/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_POOL_H_
#define ART_RUNTIME_MONITOR_POOL_H_

#include "monitor.h"

#include "base/allocator.h"
#ifdef __LP64__
#include <stdint.h>
#include "base/atomic.h"
#include "runtime.h"
#else
#include "base/stl_util.h"     // STLDeleteElements
#endif

namespace art {

// Abstraction to keep monitors small enough to fit in a lock word (32 bits). On 32-bit systems the
// monitor id loses the alignment bits of the Monitor*.
class MonitorPool {
 public:
  static MonitorPool* Create() {
#ifndef __LP64__
    return nullptr;
#else
    return new MonitorPool();
#endif
  }

  static Monitor* CreateMonitor(Thread* self,
                                Thread* owner,
                                ObjPtr<mirror::Object> obj,
                                int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_) {
#ifndef __LP64__
    Monitor* mon = new Monitor(self, owner, obj, hash_code);
    DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
    return mon;
#else
    return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
#endif
  }

  static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
    UNUSED(self);
    delete monitor;
#else
    GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
#endif
  }

  static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
    UNUSED(self);
    STLDeleteElements(monitors);
#else
    GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
#endif
  }

  static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
    return reinterpret_cast<Monitor*>(mon_id << LockWord::kMonitorIdAlignmentShift);
#else
    return GetMonitorPool()->LookupMonitor(mon_id);
#endif
  }

  static MonitorId MonitorIdFromMonitor(Monitor* mon) {
#ifndef __LP64__
    return reinterpret_cast<MonitorId>(mon) >> LockWord::kMonitorIdAlignmentShift;
#else
    return mon->GetMonitorId();
#endif
  }

  static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
    UNUSED(self);
    return MonitorIdFromMonitor(mon);
#else
    return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
  }

  static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
    return nullptr;
#else
    return Runtime::Current()->GetMonitorPool();
#endif
  }

  ~MonitorPool() {
#ifdef __LP64__
    FreeInternal();
#endif
  }

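  // A minimal usage sketch (illustrative only, not taken from real call sites;
  // |self|, |owner|, |obj|, and |hash| are assumed to be valid in context). It
  // shows how the static API pairs up identically on 32- and 64-bit builds:
  //
  //   Monitor* mon = MonitorPool::CreateMonitor(self, owner, obj, hash);
  //   MonitorId id = MonitorPool::ComputeMonitorId(mon, self);
  //   DCHECK_EQ(MonitorPool::MonitorFromMonitorId(id), mon);
  //   MonitorPool::ReleaseMonitor(self, mon);  // Invalidates |mon|.
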
 private:
#ifdef __LP64__
  // When we create a monitor pool, threads have not yet been initialized, so ignore thread-safety
  // analysis.
  MonitorPool() NO_THREAD_SAFETY_ANALYSIS;

  void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);

  // Release all chunks and metadata. This is done on shutdown, when threads have been destroyed,
  // so ignore thread-safety analysis.
  void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;

  Monitor* CreateMonitorInPool(Thread* self,
                               Thread* owner,
                               ObjPtr<mirror::Object> obj,
                               int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
  void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);

  // Note: This is safe as we do not ever move chunks.  All needed entries in the monitor_chunks_
  // data structure are read-only once we get here.  Updates happen-before this call because
  // the lock word was stored with release semantics and we read it with acquire semantics to
  // retrieve the id.
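  // Worked example (illustrative, with kChunkSize == 4096): mon_id 1024 maps
  // to byte offset 8192 (1024 << 3), i.e. chunk index 2, which decodes to
  // top_index 0, list_index 2, offset_in_chunk 0: the monitor at the start of
  // monitor_chunks_[0][2].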
  Monitor* LookupMonitor(MonitorId mon_id) {
    size_t offset = MonitorIdToOffset(mon_id);
    size_t index = offset / kChunkSize;
    size_t top_index = index / kMaxListSize;
    size_t list_index = index % kMaxListSize;
    size_t offset_in_chunk = offset % kChunkSize;
    uintptr_t base = monitor_chunks_[top_index][list_index];
    return reinterpret_cast<Monitor*>(base + offset_in_chunk);
  }

  static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
    uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
    return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
  }

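  // Recovers the id for |mon| by scanning all allocated chunks for the one
  // that contains it and rebuilding the offset from the chunk's position;
  // linear in the number of chunks, unlike the shift-based LookupMonitor.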
  MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
    MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
    for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
      for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
        if (j >= num_chunks_ && i == current_chunk_list_index_) {
          break;
        }
        uintptr_t chunk_addr = monitor_chunks_[i][j];
        if (IsInChunk(chunk_addr, mon)) {
          return OffsetToMonitorId(
              reinterpret_cast<uintptr_t>(mon) - chunk_addr
              + i * (kMaxListSize * kChunkSize) + j * kChunkSize);
        }
      }
    }
    LOG(FATAL) << "Did not find chunk that contains monitor.";
    return 0;
  }

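  // Monitor ids are byte offsets into the pool scaled down by the 8-byte
  // monitor alignment; the shift by 3 below matches kMonitorAlignment == 8
  // (defined further down).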
  static constexpr size_t MonitorIdToOffset(MonitorId id) {
    return id << 3;
  }

  static constexpr MonitorId OffsetToMonitorId(size_t offset) {
    return static_cast<MonitorId>(offset >> 3);
  }

  static constexpr size_t ChunkListCapacity(size_t index) {
    return kInitialChunkStorage << index;
  }
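  // Capacities thus double per list index: with kInitialChunkStorage == 8
  // (debug builds) the lists hold 8, 16, 32, ... chunk pointers; with 256
  // (release builds) they hold 256, 512, 1024, ... .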

  // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
  static constexpr size_t kMonitorAlignment = 8;
  // Size of a monitor, rounded up to a multiple of alignment.
  static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
                                                -kMonitorAlignment;
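  // The expression above is the usual power-of-two round-up: -kMonitorAlignment
  // equals ~(kMonitorAlignment - 1) in two's complement, so e.g. a 100-byte
  // Monitor would be padded to 104 bytes.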
  // Size of the chunks holding the actual monitors. The bottom bits of the monitor id are the
  // index into such a chunk. We could shrink this to the storage actually used in a chunk,
  // i.e., kChunkCapacity * kAlignedMonitorSize, but that would mean real divisions instead of
  // power-of-two shifts.
  static constexpr size_t kChunkSize = 4096;
  static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2");
  static constexpr size_t kChunkCapacity = kChunkSize / kAlignedMonitorSize;
  // The number of chunks of storage that can be referenced by the initial chunk list.
  // The total number of usable monitor chunks is typically 255 times this number, so it
  // should be large enough that we don't run out. We run out of address bits if it's > 512.
  // Currently we set it a bit smaller, to save half a page per process.  We make it tiny in
  // debug builds to catch growth errors. The only value we really expect to tune.
  static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 8U : 256U;
  static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2");
  // The number of lists, each containing pointers to storage chunks.
  static constexpr size_t kMaxChunkLists = 8;  // Dictated by 3 bit index. Don't increase above 8.
  static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2");
  static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1);
  // We lose 3 bits in monitor id due to 3 bit monitor_chunks_ index, and gain it back from
  // the 3 bit alignment constraint on monitors:
  static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize),
      "Monitor id bits don't fit");
  static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2");

  // Array of pointers to lists (again arrays) of pointers to chunks containing monitors.
  // Zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks.
  // Each subsequent list is twice as large as the preceding one.
  // Monitor Ids are effectively interpreted as follows:
  //     Top 3 bits (of 28): index into monitor_chunks_.
  //     Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i].
  //     Last 9 bits: offset within chunk, expressed as multiple of kMonitorAlignment.
  // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of
  // monitors, which is 0.5GB of monitors.  With this maximum setting, the largest chunk list
  // contains 64K entries, and we make full use of the available index space. With a
  // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors.
  // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_.
  // No field in this entire data structure is ever updated once a monitor id whose lookup
  // requires it has been made visible to another thread.  Thus readers never race with
  // updates, in spite of the fact that they acquire no locks.
  uintptr_t* monitor_chunks_[kMaxChunkLists];  // uintptr_t is really a Monitor*.
  // Highest currently used index in monitor_chunks_. Used for newly allocated chunks.
  size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far.
  size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // After the initial allocation, this is always equal to
  // ChunkListCapacity(current_chunk_list_index_).
  size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  using Allocator = TrackingAllocator<uint8_t, kAllocatorTagMonitorPool>;
  Allocator allocator_;

  // Start of free list of monitors.
  // Note: these point to the right memory regions, but do *not* denote initialized objects.
  Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_POOL_H_