/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "dex/dex_file.h"
#include "elf/elf_debug_reader.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_memory_region.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <type_traits>
#include <unordered_set>
#include <vector>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are three ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, using the entry seqlocks.
//    * The seqlock is a monotonically increasing counter, which
//      is even if the entry is valid and odd if it is invalid.
//      It is set to an even value after all other fields are set,
//      and it is set to an odd value before the entry is deleted.
//    * This makes it possible to safely read the symfile data:
//      * The reader should read the value of the seqlock both
//        before and after reading the symfile. If the seqlock
//        values match and are even, the copy is consistent.
//    * Entries are recycled, but never freed, which guarantees
//      that the seqlock is not overwritten by a random value.
//    * The linked list is one level higher. The next-pointer
//      must always point to an entry with an even seqlock, which
//      ensures that entries of a crashed process can be read.
//      This means the entry must be added after it is created
//      and it must be removed before it is invalidated (odd).
//    * When iterating over the linked list, the reader can use
//      the timestamps to ensure that the current and next entry
//      were not deleted, using the following steps:
//        1) Read the next pointer and the next entry's seqlock.
//        2) Read the symfile and re-read the next pointer.
//        3) Re-read both the current and next seqlock.
//        4) Go to step 1, using the new entry and seqlock.
//
// 3) Asynchronously, using the global seqlock (illustrative sketch below).
//    * The seqlock is a monotonically increasing counter which is incremented
//      before and after every modification of the linked list. An odd value of
//      the counter means the linked list is being modified (it is locked).
//    * The tool should read the value of the seqlock both before and after
//      copying the linked list. If the seqlock values match and are even,
//      the copy is consistent. Otherwise, the reader should try again.
//      * Note that using the data directly while it is being modified
//        might crash the tool. Therefore, the only safe way is to make
//        a copy and use the copy only after the seqlock has been checked.
//      * Note that the process might even free and munmap the data while
//        it is being copied, therefore the reader should either handle
//        SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//    * The timestamps on the entry record the time when the entry was
//      created, which is relevant if the unwinding is not live and is
//      postponed until much later. All timestamps must be unique.
//    * For full conformance with the C++ memory model, all seqlock-
//      protected accesses should be atomic. We currently do this in the
//      more critical cases. The rest will have to be fixed before
//      attempting to run TSAN on this code.
//
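// Illustrative reader-side sketch of method 3 above. This is not part of ART;
// it would run in the tool's process, with ReadRemote standing in for a
// hypothetical process_vm_readv-style helper that copies target memory:
//
//   uint32_t seq1, seq2;
//   Snapshot copy;
//   do {
//     seq1 = ReadRemote(&descriptor->seqlock_);
//     copy = ReadRemote(list reachable from descriptor->head_);
//     seq2 = ReadRemote(&descriptor->seqlock_);
//   } while (seq1 != seq2 || (seq1 & 1) != 0);  // Retry if locked or modified.
//   // 'copy' is now a consistent snapshot and is safe to parse.
//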

namespace art HIDDEN {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

// Most loads and stores need no synchronization since all memory is protected by the global locks.
// Some writes are synchronized so libunwindstack can read the memory safely from another process.
constexpr std::memory_order kNonRacingRelaxed = std::memory_order_relaxed;

// Size of JIT code range covered by each packed JITCodeEntry.
constexpr uint32_t kJitRepackGroupSize = 64 * KB;

// Automatically call the repack method every 'n' new entries.
constexpr uint32_t kJitRepackFrequency = 64;

}  // namespace art

// Public binary interface between ART and native tools (gdb, libunwind, etc).
// The fields below need to be exported and have special names as per the gdb api.
namespace art EXPORT {
extern "C" {
enum JITAction {
  JIT_NOACTION = 0,
  JIT_REGISTER_FN,
  JIT_UNREGISTER_FN
};

// Public/stable binary interface.
struct JITCodeEntryPublic {
  std::atomic<const JITCodeEntry*> next_;  // Atomic to guarantee consistency after crash.
  const JITCodeEntry* prev_ = nullptr;     // For linked list deletion. Unused in readers.
  const uint8_t* symfile_addr_ = nullptr;  // Address of the in-memory ELF file.
  uint64_t symfile_size_ = 0;              // NB: The offset is 12 on x86 but 16 on ARM32.

  // Android-specific fields:
  uint64_t timestamp_;                     // CLOCK_MONOTONIC time of entry registration.
  std::atomic_uint32_t seqlock_{1};        // Synchronization. Even value if entry is valid.
};

// Implementation-specific fields (which can be used only in this file).
struct JITCodeEntry : public JITCodeEntryPublic {
  // Unpacked entries: Code address of the symbol in the ELF file.
  // Packed entries: The start address of the covered memory range.
  const void* addr_ = nullptr;
  // Allow merging of ELF files to save space.
  // Packing drops advanced DWARF data, so it is not always desirable.
  bool allow_packing_ = false;
  // Whether this entry has been LZMA compressed.
  // Compression is expensive, so we don't always do it.
  bool is_compressed_ = false;
};

// Public/stable binary interface.
struct JITDescriptorPublic {
  uint32_t version_ = 1;                            // NB: GDB supports only version 1.
  uint32_t action_flag_ = JIT_NOACTION;             // One of the JITAction enum values.
  const JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
  std::atomic<const JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

  // Android-specific fields:
  uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '2'};
  uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
  uint32_t sizeof_descriptor = sizeof(JITDescriptorPublic);
  uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
  std::atomic_uint32_t seqlock_{0};  // Incremented before and after any modification.
  uint64_t timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
};

// Implementation-specific fields (which can be used only in this file).
struct JITDescriptor : public JITDescriptorPublic {
  const JITCodeEntry* tail_ = nullptr;          // Tail of the linked list of all live entries.
  const JITCodeEntry* free_entries_ = nullptr;  // List of deleted entries ready for reuse.

  // Used for memory sharing with zygote. See NativeDebugInfoPreFork().
  const JITCodeEntry* zygote_head_entry_ = nullptr;
  JITCodeEntry application_tail_entry_{};
};

// Public interface: Can be used by reader to check the structs have the expected size.
uint32_t g_art_sizeof_jit_code_entry = sizeof(JITCodeEntryPublic);
uint32_t g_art_sizeof_jit_descriptor = sizeof(JITDescriptorPublic);

// Check that std::atomic has the expected layout.
static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
static_assert(std::atomic_uint32_t::is_always_lock_free, "Expected to be lock free");
static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");
static_assert(std::atomic<void*>::is_always_lock_free, "Expected to be lock free");
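
// Illustrative reader-side use of the two size globals above. This is
// hypothetical tool code, not part of ART; RemoteReadU32 stands in for a
// remote-memory read of the named symbol in the target process:
//
//   uint32_t sz = RemoteReadU32("g_art_sizeof_jit_code_entry");
//   if (sz != sizeof(LocalMirrorOfJITCodeEntryPublic)) {
//     // Layout mismatch; bail out rather than misparse the entries.
//   }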

// GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
void __attribute__((noinline)) __jit_debug_register_code() {
  __asm__("");
}

// Alternatively, native tools may overwrite this field to execute a custom handler.
void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

// The root data structure describing all JITed methods.
JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

// The following globals mirror the ones above, but are used to register dex files.
void __attribute__((noinline)) __dex_debug_register_code() {
  __asm__("");
}
void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}  // extern "C"
}  // namespace art

namespace art HIDDEN {

// The fields below are internal, but we keep them here anyway for consistency.
// Their state is related to the static state above and it must be kept in sync.

// Used only in debug builds to check that we are not adding duplicate entries.
static std::unordered_set<const void*> g_dcheck_all_jit_functions GUARDED_BY(g_jit_debug_lock);

// Methods that have been marked for deletion on the next repack pass.
static std::vector<const void*> g_removed_jit_functions GUARDED_BY(g_jit_debug_lock);

// Number of small (single symbol) ELF files. Used to trigger repacking.
static uint32_t g_jit_num_unpacked_entries = 0;

struct DexNativeInfo {
  static Mutex* Lock() RETURN_CAPABILITY(g_dex_debug_lock) { return &g_dex_debug_lock; }
  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
  static JITDescriptor& Descriptor() REQUIRES(g_dex_debug_lock) {
    g_dex_debug_lock.AssertHeld(Thread::Current());
    return __dex_debug_descriptor;
  }
  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return malloc(size); }
  static void Free(const void* ptr) { free(const_cast<void*>(ptr)); }
  template<class T> static T* Writable(const T* v) { return const_cast<T*>(v); }
};

struct JitNativeInfo {
  static Mutex* Lock() RETURN_CAPABILITY(g_jit_debug_lock) { return &g_jit_debug_lock; }
  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
  static JITDescriptor& Descriptor() REQUIRES(g_jit_debug_lock) {
    g_jit_debug_lock.AssertHeld(Thread::Current());
    return __jit_debug_descriptor;
  }
  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
  static void Free(void* ptr) = delete;

  template <class T>
  static T* Writable(const T* v) REQUIRES(g_jit_debug_lock) {
    // Special case: This entry is in static memory and not allocated in JIT memory.
    if (v == reinterpret_cast<const void*>(&Descriptor().application_tail_entry_)) {
      return const_cast<T*>(v);
    }
    return const_cast<T*>(Memory()->GetWritableDataAddress(v));
  }

  static jit::JitMemoryRegion* Memory() ASSERT_CAPABILITY(Locks::jit_lock_) {
    Locks::jit_lock_->AssertHeld(Thread::Current());
    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
    CHECK(jit_code_cache != nullptr);
    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
    CHECK(memory->IsValid());
    return memory;
  }
};

ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry* entry) {
  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
}

// Ensure the timestamp is monotonically increasing even in the presence of a
// low-granularity system timer. This ensures that each entry has a unique timestamp.
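// For example, if NanoTime() returns 1000 twice in a row, two successive
// entries still get distinct timestamps: 1000, then max(1000 + 1, 1000) = 1001.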
static uint64_t GetNextTimestamp(JITDescriptor& descriptor) {
  return std::max(descriptor.timestamp_ + 1, NanoTime());
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void Seqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Already locked";
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void Sequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
}

// Insert 'entry' in the linked list before 'next' and mark it as valid (append if 'next' is null).
// This method must be called under global lock (g_jit_debug_lock or g_dex_debug_lock).
template <class NativeInfo>
static void InsertNewEntry(const JITCodeEntry* entry, const JITCodeEntry* next)
    REQUIRES(NativeInfo::Lock()) {
  CHECK_EQ(entry->seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Expected invalid entry";
  JITDescriptor& descriptor = NativeInfo::Descriptor();
  const JITCodeEntry* prev = (next != nullptr ? next->prev_ : descriptor.tail_);
  JITCodeEntry* writable = NativeInfo::Writable(entry);
  writable->next_ = next;
  writable->prev_ = prev;
  writable->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as valid.
  // Backward pointers should not be used by readers, so they are non-atomic.
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = entry;
  } else {
    descriptor.tail_ = entry;
  }
  // Forward pointers must be atomic and they must point to a valid entry at all times.
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(entry, std::memory_order_release);
  } else {
    descriptor.head_.store(entry, std::memory_order_release);
  }
}
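
// Illustrative reader-side counterpart of the release-increment above (method 2
// in the file header). This is not part of ART; it would run in the tool's
// process, with RemoteRead standing in for a process_vm_readv-style helper:
//
//   RemoteEntry e = RemoteRead(entry);              // Copy the entry fields.
//   if ((e.seqlock & 1) != 0) { /* invalid entry; skip it */ }
//   Buffer symfile = RemoteRead(e.symfile_addr, e.symfile_size);
//   uint32_t seq2 = RemoteRead(&entry->seqlock_);
//   if (e.seqlock == seq2) { /* 'symfile' is a consistent copy of the ELF file */ }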

// This must be called with the appropriate lock taken (g_{jit,dex}_debug_lock).
template <class NativeInfo>
static const JITCodeEntry* CreateJITCodeEntryInternal(
    ArrayRef<const uint8_t> symfile = ArrayRef<const uint8_t>(),
    const void* addr = nullptr,
    bool allow_packing = false,
    bool is_compressed = false) REQUIRES(NativeInfo::Lock()) {
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Allocate JITCodeEntry if needed.
  if (descriptor.free_entries_ == nullptr) {
    const void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
    if (memory == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    new (NativeInfo::Writable(memory)) JITCodeEntry();
    descriptor.free_entries_ = reinterpret_cast<const JITCodeEntry*>(memory);
  }

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (NativeInfo::kCopySymfileData && !symfile.empty()) {
    const uint8_t* copy = reinterpret_cast<const uint8_t*>(NativeInfo::Alloc(symfile.size()));
    if (copy == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    memcpy(NativeInfo::Writable(copy), symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  uint64_t timestamp = GetNextTimestamp(descriptor);

  // We must insert entries at a specific place. See NativeDebugInfoPreFork().
  const JITCodeEntry* next = descriptor.head_.load(kNonRacingRelaxed);  // Insert at the head.
  if (descriptor.zygote_head_entry_ != nullptr && Runtime::Current()->IsZygote()) {
    next = nullptr;  // Insert zygote entries at the tail.
  }

  // Pop an entry from the free list.
  const JITCodeEntry* entry = descriptor.free_entries_;
  descriptor.free_entries_ = descriptor.free_entries_->next_.load(kNonRacingRelaxed);

  // Create the entry and set all its fields.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  writable_entry->symfile_addr_ = symfile.data();
  writable_entry->symfile_size_ = symfile.size();
  writable_entry->addr_ = addr;
  writable_entry->allow_packing_ = allow_packing;
  writable_entry->is_compressed_ = is_compressed;
  writable_entry->timestamp_ = timestamp;

  // Add the entry to the main linked list.
  Seqlock(descriptor);
  InsertNewEntry<NativeInfo>(entry, next);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.timestamp_ = timestamp;
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  return entry;
}

template <class NativeInfo>
static void DeleteJITCodeEntryInternal(const JITCodeEntry* entry) REQUIRES(NativeInfo::Lock()) {
  CHECK(entry != nullptr);
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Remove the entry from the main linked list.
  Seqlock(descriptor);
  const JITCodeEntry* next = entry->next_.load(kNonRacingRelaxed);
  const JITCodeEntry* prev = entry->prev_;
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = prev;
  } else {
    descriptor.tail_ = prev;
  }
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.timestamp_ = GetNextTimestamp(descriptor);
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  // Delete the entry.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  CHECK_EQ(writable_entry->seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Expected valid entry";
  // Release: Ensures that "next_" points to a valid entry at any time in the reader.
  writable_entry->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as invalid.
  // Release: Ensures that the entry is seen as invalid before its data is freed.
  std::atomic_thread_fence(std::memory_order_release);
  const uint8_t* symfile = entry->symfile_addr_;
  writable_entry->symfile_addr_ = nullptr;
  if (NativeInfo::kCopySymfileData && symfile != nullptr) {
    NativeInfo::Free(symfile);
  }

  // Push the entry to the free list.
  writable_entry->next_.store(descriptor.free_entries_, kNonRacingRelaxed);
  writable_entry->prev_ = nullptr;
  descriptor.free_entries_ = entry;
}

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // Container dex files (v41) may store data past the size defined in the header.
  uint32_t size = dexfile->SizeIncludingSharedData();
  CHECK(!dexfile->IsCompactDexFile());
  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), size);
  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  // On the other hand, a single dex file might also be used with different class loaders.
  for (const JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
    const JITCodeEntry* next = entry->next_;  // Save the next pointer before we free the memory.
    if (entry->symfile_addr_ == dexfile->Begin()) {
      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
    }
    entry = next;
  }
}

// Splits the linked list into two parts:
// The first part (including the static head pointer) is owned by the application.
// The second part is owned by zygote and might be concurrently modified by it.
//
// We add two empty entries at the boundary which are never removed (app_tail, zygote_head).
// These entries are needed to preserve the next/prev pointers in the linked list,
// since zygote cannot modify the application's data and vice versa.
//
//   <------- owned by the application memory --------> <--- owned by zygote memory --->
//          |----------------------|------------------|-------------|-----------------|
//  head -> | application_entries* | application_tail | zygote_head | zygote_entries* |
//          |+---------------------|------------------|-------------|----------------+|
//           |                                                                       |
//           \-(new application entries)                        (new zygote entries)-/
//
// Zygote entries are inserted at the end, which means that repacked zygote entries
// will still be seen by a single forward iteration of the linked list (avoiding a race).
//
// Application entries are inserted at the start, which introduces a repacking race,
// but that is ok, since it is easy to read the new entries from the head in a further pass.
// The benefit is that this makes it fast to read only the new entries.
//
void NativeDebugInfoPreFork() {
  CHECK(Runtime::Current()->IsZygote());
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);  // Needed to alloc entry.
  MutexLock mu2(Thread::Current(), g_jit_debug_lock);
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  if (descriptor.zygote_head_entry_ != nullptr) {
    return;  // Already done - we need to do this only on the first fork.
  }

  // Create the zygote-owned head entry (with no ELF file).
  // The data will be allocated from the current JIT memory (owned by zygote).
  const JITCodeEntry* zygote_head =
      reinterpret_cast<const JITCodeEntry*>(JitNativeInfo::Alloc(sizeof(JITCodeEntry)));
  CHECK(zygote_head != nullptr);
  new (JitNativeInfo::Writable(zygote_head)) JITCodeEntry();  // Initialize.
  InsertNewEntry<JitNativeInfo>(zygote_head, descriptor.head_);
  descriptor.zygote_head_entry_ = zygote_head;

  // Create the child-owned tail entry (with no ELF file).
  // The data is statically allocated since it must be owned by the forked process.
  InsertNewEntry<JitNativeInfo>(&descriptor.application_tail_entry_, descriptor.head_);
}

void NativeDebugInfoPostFork() {
  CHECK(!Runtime::Current()->IsZygote());
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  descriptor.free_entries_ = nullptr;  // Don't reuse zygote's entries.
}

// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry
// for each group. The start address of a method's code determines which group it
// belongs to; the end is irrelevant. New mini-debug-info entries will be merged where
// possible, and entries for GCed functions will be removed.
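// For example, with kJitRepackGroupSize = 64 KB, methods whose code starts at
// the (hypothetical) addresses 0x70001234 and 0x7000ffff both fall in the group
// [0x70000000, 0x70010000) and end up covered by a single packed JITCodeEntry
// whose addr_ is the group start, 0x70000000.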
static void RepackEntries(bool compress_entries, ArrayRef<const void*> removed)
    REQUIRES(g_jit_debug_lock) {
  DCHECK(std::is_sorted(removed.begin(), removed.end()));
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return;
  }
  JITDescriptor& descriptor = __jit_debug_descriptor;
  bool is_zygote = Runtime::Current()->IsZygote();

  // Collect entries that we want to pack.
  std::vector<const JITCodeEntry*> entries;
  entries.reserve(2 * kJitRepackFrequency);
  for (const JITCodeEntry* it = descriptor.head_; it != nullptr; it = it->next_) {
    if (it == descriptor.zygote_head_entry_ && !is_zygote) {
      break;  // Memory owned by the zygote process (read-only for an app).
    }
    if (it->allow_packing_) {
      if (!compress_entries && it->is_compressed_ && removed.empty()) {
        continue;  // If we are not compressing, also avoid decompressing.
      }
      entries.push_back(it);
    }
  }
  auto cmp = [](const JITCodeEntry* l, const JITCodeEntry* r) { return l->addr_ < r->addr_; };
  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.

  // Process the entries in groups (each spanning a memory range of size kJitRepackGroupSize).
  for (auto group_it = entries.begin(); group_it != entries.end();) {
    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;

    // Find all entries in this group (each entry is an in-memory ELF file).
    auto begin = group_it;
    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
    CHECK(end > begin);
    ArrayRef<const JITCodeEntry*> elfs(&*begin, end - begin);

    // Find all symbols that have been removed in this memory range.
    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
    CHECK(removed_end >= removed_begin);
    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);

    // Optimization: Don't compress the last group since it will likely change again soon.
    bool compress = compress_entries && end != entries.end();

    // Bail out early if there is nothing to do for this group.
    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
      group_it = end;  // Go to the next group.
      continue;
    }

    // Create a new single JITCodeEntry that covers this memory range.
    uint64_t start_time = MicroTime();
    size_t live_symbols;
    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
        elfs, removed_subset, compress, &live_symbols);
    VLOG(jit)
        << "JIT mini-debug-info repacked"
        << " for " << group_ptr
        << " in " << MicroTime() - start_time << "us"
        << " elfs=" << elfs.size()
        << " dead=" << removed_subset.size()
        << " live=" << live_symbols
        << " size=" << packed.size() << (compress ? "(lzma)" : "");

    // Replace the old entries with the new one (with their lifetimes temporarily overlapping).
    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
                                              /*addr_=*/ group_ptr,
                                              /*allow_packing_=*/ true,
                                              /*is_compressed_=*/ compress);
    for (auto it : elfs) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    group_it = end;  // Go to the next group.
  }
  g_jit_num_unpacked_entries = 0;
}

static void RepackNativeDebugInfoForJitLocked() REQUIRES(g_jit_debug_lock);

void AddNativeDebugInfoForJit(const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              bool allow_packing) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);
  if (kIsDebugBuild && code_ptr != nullptr) {
    DCHECK(g_dcheck_all_jit_functions.insert(code_ptr).second) << code_ptr << " already added";
  }

  // Remove all methods which have been marked for removal. The JIT GC should
  // force a repack, so this should happen only rarely for various corner cases.
  // Must be done before the addition in case the added code_ptr is in the removed set.
  if (!g_removed_jit_functions.empty()) {
    RepackNativeDebugInfoForJitLocked();
  }

  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
                                            /*addr=*/ code_ptr,
                                            /*allow_packing=*/ allow_packing,
                                            /*is_compressed=*/ false);

  if (code_ptr == nullptr) {
    VLOG(jit) << "JIT mini-debug-info added for new type, size=" << PrettySize(symfile.size());
  } else {
    VLOG(jit)
        << "JIT mini-debug-info added for native code at " << code_ptr
        << ", size=" << PrettySize(symfile.size());
  }

  // Automatically repack entries on a regular basis to save space.
  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
  // We delay compression until after GC since it is more expensive (and saves a further ~4x).
  // Always compress in the zygote, since it does not GC and we want to keep the
  // high-water mark low.
  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
    bool is_zygote = Runtime::Current()->IsZygote();
    RepackEntries(/*compress_entries=*/ is_zygote, /*removed=*/ ArrayRef<const void*>());
  }
}

void RemoveNativeDebugInfoForJit(const void* code_ptr) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  g_dcheck_all_jit_functions.erase(code_ptr);

  // Method removal is very expensive since we need to decompress and read ELF files.
  // Collect the methods to be removed and do the removal in bulk later.
  g_removed_jit_functions.push_back(code_ptr);

  VLOG(jit) << "JIT mini-debug-info removed for " << code_ptr;
}

static void RepackNativeDebugInfoForJitLocked() {
  // Remove entries which are inside packed and compressed ELF files.
  std::vector<const void*>& removed = g_removed_jit_functions;
  std::sort(removed.begin(), removed.end());
  RepackEntries(/*compress_entries=*/ true, ArrayRef<const void*>(removed));

  // Remove entries which are not allowed to be packed (each containing a single method).
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr;) {
    const JITCodeEntry* next = it->next_;
    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    it = next;
  }

  removed.clear();
  removed.shrink_to_fit();
}

void RepackNativeDebugInfoForJit() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  RepackNativeDebugInfoForJitLocked();
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    size += sizeof(JITCodeEntry) + it->symfile_size_;
  }
  return size;
}

Mutex* GetNativeDebugInfoLock() {
  return &g_jit_debug_lock;
}

void ForEachNativeDebugSymbol(std::function<void(const void*, size_t, const char*)> cb) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
  const JITCodeEntry* end = __jit_debug_descriptor.zygote_head_entry_;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != end; it = it->next_) {
    ArrayRef<const uint8_t> buffer(it->symfile_addr_, it->symfile_size_);
    if (!buffer.empty()) {
      ElfDebugReader<ElfRuntimeTypes> reader(buffer);
      reader.VisitFunctionSymbols([&](ElfRuntimeTypes::Sym sym, const char* name) {
        cb(reinterpret_cast<const void*>(sym.st_value), sym.st_size, name);
      });
    }
  }
}

}  // namespace art