/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

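// Memory protections for the JIT maps: kProtAll is used while the code region is open for
// writing (see ScopedCodeCacheWrite), kProtData for the data region, and kProtCode for the
// executable code region.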
static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

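// Allocations larger than these thresholds are logged at INFO level (see CommitCodeInternal
// and ReserveData).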
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                   size_t max_capacity,
                                   bool generate_debug_info,
                                   std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  CHECK_GE(max_capacity, initial_capacity);

  // Generating debug information is mostly for using the 'perf' tool, which does
  // not work with ashmem.
  bool use_ashmem = !generate_debug_info;
  // With 'perf', we want a 1-1 mapping between an address and a method.
  bool garbage_collect_code = !generate_debug_info;

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str, use_ashmem);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Align both capacities to a multiple of two pages: the mspaces work in page-sized units,
  // and the capacity is split in half between code and data, so each half must stay page-aligned.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = max_capacity / 2;
  size_t code_size = max_capacity - data_size;
  DCHECK_EQ(code_size + data_size, max_capacity);
  uint8_t* divider = data_map->Begin() + data_size;

  MemMap* code_map =
      data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Begin(), divider);
  data_size = initial_capacity / 2;
  code_size = initial_capacity - data_size;
  DCHECK_EQ(code_size + data_size, initial_capacity);
  return new JitCodeCache(
      code_map, data_map, code_size, data_size, max_capacity, garbage_collect_code);
}

JitCodeCache::JitCodeCache(MemMap* code_map,
                           MemMap* data_map,
                           size_t initial_code_capacity,
                           size_t initial_data_capacity,
                           size_t max_capacity,
                           bool garbage_collect_code)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map),
      max_capacity_(max_capacity),
      current_capacity_(initial_code_capacity + initial_data_capacity),
      code_end_(initial_code_capacity),
      data_end_(initial_data_capacity),
      last_collection_increased_code_cache_(false),
      last_update_time_ns_(0),
      garbage_collect_code_(garbage_collect_code),
      used_memory_for_data_(0),
      used_memory_for_code_(0),
      number_of_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_deoptimizations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {

  DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  SetFootprintLimit(current_capacity_);

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  VLOG(jit) << "Created jit code cache: initial data size="
            << PrettySize(initial_data_capacity)
            << ", initial code size="
            << PrettySize(initial_code_capacity);
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& it : method_code_map_) {
    if (it.second == method) {
      return true;
    }
  }
  return false;
}

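// RAII helper that makes the whole code region writable (kProtAll) for the duration of its
// scope and restores the read/execute protection (kProtCode) on destruction.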
class ScopedCodeCacheWrite : ScopedTrace {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map)
      : ScopedTrace("ScopedCodeCacheWrite"),
        code_map_(code_map) {
    ScopedTrace trace("mprotect all");
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    ScopedTrace trace("mprotect code");
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* vmap_table,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size,
                                  bool osr) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       vmap_table,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size,
                                       osr);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                vmap_table,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size,
                                osr);
  }
  return result;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

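// Compiled code is always preceded by its OatQuickMethodHeader: AllocateCode returns the start
// of the (alignment-padded) header and the code follows it, so stepping back by the rounded-up
// header size recovers the allocation address.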
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  // Notify native debugger that we are about to remove the code.
  // It does nothing if we are not using native debugger.
  DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));

  // Use the offset directly to avoid the sanity check that the method was
  // compiled with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    const uint8_t* data = method_header->code_ - method_header->vmap_table_offset_;
    FreeData(const_cast<uint8_t*>(data));
  }
  FreeCode(reinterpret_cast<uint8_t*>(allocation));
}

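// Removes all compiled code, OSR entries and profiling info belonging to methods allocated in
// the given LinearAlloc, typically because their class loader is going away.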
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method is called
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
    if (alloc.ContainsUnsafe(it->first)) {
      // Note that the code has already been removed in the loop above.
      it = osr_code_map_.erase(it);
    } else {
      ++it;
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      FreeData(reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
  MutexLock mu(self, lock_);
  for (ProfilingInfo* info : profiling_infos_) {
    if (!info->IsInUseByCompiler()) {
      info->ClearGcRootsInInlineCaches();
    }
  }
}

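// Copies the compiled code into the cache and publishes it: with the thread suspended and the
// lock held, the code region is made writable, the code and its OatQuickMethodHeader are
// written and the instruction cache is flushed; the method entry point (or the OSR map) is
// then updated back in the runnable state.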
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* vmap_table,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size,
                                          bool osr) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;
  uint8_t* memory = nullptr;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      memory = AllocateCode(total_size);
      if (memory == nullptr) {
        return nullptr;
      }
      code_ptr = memory + header_size;

      std::copy(code, code + code_size, code_ptr);
      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      new (method_header) OatQuickMethodHeader(
          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
          frame_size_in_bytes,
          core_spill_mask,
          fp_spill_mask,
          code_size);
    }

    FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
    number_of_compilations_++;
  }
  // We need to update the entry point in the runnable state for the instrumentation.
  {
    MutexLock mu(self, lock_);
    method_code_map_.Put(code_ptr, method);
    if (osr) {
      number_of_osr_compilations_++;
      osr_code_map_.Put(method, code_ptr);
    } else {
      Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
          method, method_header->GetEntryPoint());
    }
    if (collection_in_progress_) {
      // We need to update the live bitmap if there is a GC to ensure it sees this new
      // code.
      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
    }
    last_update_time_ns_.StoreRelease(NanoTime());
    VLOG(jit)
        << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
        << PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
    histogram_code_memory_use_.AddValue(code_size);
    if (code_size > kCodeSizeLogThreshold) {
      LOG(INFO) << "JIT allocated "
                << PrettySize(code_size)
                << " for compiled code of "
                << PrettyMethod(method);
    }
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  return used_memory_for_code_;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  return used_memory_for_data_;
}

void JitCodeCache::ClearData(Thread* self, void* data) {
  MutexLock mu(self, lock_);
  FreeData(reinterpret_cast<uint8_t*>(data));
}

uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size, ArtMethod* method) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  MutexLock mu(self, lock_);
  histogram_stack_map_memory_use_.AddValue(size);
  if (size > kStackMapSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(size)
              << " for stack maps of "
              << PrettyMethod(method);
  }
  return result;
}

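// Stack visitor that marks, in the code cache's live bitmap, every JIT-compiled method found
// on the walked stack.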
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

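// Checkpoint closure run on each thread during a collection: walks the thread's stack with
// MarkCodeVisitor, then signals the barrier.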
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    ScopedTrace trace(__PRETTY_FUNCTION__);
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
              : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame; it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

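// Splits the given footprint evenly between the data and code mspaces; callers pass a value
// that is a multiple of two pages, which the DCHECKs below verify.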
void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
  size_t per_space_footprint = new_footprint / 2;
  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
  DCHECK_EQ(per_space_footprint * 2, new_footprint);
  mspace_set_footprint_limit(data_mspace_, per_space_footprint);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    mspace_set_footprint_limit(code_mspace_, per_space_footprint);
  }
}

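// Grows current_capacity_ toward max_capacity_ and pushes the new limit to both mspaces via
// SetFootprintLimit; returns false if the cache is already at its maximum capacity.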
bool JitCodeCache::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, or increase it by 1MB if
  // we're above.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
  }

  SetFootprintLimit(current_capacity_);

  return true;
}

void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
  Barrier barrier(0);
  size_t threads_running_checkpoint = 0;
  MarkCodeClosure closure(this, &barrier);
  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  // Now that we have run our checkpoint, move to a suspended state and wait
  // for other threads to run the checkpoint.
  ScopedThreadSuspension sts(self, kSuspended);
  if (threads_running_checkpoint != 0) {
    barrier.Increment(self, threads_running_checkpoint);
  }
}

bool JitCodeCache::ShouldDoFullCollection() {
  if (current_capacity_ == max_capacity_) {
    // Always do a full collection when the code cache is full.
    return true;
  } else if (current_capacity_ < kReservedCapacity) {
    // Always do partial collection when the code cache size is below the reserved
    // capacity.
    return false;
  } else if (last_collection_increased_code_cache_) {
    // This time do a full collection.
    return true;
  } else {
    // This time do a partial collection.
    return false;
  }
}

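// Entry point of code cache collection. A collection reclaims compiled code that is neither
// the entry point of a method nor on any thread's call stack; a full collection additionally
// prunes ProfilingInfo objects that are no longer needed (see DoCollection). To prepare the
// next full collection, compiled methods are temporarily reset to the interpreter so that only
// code that gets invoked again stays an entry point.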
void JitCodeCache::GarbageCollectCache(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  if (!garbage_collect_code_) {
    MutexLock mu(self, lock_);
    IncreaseCodeCacheCapacity();
    return;
  }

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      number_of_collections_++;
      live_bitmap_.reset(CodeCacheBitmap::Create(
          "code-cache-bitmap",
          reinterpret_cast<uintptr_t>(code_map_->Begin()),
          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
      collection_in_progress_ = true;
    }
  }

  TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
  {
    TimingLogger::ScopedTiming st("Code cache collection", &logger);

    bool do_full_collection = false;
    {
      MutexLock mu(self, lock_);
      do_full_collection = ShouldDoFullCollection();
    }

    if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
      LOG(INFO) << "Do "
                << (do_full_collection ? "full" : "partial")
                << " code cache collection, code="
                << PrettySize(CodeCacheSize())
                << ", data=" << PrettySize(DataCacheSize());
    }

    DoCollection(self, /* collect_profiling_info */ do_full_collection);

    if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
      LOG(INFO) << "After code cache collection, code="
                << PrettySize(CodeCacheSize())
                << ", data=" << PrettySize(DataCacheSize());
    }

    {
      MutexLock mu(self, lock_);

      // Increase the code cache only when we do partial collections.
      // TODO: base this strategy on how full the code cache is?
      if (do_full_collection) {
        last_collection_increased_code_cache_ = false;
      } else {
        last_collection_increased_code_cache_ = true;
        IncreaseCodeCacheCapacity();
      }

      bool next_collection_will_be_full = ShouldDoFullCollection();

      // Start polling the liveness of compiled code to prepare for the next full collection.
      if (next_collection_will_be_full) {
        // Save the entry point of methods we have compiled, and update the entry
        // point of those methods to the interpreter. If the method is invoked, the
        // interpreter will update its entry point to the compiled code and call it.
        for (ProfilingInfo* info : profiling_infos_) {
          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
          if (ContainsPc(entry_point)) {
            info->SetSavedEntryPoint(entry_point);
            Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
                info->GetMethod(), GetQuickToInterpreterBridge());
          }
        }

        DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
      }
      live_bitmap_.reset(nullptr);
      NotifyCollectionDone(self);
    }
  }
  Runtime::Current()->GetJit()->AddTimingLogger(logger);
}

void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  MutexLock mu(self, lock_);
  ScopedCodeCacheWrite scc(code_map_.get());
  // Iterate over all compiled code and remove entries that are not marked.
  for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
    const void* code_ptr = it->first;
    ArtMethod* method = it->second;
    uintptr_t allocation = FromCodeToAllocation(code_ptr);
    if (GetLiveBitmap()->Test(allocation)) {
      ++it;
    } else {
      FreeCode(code_ptr, method);
      it = method_code_map_.erase(it);
    }
  }
}

void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
  ScopedTrace trace(__FUNCTION__);
  {
    MutexLock mu(self, lock_);
    if (collect_profiling_info) {
      // Clear the profiling info of methods that do not have compiled code as entrypoint.
      // Also remove the saved entry point from the ProfilingInfo objects.
      for (ProfilingInfo* info : profiling_infos_) {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
          info->GetMethod()->SetProfilingInfo(nullptr);
        }

        if (info->GetSavedEntryPoint() != nullptr) {
          info->SetSavedEntryPoint(nullptr);
          // We are going to move this method back to interpreter. Clear the counter now to
          // give it a chance to be hot again.
          info->GetMethod()->ClearCounter();
        }
      }
    } else if (kIsDebugBuild) {
      // Sanity check that the profiling infos do not have a dangling entry point.
      for (ProfilingInfo* info : profiling_infos_) {
        DCHECK(info->GetSavedEntryPoint() == nullptr);
      }
    }

    // Mark compiled code that is the entry point of an ArtMethod. Compiled code that is not
    // an entry point is either:
    // - OSR compiled code, which will be removed if not on a thread call stack.
    // - discarded compiled code, which will be removed if not on a thread call stack.
    for (const auto& it : method_code_map_) {
      ArtMethod* method = it.second;
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
      }
    }

    // Empty osr method map, as osr compiled code will be deleted (except the ones
    // on thread stacks).
    osr_code_map_.clear();
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  MarkCompiledCodeOnThreadStacks(self);

  // At this point, mutator threads are still running, and entrypoints of methods can
  // change. We do know they cannot change to a code cache entry that is not marked,
  // therefore we can safely remove those entries.
  RemoveUnmarkedCode(self);

  if (collect_profiling_info) {
    MutexLock mu(self, lock_);
    // Free all profiling infos of methods not compiled nor being compiled.
    auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
      [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
        // that the compiled code would not get revived. As mutator threads run concurrently,
        // they may have revived the compiled code, and now we are in the situation where
        // a method has compiled code but no ProfilingInfo.
        // We make sure compiled methods have a ProfilingInfo object. It is needed for
        // code cache collection.
        if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
          // We clear the inline caches, as classes in them might be stale.
          info->ClearGcRootsInInlineCaches();
          // Do a fence to make sure the clearing is seen before attaching to the method.
          QuasiAtomic::ThreadFenceRelease();
          info->GetMethod()->SetProfilingInfo(info);
        } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
          // No need for this ProfilingInfo object anymore.
          FreeData(reinterpret_cast<uint8_t*>(info));
          return true;
        }
        return false;
      });
    profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
  }
}

bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
  ScopedTrace trace(__FUNCTION__);
  // Check that methods we have compiled do have a ProfilingInfo object. We would
  // have memory leaks of compiled code otherwise.
  for (const auto& it : method_code_map_) {
    ArtMethod* method = it.second;
    if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        // If the code is not dead, then we have a problem. Note that this can even
        // happen just after a collection, as mutator threads are running in parallel
        // and could deoptimize an existing compiled code.
        return false;
      }
    }
  }
  return true;
}

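// Maps a pc inside the code cache back to the OatQuickMethodHeader of the compiled method that
// contains it, or returns nullptr if the pc does not belong to the cache.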
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = osr_code_map_.find(method);
  if (it == osr_code_map_.end()) {
    return nullptr;
  }
  return OatQuickMethodHeader::FromCodePointer(it->second);
}

ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation)
    // No thread safety analysis as we are using TryLock/Unlock explicitly.
    NO_THREAD_SAFETY_ANALYSIS {
  ProfilingInfo* info = nullptr;
  if (!retry_allocation) {
    // If we are allocating for the interpreter, just try to lock, to avoid
    // lock contention with the JIT.
    if (lock_.ExclusiveTryLock(self)) {
      info = AddProfilingInfoInternal(self, method, entries);
      lock_.ExclusiveUnlock(self);
    }
  } else {
    {
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }

    if (info == nullptr) {
      GarbageCollectCache(self);
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
      sizeof(void*));

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = AllocateData(profile_info_size);
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);

  // Make sure other threads see the data in the profiling info object before the
  // store in the ArtMethod's ProfilingInfo pointer.
  QuasiAtomic::ThreadFenceRelease();

  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  histogram_profiling_info_memory_use_.AddValue(profile_info_size);
  return info;
}

// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (code_mspace_ == mspace) {
    size_t result = code_end_;
    code_end_ += increment;
    return reinterpret_cast<void*>(result + code_map_->Begin());
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    size_t result = data_end_;
    data_end_ += increment;
    return reinterpret_cast<void*>(result + data_map_->Begin());
  }
}

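// Appends a MethodReference for every profiled method whose dex file base location is listed
// in dex_base_locations.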
void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                      std::vector<MethodReference>& methods) {
  ScopedTrace trace(__FUNCTION__);
  MutexLock mu(Thread::Current(), lock_);
  for (const ProfilingInfo* info : profiling_infos_) {
    ArtMethod* method = info->GetMethod();
    const DexFile* dex_file = method->GetDexFile();
    if (ContainsElement(dex_base_locations, dex_file->GetBaseLocation())) {
      methods.emplace_back(dex_file, method->GetDexMethodIndex());
    }
  }
}

uint64_t JitCodeCache::GetLastUpdateTimeNs() const {
  return last_update_time_ns_.LoadAcquire();
}

bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  return osr_code_map_.find(method) != osr_code_map_.end();
}

bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
  if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  MutexLock mu(self, lock_);
  if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
    return false;
  }

  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info == nullptr) {
    VLOG(jit) << PrettyMethod(method) << " needs a ProfilingInfo to be compiled";
    // Because the counter is not atomic, there are some rare cases where we may not
    // hit the threshold for creating the ProfilingInfo. Reset the counter now to
    // "correct" this.
    method->ClearCounter();
    return false;
  }

  if (info->IsMethodBeingCompiled(osr)) {
    return false;
  }

  info->SetIsMethodBeingCompiled(true, osr);
  return true;
}

ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    info->IncrementInlineUse();
  }
  return info;
}

void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  DCHECK(info != nullptr);
  info->DecrementInlineUse();
}

void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED, bool osr) {
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  DCHECK(info->IsMethodBeingCompiled(osr));
  info->SetIsMethodBeingCompiled(false, osr);
}

size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
}

void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                             const OatQuickMethodHeader* header) {
  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
    // Prevent future uses of the compiled code.
    profiling_info->SetSavedEntryPoint(nullptr);
  }

  if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
    // The entrypoint is the one to invalidate, so we just update
    // it to the interpreter entry point and clear the counter to get the method
    // Jitted again.
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, GetQuickToInterpreterBridge());
    method->ClearCounter();
  } else {
    MutexLock mu(Thread::Current(), lock_);
    auto it = osr_code_map_.find(method);
    if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
      // Remove the OSR method, to avoid using it again.
      osr_code_map_.erase(it);
    }
  }
  MutexLock mu(Thread::Current(), lock_);
  number_of_deoptimizations_++;
}

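// Allocates from the code mspace at the instruction-set alignment; since the rounded-up header
// size is a multiple of that alignment, the code that follows the header stays aligned as well
// (checked by the DCHECK below).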
uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  uint8_t* result = reinterpret_cast<uint8_t*>(
      mspace_memalign(code_mspace_, alignment, code_size));
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  // Ensure the header ends up at expected instruction alignment.
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
  used_memory_for_code_ += mspace_usable_size(result);
  return result;
}

void JitCodeCache::FreeCode(uint8_t* code) {
  used_memory_for_code_ -= mspace_usable_size(code);
  mspace_free(code_mspace_, code);
}

uint8_t* JitCodeCache::AllocateData(size_t data_size) {
  void* result = mspace_malloc(data_mspace_, data_size);
  used_memory_for_data_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(result);
}

void JitCodeCache::FreeData(uint8_t* data) {
  used_memory_for_data_ -= mspace_usable_size(data);
  mspace_free(data_mspace_, data);
}

void JitCodeCache::Dump(std::ostream& os) {
  MutexLock mu(Thread::Current(), lock_);
  os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
     << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
     << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
     << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
     << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
     << "Total number of JIT compilations for on stack replacement: "
        << number_of_osr_compilations_ << "\n"
     << "Total number of deoptimizations: " << number_of_deoptimizations_ << "\n"
     << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
  histogram_stack_map_memory_use_.PrintMemoryUse(os);
  histogram_code_memory_use_.PrintMemoryUse(os);
  histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}

}  // namespace jit
}  // namespace art