1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "optimizing_compiler.h"
18 
19 #include <fstream>
20 #include <memory>
21 #include <sstream>
22 
23 #include <stdint.h>
24 
25 #include "art_method-inl.h"
26 #include "base/arena_allocator.h"
27 #include "base/arena_containers.h"
28 #include "base/dumpable.h"
29 #include "base/logging.h"
30 #include "base/macros.h"
31 #include "base/mutex.h"
32 #include "base/scoped_arena_allocator.h"
33 #include "base/systrace.h"
34 #include "base/timing_logger.h"
35 #include "builder.h"
36 #include "code_generator.h"
37 #include "compiler.h"
38 #include "com_android_art_flags.h"
39 #include "debug/elf_debug_writer.h"
40 #include "debug/method_debug_info.h"
41 #include "dex/dex_file_types.h"
42 #include "driver/compiled_code_storage.h"
43 #include "driver/compiler_options.h"
44 #include "driver/dex_compilation_unit.h"
45 #include "fast_compiler.h"
46 #include "graph_checker.h"
47 #include "graph_visualizer.h"
48 #include "inliner.h"
49 #include "jit/debugger_interface.h"
50 #include "jit/jit.h"
51 #include "jit/jit_code_cache.h"
52 #include "jit/jit_logger.h"
53 #include "jni/quick/jni_compiler.h"
54 #include "linker/linker_patch.h"
55 #include "nodes.h"
56 #include "oat/oat_quick_method_header.h"
57 #include "optimizing/write_barrier_elimination.h"
58 #include "prepare_for_register_allocation.h"
59 #include "profiling_info_builder.h"
60 #include "reference_type_propagation.h"
61 #include "register_allocator_linear_scan.h"
62 #include "ssa_builder.h"
63 #include "ssa_liveness_analysis.h"
64 #include "stack_map_stream.h"
65 #include "utils/assembler.h"
66 
67 namespace art HIDDEN {
68 
69 static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;
70 
71 static constexpr const char* kPassNameSeparator = "$";
72 
73 /**
74  * Filter to apply to the visualizer. Methods whose name contains the filter will
75  * be dumped.
76  */
77 static constexpr const char kStringFilter[] = "";
78 
79 class PassScope;
80 
81 class PassObserver : public ValueObject {
82  public:
83   PassObserver(HGraph* graph,
84                CodeGenerator* codegen,
85                std::ostream* visualizer_output,
86                const CompilerOptions& compiler_options)
87       : graph_(graph),
88         last_seen_graph_size_(0),
89         cached_method_name_(),
90         timing_logger_enabled_(compiler_options.GetDumpPassTimings()),
91         timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
92         disasm_info_(graph->GetAllocator()),
93         visualizer_oss_(),
94         visualizer_output_(visualizer_output),
95         visualizer_enabled_(!compiler_options.GetDumpCfgFileName().empty()),
96         visualizer_(&visualizer_oss_, graph, codegen),
97         codegen_(codegen),
98         graph_in_bad_state_(false) {
99     if (timing_logger_enabled_ || visualizer_enabled_) {
100       if (!IsVerboseMethod(compiler_options, GetMethodName())) {
101         timing_logger_enabled_ = visualizer_enabled_ = false;
102       }
103       if (visualizer_enabled_) {
104         visualizer_.PrintHeader(GetMethodName());
105         codegen->SetDisassemblyInformation(&disasm_info_);
106       }
107     }
108   }
109 
110   ~PassObserver() {
111     if (timing_logger_enabled_) {
112       LOG(INFO) << "TIMINGS " << GetMethodName();
113       LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
114     }
115     if (visualizer_enabled_) {
116       FlushVisualizer();
117     }
118     DCHECK(visualizer_oss_.str().empty());
119   }
120 
121   void DumpDisassembly() {
122     if (visualizer_enabled_) {
123       visualizer_.DumpGraphWithDisassembly();
124       FlushVisualizer();
125     }
126   }
127 
128   void SetGraphInBadState() { graph_in_bad_state_ = true; }
129 
130   const char* GetMethodName() {
131     // PrettyMethod() is expensive, so we delay calling it until we actually have to.
132     if (cached_method_name_.empty()) {
133       cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
134     }
135     return cached_method_name_.c_str();
136   }
137 
138  private:
139   void StartPass(const char* pass_name) {
140     VLOG(compiler) << "Starting pass: " << pass_name;
141     // Dump graph first, then start timer.
142     if (visualizer_enabled_) {
143       visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
144       FlushVisualizer();
145     }
146     if (timing_logger_enabled_) {
147       timing_logger_.StartTiming(pass_name);
148     }
149   }
150 
151   void FlushVisualizer() {
152     *visualizer_output_ << visualizer_oss_.str();
153     visualizer_output_->flush();
154     visualizer_oss_.str("");
155     visualizer_oss_.clear();
156   }
157 
158   void EndPass(const char* pass_name, bool pass_change) {
159     // Pause timer first, then dump graph.
160     if (timing_logger_enabled_) {
161       timing_logger_.EndTiming();
162     }
163     if (visualizer_enabled_) {
164       visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
165       FlushVisualizer();
166     }
167 
168     // Validate the HGraph if running in debug mode.
169     if (kIsDebugBuild) {
170       if (!graph_in_bad_state_) {
171         GraphChecker checker(graph_, codegen_);
172         last_seen_graph_size_ = checker.Run(pass_change, last_seen_graph_size_);
173         if (!checker.IsValid()) {
174           std::ostringstream stream;
175           graph_->Dump(stream, codegen_);
176           LOG(FATAL_WITHOUT_ABORT) << "Error after " << pass_name << "(" << graph_->PrettyMethod()
177                                    << "): " << stream.str();
178           LOG(FATAL) << "(" << pass_name <<  "): " << Dumpable<GraphChecker>(checker);
179         }
180       }
181     }
182   }
183 
184   static bool IsVerboseMethod(const CompilerOptions& compiler_options, const char* method_name) {
185     // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
186     // empty kStringFilter matching all methods.
187     if (compiler_options.HasVerboseMethods()) {
188       return compiler_options.IsVerboseMethod(method_name);
189     }
190 
191     // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
192     // warning when the string is empty.
193     constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
194     if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
195       return true;
196     }
197 
198     return false;
199   }
200 
201   HGraph* const graph_;
202   size_t last_seen_graph_size_;
203 
204   std::string cached_method_name_;
205 
206   bool timing_logger_enabled_;
207   TimingLogger timing_logger_;
208 
209   DisassemblyInformation disasm_info_;
210 
211   std::ostringstream visualizer_oss_;
212   std::ostream* visualizer_output_;
213   bool visualizer_enabled_;
214   HGraphVisualizer visualizer_;
215   CodeGenerator* codegen_;
216 
217   // Flag to be set by the compiler if the pass failed and the graph is not
218   // expected to validate.
219   bool graph_in_bad_state_;
220 
221   friend PassScope;
222 
223   DISALLOW_COPY_AND_ASSIGN(PassObserver);
224 };
225 
226 class PassScope : public ValueObject {
227  public:
228   PassScope(const char *pass_name, PassObserver* pass_observer)
229       : pass_name_(pass_name),
230         pass_change_(true),  // assume change
231         pass_observer_(pass_observer) {
232     pass_observer_->StartPass(pass_name_);
233   }
234 
235   void SetPassNotChanged() {
236     pass_change_ = false;
237   }
238 
239   ~PassScope() {
240     pass_observer_->EndPass(pass_name_, pass_change_);
241   }
242 
243  private:
244   const char* const pass_name_;
245   bool pass_change_;
246   PassObserver* const pass_observer_;
247 };
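// PassScope is an RAII helper: StartPass() runs on construction and EndPass() on destruction,
// so wrapping a pass in a scope dumps and times the graph around it. A typical use, as seen
// later in AllocateRegisters(), looks like:
//
//   {
//     PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
//     liveness.Analyze();
//   }
//
// Passes that report no change call SetPassNotChanged(), which lets the debug-build
// GraphChecker verify that the graph size really did not change.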
248 
249 class OptimizingCompiler final : public Compiler {
250  public:
251   explicit OptimizingCompiler(const CompilerOptions& compiler_options,
252                               CompiledCodeStorage* storage);
253   ~OptimizingCompiler() override;
254 
255   bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
256 
257   CompiledMethod* Compile(const dex::CodeItem* code_item,
258                           uint32_t access_flags,
259                           uint16_t class_def_idx,
260                           uint32_t method_idx,
261                           Handle<mirror::ClassLoader> class_loader,
262                           const DexFile& dex_file,
263                           Handle<mirror::DexCache> dex_cache) const override;
264 
265   CompiledMethod* JniCompile(uint32_t access_flags,
266                              uint32_t method_idx,
267                              const DexFile& dex_file,
268                              Handle<mirror::DexCache> dex_cache) const override;
269 
270   uintptr_t GetEntryPointOf(ArtMethod* method) const override
271       REQUIRES_SHARED(Locks::mutator_lock_) {
272     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
273         InstructionSetPointerSize(GetCompilerOptions().GetInstructionSet())));
274   }
275 
276   bool JitCompile(Thread* self,
277                   jit::JitCodeCache* code_cache,
278                   jit::JitMemoryRegion* region,
279                   ArtMethod* method,
280                   CompilationKind compilation_kind,
281                   jit::JitLogger* jit_logger)
282       override
283       REQUIRES_SHARED(Locks::mutator_lock_);
284 
285  private:
286   bool RunOptimizations(HGraph* graph,
287                         CodeGenerator* codegen,
288                         const DexCompilationUnit& dex_compilation_unit,
289                         PassObserver* pass_observer,
290                         const OptimizationDef definitions[],
291                         size_t length) const {
292     // Convert definitions to optimization passes.
293     ArenaVector<HOptimization*> optimizations = ConstructOptimizations(
294         definitions,
295         length,
296         graph->GetAllocator(),
297         graph,
298         compilation_stats_.get(),
299         codegen,
300         dex_compilation_unit);
301     DCHECK_EQ(length, optimizations.size());
302     // Run the optimization passes one by one. Any "depends_on" pass refers back to
303     // the most recent occurrence of that pass, skipped or executed.
304     std::bitset<static_cast<size_t>(OptimizationPass::kLast) + 1u> pass_changes;
305     pass_changes[static_cast<size_t>(OptimizationPass::kNone)] = true;
306     bool change = false;
307     for (size_t i = 0; i < length; ++i) {
308       if (pass_changes[static_cast<size_t>(definitions[i].depends_on)]) {
309         // Execute the pass and record whether it changed anything.
310         PassScope scope(optimizations[i]->GetPassName(), pass_observer);
311         bool pass_change = optimizations[i]->Run();
312         pass_changes[static_cast<size_t>(definitions[i].pass)] = pass_change;
313         if (pass_change) {
314           change = true;
315         } else {
316           scope.SetPassNotChanged();
317         }
318       } else {
319         // Skip the pass and record that nothing changed.
320         pass_changes[static_cast<size_t>(definitions[i].pass)] = false;
321       }
322     }
323     return change;
324   }
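  // The "depends_on" mechanism above lets a pass run conditionally. For example, a definition
  // such as
  //
  //   OptDef(OptimizationPass::kConstantFolding,
  //          "constant_folding$after_inlining",
  //          OptimizationPass::kInliner)
  //
  // (used in RunOptimizations() further below) only executes constant folding if the most
  // recent inliner run reported a change; otherwise the pass is skipped and recorded as
  // "no change" for anything depending on it.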
325 
326   template <size_t length> bool RunOptimizations(
327       HGraph* graph,
328       CodeGenerator* codegen,
329       const DexCompilationUnit& dex_compilation_unit,
330       PassObserver* pass_observer,
331       const OptimizationDef (&definitions)[length]) const {
332     return RunOptimizations(
333         graph, codegen, dex_compilation_unit, pass_observer, definitions, length);
334   }
335 
336   void RunOptimizations(HGraph* graph,
337                         CodeGenerator* codegen,
338                         const DexCompilationUnit& dex_compilation_unit,
339                         PassObserver* pass_observer) const;
340 
341   // Create a 'CompiledMethod' for an optimized graph.
342   CompiledMethod* Emit(ArenaAllocator* allocator,
343                        CodeGenerator* codegen,
344                        bool is_intrinsic,
345                        const dex::CodeItem* item) const;
346 
347   // Try compiling a method and return the code generator used for
348   // compiling it.
349   // This method:
350   // 1) Builds the graph. Returns null if it failed to build it.
351   // 2) Transforms the graph to SSA. Returns null if it failed.
352   // 3) Runs optimizations on the graph, including register allocator.
353   CodeGenerator* TryCompile(ArenaAllocator* allocator,
354                             ArenaStack* arena_stack,
355                             const DexCompilationUnit& dex_compilation_unit,
356                             ArtMethod* method,
357                             CompilationKind compilation_kind,
358                             VariableSizedHandleScope* handles) const;
359 
360   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
361                                      ArenaStack* arena_stack,
362                                      const DexCompilationUnit& dex_compilation_unit,
363                                      ArtMethod* method,
364                                      VariableSizedHandleScope* handles) const;
365 
366   bool RunArchOptimizations(HGraph* graph,
367                             CodeGenerator* codegen,
368                             const DexCompilationUnit& dex_compilation_unit,
369                             PassObserver* pass_observer) const;
370 
371   bool RunRequiredPasses(HGraph* graph,
372                          CodeGenerator* codegen,
373                          const DexCompilationUnit& dex_compilation_unit,
374                          PassObserver* pass_observer) const;
375 
376   std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
377 
378   // This must be called before any other function that dumps data to the cfg.
379   void DumpInstructionSetFeaturesToCfg() const;
380 
381   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
382 
383   std::unique_ptr<std::ostream> visualizer_output_;
384 
385   DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
386 };
387 
388 static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
389 
390 OptimizingCompiler::OptimizingCompiler(const CompilerOptions& compiler_options,
391                                        CompiledCodeStorage* storage)
392     : Compiler(compiler_options, storage, kMaximumCompilationTimeBeforeWarning) {
393   // Enable C1visualizer output.
394   const std::string& cfg_file_name = compiler_options.GetDumpCfgFileName();
395   if (!cfg_file_name.empty()) {
396     std::ios_base::openmode cfg_file_mode =
397         compiler_options.GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
398     visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
399     DumpInstructionSetFeaturesToCfg();
400   }
401   if (compiler_options.GetDumpStats()) {
402     compilation_stats_.reset(new OptimizingCompilerStats());
403   }
404 }
405 
406 OptimizingCompiler::~OptimizingCompiler() {
407   if (compilation_stats_.get() != nullptr) {
408     compilation_stats_->Log();
409   }
410 }
411 
412 void OptimizingCompiler::DumpInstructionSetFeaturesToCfg() const {
413   const CompilerOptions& compiler_options = GetCompilerOptions();
414   const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
415   std::string isa_string =
416       std::string("isa:") + GetInstructionSetString(features->GetInstructionSet());
417   std::string features_string = "isa_features:" + features->GetFeatureString();
418   std::string read_barrier_type = "none";
419   if (compiler_options.EmitReadBarrier()) {
420     if (art::kUseBakerReadBarrier)
421       read_barrier_type = "baker";
422     else if (art::kUseTableLookupReadBarrier)
423       read_barrier_type = "tablelookup";
424   }
425   std::string read_barrier_string = ART_FORMAT("read_barrier_type:{}", read_barrier_type);
426   // It is assumed that visualizer_output_ is empty when calling this function, hence the fake
427   // compilation block containing the ISA features will be printed at the beginning of the .cfg
428   // file.
429   *visualizer_output_ << HGraphVisualizer::InsertMetaDataAsCompilationBlock(
430       isa_string + ' ' + features_string + ' ' + read_barrier_string);
431 }
432 
433 bool OptimizingCompiler::CanCompileMethod([[maybe_unused]] uint32_t method_idx,
434                                           [[maybe_unused]] const DexFile& dex_file) const {
435   return true;
436 }
437 
438 static bool IsInstructionSetSupported(InstructionSet instruction_set) {
439   return instruction_set == InstructionSet::kArm ||
440          instruction_set == InstructionSet::kArm64 ||
441          instruction_set == InstructionSet::kThumb2 ||
442          instruction_set == InstructionSet::kRiscv64 ||
443          instruction_set == InstructionSet::kX86 ||
444          instruction_set == InstructionSet::kX86_64;
445 }
446 
447 bool OptimizingCompiler::RunRequiredPasses(HGraph* graph,
448                                            CodeGenerator* codegen,
449                                            const DexCompilationUnit& dex_compilation_unit,
450                                            PassObserver* pass_observer) const {
451   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
452 #if defined(ART_ENABLE_CODEGEN_arm)
453     case InstructionSet::kThumb2:
454     case InstructionSet::kArm: {
455       OptimizationDef arm_optimizations[] = {
456           OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
457       };
458       return RunOptimizations(graph,
459                               codegen,
460                               dex_compilation_unit,
461                               pass_observer,
462                               arm_optimizations);
463     }
464 #endif
465 #if defined(ART_ENABLE_CODEGEN_riscv64)
466     case InstructionSet::kRiscv64: {
467       OptimizationDef riscv64_optimizations[] = {
468           OptDef(OptimizationPass::kCriticalNativeAbiFixupRiscv64),
469       };
470       return RunOptimizations(graph,
471                               codegen,
472                               dex_compilation_unit,
473                               pass_observer,
474                               riscv64_optimizations);
475     }
476 #endif
477 #ifdef ART_ENABLE_CODEGEN_x86
478     case InstructionSet::kX86: {
479       OptimizationDef x86_optimizations[] = {
480           OptDef(OptimizationPass::kPcRelativeFixupsX86),
481       };
482       return RunOptimizations(graph,
483                               codegen,
484                               dex_compilation_unit,
485                               pass_observer,
486                               x86_optimizations);
487     }
488 #endif
489     default:
490       UNUSED(graph);
491       UNUSED(codegen);
492       UNUSED(dex_compilation_unit);
493       UNUSED(pass_observer);
494       return false;
495   }
496 }
497 
498 bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
499                                               CodeGenerator* codegen,
500                                               const DexCompilationUnit& dex_compilation_unit,
501                                               PassObserver* pass_observer) const {
502   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
503 #if defined(ART_ENABLE_CODEGEN_arm)
504     case InstructionSet::kThumb2:
505     case InstructionSet::kArm: {
506       OptimizationDef arm_optimizations[] = {
507           OptDef(OptimizationPass::kInstructionSimplifierArm),
508           OptDef(OptimizationPass::kSideEffectsAnalysis),
509           OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
510           OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
511           OptDef(OptimizationPass::kScheduling)
512       };
513       return RunOptimizations(graph,
514                               codegen,
515                               dex_compilation_unit,
516                               pass_observer,
517                               arm_optimizations);
518     }
519 #endif
520 #ifdef ART_ENABLE_CODEGEN_arm64
521     case InstructionSet::kArm64: {
522       OptimizationDef arm64_optimizations[] = {
523           OptDef(OptimizationPass::kInstructionSimplifierArm64),
524           OptDef(OptimizationPass::kSideEffectsAnalysis),
525           OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
526           OptDef(OptimizationPass::kScheduling)
527       };
528       return RunOptimizations(graph,
529                               codegen,
530                               dex_compilation_unit,
531                               pass_observer,
532                               arm64_optimizations);
533     }
534 #endif
535 #if defined(ART_ENABLE_CODEGEN_riscv64)
536     case InstructionSet::kRiscv64: {
537       OptimizationDef riscv64_optimizations[] = {
538           OptDef(OptimizationPass::kInstructionSimplifierRiscv64),
539           OptDef(OptimizationPass::kSideEffectsAnalysis),
540           OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
541           OptDef(OptimizationPass::kCriticalNativeAbiFixupRiscv64)
542       };
543       return RunOptimizations(graph,
544                               codegen,
545                               dex_compilation_unit,
546                               pass_observer,
547                               riscv64_optimizations);
548     }
549 #endif
550 #ifdef ART_ENABLE_CODEGEN_x86
551     case InstructionSet::kX86: {
552       OptimizationDef x86_optimizations[] = {
553           OptDef(OptimizationPass::kInstructionSimplifierX86),
554           OptDef(OptimizationPass::kSideEffectsAnalysis),
555           OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
556           OptDef(OptimizationPass::kPcRelativeFixupsX86),
557           OptDef(OptimizationPass::kX86MemoryOperandGeneration)
558       };
559       return RunOptimizations(graph,
560                               codegen,
561                               dex_compilation_unit,
562                               pass_observer,
563                               x86_optimizations);
564     }
565 #endif
566 #ifdef ART_ENABLE_CODEGEN_x86_64
567     case InstructionSet::kX86_64: {
568       OptimizationDef x86_64_optimizations[] = {
569           OptDef(OptimizationPass::kInstructionSimplifierX86_64),
570           OptDef(OptimizationPass::kSideEffectsAnalysis),
571           OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
572           OptDef(OptimizationPass::kX86MemoryOperandGeneration)
573       };
574       return RunOptimizations(graph,
575                               codegen,
576                               dex_compilation_unit,
577                               pass_observer,
578                               x86_64_optimizations);
579     }
580 #endif
581     default:
582       UNUSED(graph);
583       UNUSED(dex_compilation_unit);
584       UNUSED(pass_observer);
585       return false;
586   }
587 }
588 
589 NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
590 static void AllocateRegisters(HGraph* graph,
591                               CodeGenerator* codegen,
592                               PassObserver* pass_observer,
593                               OptimizingCompilerStats* stats) {
594   {
595     PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
596                     pass_observer);
597     PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run();
598   }
599   // Use local allocator shared by SSA liveness analysis and register allocator.
600   // (Register allocator creates new objects in the liveness data.)
601   ScopedArenaAllocator local_allocator(graph->GetArenaStack());
602   SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
603   {
604     PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
605     liveness.Analyze();
606   }
607   {
608     PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
609     std::unique_ptr<RegisterAllocator> register_allocator =
610         RegisterAllocator::Create(&local_allocator, codegen, liveness);
611     register_allocator->AllocateRegisters();
612   }
613 }
614 
615 // Strip pass name suffix to get optimization name.
616 static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
617   size_t pos = pass_name.find(kPassNameSeparator);
618   return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
619 }
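// For example, with kPassNameSeparator "$", a pass name such as "dead_code_elimination$initial"
// maps back to the optimization name "dead_code_elimination".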
620 
621 void OptimizingCompiler::RunOptimizations(HGraph* graph,
622                                           CodeGenerator* codegen,
623                                           const DexCompilationUnit& dex_compilation_unit,
624                                           PassObserver* pass_observer) const {
625   const std::vector<std::string>* pass_names = GetCompilerOptions().GetPassesToRun();
626   if (pass_names != nullptr) {
627     // If passes were defined on command-line, build the optimization
628     // passes and run these instead of the built-in optimizations.
629     // TODO: a way to define depends_on via command-line?
630     const size_t length = pass_names->size();
631     std::vector<OptimizationDef> optimizations;
632     for (const std::string& pass_name : *pass_names) {
633       std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
634       optimizations.push_back(OptDef(OptimizationPassByName(opt_name), pass_name.c_str()));
635     }
636     RunOptimizations(graph,
637                      codegen,
638                      dex_compilation_unit,
639                      pass_observer,
640                      optimizations.data(),
641                      length);
642     return;
643   }
644 
645   OptimizationDef optimizations[] = {
646       // Initial optimizations.
647       OptDef(OptimizationPass::kConstantFolding),
648       OptDef(OptimizationPass::kInstructionSimplifier),
649       OptDef(OptimizationPass::kDeadCodeElimination,
650              "dead_code_elimination$initial"),
651       // Inlining.
652       OptDef(OptimizationPass::kInliner),
653       // Simplification (if inlining occurred, or if we analyzed the invoke as "always throwing").
654       OptDef(OptimizationPass::kConstantFolding,
655              "constant_folding$after_inlining",
656              OptimizationPass::kInliner),
657       OptDef(OptimizationPass::kInstructionSimplifier,
658              "instruction_simplifier$after_inlining",
659              OptimizationPass::kInliner),
660       OptDef(OptimizationPass::kDeadCodeElimination,
661              "dead_code_elimination$after_inlining",
662              OptimizationPass::kInliner),
663       // GVN.
664       OptDef(OptimizationPass::kSideEffectsAnalysis,
665              "side_effects$before_gvn"),
666       OptDef(OptimizationPass::kGlobalValueNumbering),
667       OptDef(OptimizationPass::kReferenceTypePropagation,
668              "reference_type_propagation$after_gvn",
669              OptimizationPass::kGlobalValueNumbering),
670       // Simplification (TODO: only if GVN occurred).
671       OptDef(OptimizationPass::kControlFlowSimplifier),
672       OptDef(OptimizationPass::kConstantFolding,
673              "constant_folding$after_gvn"),
674       OptDef(OptimizationPass::kInstructionSimplifier,
675              "instruction_simplifier$after_gvn"),
676       OptDef(OptimizationPass::kDeadCodeElimination,
677              "dead_code_elimination$after_gvn"),
678       // High-level optimizations.
679       OptDef(OptimizationPass::kSideEffectsAnalysis,
680              "side_effects$before_licm"),
681       OptDef(OptimizationPass::kInvariantCodeMotion),
682       OptDef(OptimizationPass::kInductionVarAnalysis),
683       OptDef(OptimizationPass::kBoundsCheckElimination),
684       OptDef(OptimizationPass::kLoopOptimization),
685       // Simplification.
686       OptDef(OptimizationPass::kConstantFolding,
687              "constant_folding$after_loop_opt"),
688       OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
689              "instruction_simplifier$after_loop_opt"),
690       OptDef(OptimizationPass::kDeadCodeElimination,
691              "dead_code_elimination$after_loop_opt"),
692       // Other high-level optimizations.
693       OptDef(OptimizationPass::kLoadStoreElimination),
694       OptDef(OptimizationPass::kCHAGuardOptimization),
695       OptDef(OptimizationPass::kCodeSinking),
696       // Simplification.
697       OptDef(OptimizationPass::kConstantFolding,
698              "constant_folding$before_codegen"),
699       // The codegen has a few assumptions that only the instruction simplifier
700       // can satisfy. For example, the code generator does not expect to see a
701       // HTypeConversion from a type to the same type.
702       OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
703              "instruction_simplifier$before_codegen"),
704       // Simplification may result in dead code that should be removed prior to
705       // code generation.
706       OptDef(OptimizationPass::kDeadCodeElimination,
707              "dead_code_elimination$before_codegen"),
708       // Eliminate constructor fences after code sinking to avoid
709       // complicated sinking logic to split a fence with many inputs.
710       OptDef(OptimizationPass::kConstructorFenceRedundancyElimination)
711   };
712   RunOptimizations(graph,
713                    codegen,
714                    dex_compilation_unit,
715                    pass_observer,
716                    optimizations);
717 
718   RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer);
719 }
720 
721 static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
722   ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
723   codegen->EmitLinkerPatches(&linker_patches);
724 
725   // Sort patches by literal offset. Required for .oat_patches encoding.
726   std::sort(linker_patches.begin(), linker_patches.end(),
727             [](const linker::LinkerPatch& lhs, const linker::LinkerPatch& rhs) {
728     return lhs.LiteralOffset() < rhs.LiteralOffset();
729   });
730 
731   return linker_patches;
732 }
733 
734 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
735                                          CodeGenerator* codegen,
736                                          bool is_intrinsic,
737                                          const dex::CodeItem* code_item_for_osr_check) const {
738   ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
739   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
740 
741   CompiledCodeStorage* storage = GetCompiledCodeStorage();
742   CompiledMethod* compiled_method = storage->CreateCompiledMethod(
743       codegen->GetInstructionSet(),
744       codegen->GetCode(),
745       ArrayRef<const uint8_t>(stack_map),
746       ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
747       ArrayRef<const linker::LinkerPatch>(linker_patches),
748       is_intrinsic);
749 
750   for (const linker::LinkerPatch& patch : linker_patches) {
751     if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
752       ArenaVector<uint8_t> code(allocator->Adapter());
753       std::string debug_name;
754       codegen->EmitThunkCode(patch, &code, &debug_name);
755       storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
756     }
757   }
758 
759   return compiled_method;
760 }
761 
762 #ifdef ART_USE_RESTRICTED_MODE
763 
764 // This class acts as a filter that allows gradually enabling ART Simulator work - we
765 // compile (and hence simulate) only limited types of methods.
766 class CompilationFilterForRestrictedMode : public HGraphDelegateVisitor {
767  public:
768   explicit CompilationFilterForRestrictedMode(HGraph* graph)
769       : HGraphDelegateVisitor(graph),
770         has_unsupported_instructions_(false) {}
771 
772   // Returns true if the graph contains instructions which are not currently supported in
773   // the restricted mode.
774   bool GraphRejected() const { return has_unsupported_instructions_; }
775 
776  private:
777   void VisitInstruction(HInstruction*) override {
778     // Currently we don't support compiling methods unless they were annotated with $compile$.
779     RejectGraph();
780   }
781   void RejectGraph() {
782     has_unsupported_instructions_ = true;
783   }
784 
785   bool has_unsupported_instructions_;
786 };
787 
788 // Returns whether an ArtMethod, specified by a name, should be compiled. Used in restricted
789 // mode.
790 //
791 // In restricted mode, the simulator will execute only those methods which are compiled; thus
792 // this is going to be an effective filter for methods to be simulated.
793 //
794 // TODO(Simulator): compile and simulate all the methods as in regular host mode.
795 bool ShouldMethodBeCompiled(HGraph* graph, const std::string& method_name) {
796   if (method_name.find("$compile$") != std::string::npos) {
797     return true;
798   }
799 
800   CompilationFilterForRestrictedMode filter_visitor(graph);
801   filter_visitor.VisitReversePostOrder();
802 
803   return !filter_visitor.GraphRejected();
804 }
805 #endif  // ART_USE_RESTRICTED_MODE
806 
807 CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
808                                               ArenaStack* arena_stack,
809                                               const DexCompilationUnit& dex_compilation_unit,
810                                               ArtMethod* method,
811                                               CompilationKind compilation_kind,
812                                               VariableSizedHandleScope* handles) const {
813   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
814   const CompilerOptions& compiler_options = GetCompilerOptions();
815   InstructionSet instruction_set = compiler_options.GetInstructionSet();
816   const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
817   uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
818   const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
819 
820   // Always use the Thumb-2 assembler: some runtime functionality
821   // (like implicit stack overflow checks) assume Thumb-2.
822   DCHECK_NE(instruction_set, InstructionSet::kArm);
823 
824   // Do not attempt to compile on architectures we do not support.
825   if (!IsInstructionSetSupported(instruction_set)) {
826     MaybeRecordStat(compilation_stats_.get(),
827                     MethodCompilationStat::kNotCompiledUnsupportedIsa);
828     return nullptr;
829   }
830 
831   if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
832     SCOPED_TRACE << "Not compiling because of pathological case";
833     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
834     return nullptr;
835   }
836 
837   // Implementation of the space filter: do not compile a code item whose size in
838   // code units is bigger than 128.
839   static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
840   if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
841       && (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
842           kSpaceFilterOptimizingThreshold)) {
843     SCOPED_TRACE << "Not compiling because of space filter";
844     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
845     return nullptr;
846   }
847 
848   CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
849 
850   bool dead_reference_safe;
851   // For AOT compilation, we may not get a method, for example if its class is erroneous,
852   // possibly due to an unavailable superclass.  JIT should always have a method.
853   DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
854   if (method != nullptr) {
855     const dex::ClassDef* containing_class;
856     {
857       ScopedObjectAccess soa(Thread::Current());
858       containing_class = &method->GetClassDef();
859     }
860     // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
861     // is currently rarely true.
862     dead_reference_safe =
863         annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
864         && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
865   } else {
866     // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
867     dead_reference_safe = false;
868   }
869 
870   HGraph* graph = new (allocator) HGraph(
871       allocator,
872       arena_stack,
873       handles,
874       dex_file,
875       method_idx,
876       compiler_options.GetInstructionSet(),
877       kInvalidInvokeType,
878       dead_reference_safe,
879       compiler_options.GetDebuggable(),
880       compilation_kind);
881 
882   if (method != nullptr) {
883     graph->SetArtMethod(method);
884   }
885 
886   jit::Jit* jit = Runtime::Current()->GetJit();
887   if (jit != nullptr) {
888     ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
889     graph->SetProfilingInfo(info);
890   }
891 
892   std::unique_ptr<CodeGenerator> codegen(
893       CodeGenerator::Create(graph,
894                             compiler_options,
895                             compilation_stats_.get()));
896   if (codegen.get() == nullptr) {
897     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
898     return nullptr;
899   }
900   codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
901 
902   PassObserver pass_observer(graph,
903                              codegen.get(),
904                              visualizer_output_.get(),
905                              compiler_options);
906 
907   {
908     VLOG(compiler) << "Building " << pass_observer.GetMethodName();
909     PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
910     HGraphBuilder builder(graph,
911                           code_item_accessor,
912                           &dex_compilation_unit,
913                           &dex_compilation_unit,
914                           codegen.get(),
915                           compilation_stats_.get());
916     GraphAnalysisResult result = builder.BuildGraph();
917     if (result != kAnalysisSuccess) {
918       // Don't try recompiling this method again.
919       if (method != nullptr) {
920         ScopedObjectAccess soa(Thread::Current());
921         method->SetDontCompile();
922       }
923       SCOPED_TRACE << "Not compiling because of " << result;
924       switch (result) {
925         case kAnalysisSkipped: {
926           MaybeRecordStat(compilation_stats_.get(),
927                           MethodCompilationStat::kNotCompiledSkipped);
928           break;
929         }
930         case kAnalysisInvalidBytecode: {
931           MaybeRecordStat(compilation_stats_.get(),
932                           MethodCompilationStat::kNotCompiledInvalidBytecode);
933           break;
934         }
935         case kAnalysisFailThrowCatchLoop: {
936           MaybeRecordStat(compilation_stats_.get(),
937                           MethodCompilationStat::kNotCompiledThrowCatchLoop);
938           break;
939         }
940         case kAnalysisFailAmbiguousArrayOp: {
941           MaybeRecordStat(compilation_stats_.get(),
942                           MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
943           break;
944         }
945         case kAnalysisFailIrreducibleLoopAndStringInit: {
946           MaybeRecordStat(compilation_stats_.get(),
947                           MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
948           break;
949         }
950         case kAnalysisFailPhiEquivalentInOsr: {
951           MaybeRecordStat(compilation_stats_.get(),
952                           MethodCompilationStat::kNotCompiledPhiEquivalentInOsr);
953           break;
954         }
955         case kAnalysisSuccess:
956           LOG(FATAL) << "Unreachable";
957           UNREACHABLE();
958       }
959       pass_observer.SetGraphInBadState();
960       return nullptr;
961     }
962   }
963 
964   if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
965     graph->SetUsefulOptimizing();
966     // Branch profiling currently doesn't support running optimizations.
967     RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
968   } else {
969     RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
970     PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
971     WriteBarrierElimination(graph, compilation_stats_.get()).Run();
972   }
973 
974   // If we are compiling baseline and we haven't created a profiling info for
975   // this method already, do it now.
976   if (jit != nullptr &&
977       compilation_kind == CompilationKind::kBaseline &&
978       graph->IsUsefulOptimizing() &&
979       graph->GetProfilingInfo() == nullptr) {
980     ProfilingInfoBuilder(
981         graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
982     // We expect a profiling info to be created and attached to the graph.
983     // However, we may have run out of memory trying to create it, so in this
984     // case just abort the compilation.
985     if (graph->GetProfilingInfo() == nullptr) {
986       SCOPED_TRACE << "Not compiling because of out of memory";
987       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
988       return nullptr;
989     }
990   }
991 
992   AllocateRegisters(graph,
993                     codegen.get(),
994                     &pass_observer,
995                     compilation_stats_.get());
996 
997   if (UNLIKELY(codegen->GetFrameSize() > codegen->GetMaximumFrameSize())) {
998     SCOPED_TRACE << "Not compiling because of stack frame too large";
999     LOG(WARNING) << "Stack frame size is " << codegen->GetFrameSize()
1000                  << " which is larger than the maximum of " << codegen->GetMaximumFrameSize()
1001                  << " bytes. Method: " << graph->PrettyMethod();
1002     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledFrameTooBig);
1003     return nullptr;
1004   }
1005 
1006 #ifdef ART_USE_RESTRICTED_MODE
1007   // Check whether the method should be compiled according to the compilation filter. Note: this
1008   // relies on a LocationSummary being available for each instruction so should take place after
1009   // register allocation does liveness analysis.
1010   // TODO(Simulator): support and compile all methods.
1011   std::string method_name = dex_file.PrettyMethod(method_idx);
1012   if (!ShouldMethodBeCompiled(graph, method_name)) {
1013     return nullptr;
1014   }
1015 #endif  // ART_USE_RESTRICTED_MODE
1016 
1017   codegen->Compile();
1018   pass_observer.DumpDisassembly();
1019 
1020   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
1021   return codegen.release();
1022 }
1023 
1024 CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
1025     ArenaAllocator* allocator,
1026     ArenaStack* arena_stack,
1027     const DexCompilationUnit& dex_compilation_unit,
1028     ArtMethod* method,
1029     VariableSizedHandleScope* handles) const {
1030   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
1031   const CompilerOptions& compiler_options = GetCompilerOptions();
1032   InstructionSet instruction_set = compiler_options.GetInstructionSet();
1033   const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
1034   uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
1035 
1036   // TODO(Simulator): Reenable compilation of intrinsics.
1037 #ifdef ART_USE_RESTRICTED_MODE
1038   return nullptr;
1039 #endif  // ART_USE_RESTRICTED_MODE
1040 
1041   // Always use the Thumb-2 assembler: some runtime functionality
1042   // (like implicit stack overflow checks) assume Thumb-2.
1043   DCHECK_NE(instruction_set, InstructionSet::kArm);
1044 
1045   // Do not attempt to compile on architectures we do not support.
1046   if (!IsInstructionSetSupported(instruction_set)) {
1047     return nullptr;
1048   }
1049 
1050   HGraph* graph = new (allocator) HGraph(
1051       allocator,
1052       arena_stack,
1053       handles,
1054       dex_file,
1055       method_idx,
1056       compiler_options.GetInstructionSet(),
1057       kInvalidInvokeType,
1058       /* dead_reference_safe= */ true,  // Intrinsics don't affect dead reference safety.
1059       compiler_options.GetDebuggable(),
1060       CompilationKind::kOptimized);
1061 
1062   DCHECK(Runtime::Current()->IsAotCompiler());
1063   DCHECK(method != nullptr);
1064   graph->SetArtMethod(method);
1065 
1066   std::unique_ptr<CodeGenerator> codegen(
1067       CodeGenerator::Create(graph,
1068                             compiler_options,
1069                             compilation_stats_.get()));
1070   if (codegen.get() == nullptr) {
1071     return nullptr;
1072   }
1073   codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
1074 
1075   PassObserver pass_observer(graph,
1076                              codegen.get(),
1077                              visualizer_output_.get(),
1078                              compiler_options);
1079 
1080   {
1081     VLOG(compiler) << "Building intrinsic graph " << pass_observer.GetMethodName();
1082     PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
1083     HGraphBuilder builder(graph,
1084                           CodeItemDebugInfoAccessor(),  // Null code item.
1085                           &dex_compilation_unit,
1086                           &dex_compilation_unit,
1087                           codegen.get(),
1088                           compilation_stats_.get());
1089     builder.BuildIntrinsicGraph(method);
1090   }
1091 
1092   OptimizationDef optimizations[] = {
1093       // The codegen has a few assumptions that only the instruction simplifier
1094       // can satisfy.
1095       OptDef(OptimizationPass::kInstructionSimplifier),
1096   };
1097   RunOptimizations(graph,
1098                    codegen.get(),
1099                    dex_compilation_unit,
1100                    &pass_observer,
1101                    optimizations);
1102 
1103   RunArchOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
1104   {
1105     PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
1106     WriteBarrierElimination(graph, compilation_stats_.get()).Run();
1107   }
1108 
1109   AllocateRegisters(graph,
1110                     codegen.get(),
1111                     &pass_observer,
1112                     compilation_stats_.get());
1113   if (!codegen->IsLeafMethod()) {
1114     VLOG(compiler) << "Intrinsic method is not leaf: " << method->GetIntrinsic()
1115         << " " << graph->PrettyMethod();
1116     return nullptr;
1117   }
1118 
1119   CHECK_LE(codegen->GetFrameSize(), codegen->GetMaximumFrameSize());
1120   codegen->Compile();
1121   pass_observer.DumpDisassembly();
1122 
1123   VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
1124       << " " << graph->PrettyMethod();
1125   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
1126   return codegen.release();
1127 }
1128 
1129 CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
1130                                             uint32_t access_flags,
1131                                             uint16_t class_def_idx,
1132                                             uint32_t method_idx,
1133                                             Handle<mirror::ClassLoader> jclass_loader,
1134                                             const DexFile& dex_file,
1135                                             Handle<mirror::DexCache> dex_cache) const {
1136   const CompilerOptions& compiler_options = GetCompilerOptions();
1137   DCHECK(compiler_options.IsAotCompiler());
1138   CompiledMethod* compiled_method = nullptr;
1139   Runtime* runtime = Runtime::Current();
1140   DCHECK(runtime->IsAotCompiler());
1141   ArenaAllocator allocator(runtime->GetArenaPool());
1142   ArenaStack arena_stack(runtime->GetArenaPool());
1143   std::unique_ptr<CodeGenerator> codegen;
1144   bool compiled_intrinsic = false;
1145   {
1146     ScopedObjectAccess soa(Thread::Current());
1147     ArtMethod* method =
1148         runtime->GetClassLinker()->ResolveMethodId(method_idx, dex_cache, jclass_loader);
1149     soa.Self()->ClearException();  // Suppress exception if any.
1150     VariableSizedHandleScope handles(soa.Self());
1151     Handle<mirror::Class> compiling_class =
1152         handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
1153     DexCompilationUnit dex_compilation_unit(
1154         jclass_loader,
1155         runtime->GetClassLinker(),
1156         dex_file,
1157         code_item,
1158         class_def_idx,
1159         method_idx,
1160         access_flags,
1161         /*verified_method=*/ nullptr,  // Not needed by the Optimizing compiler.
1162         dex_cache,
1163         compiling_class);
1164     // All signature polymorphic methods are native.
1165     DCHECK(method == nullptr || !method->IsSignaturePolymorphic());
1166     // Go to native so that we don't block GC during compilation.
1167     ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
1168     // Try to compile a fully intrinsified implementation.
1169     if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
1170       DCHECK(compiler_options.IsBootImage());
1171       codegen.reset(
1172           TryCompileIntrinsic(&allocator,
1173                               &arena_stack,
1174                               dex_compilation_unit,
1175                               method,
1176                               &handles));
1177       if (codegen != nullptr) {
1178         compiled_intrinsic = true;
1179       }
1180     }
1181     if (codegen == nullptr) {
1182       codegen.reset(
1183           TryCompile(&allocator,
1184                      &arena_stack,
1185                      dex_compilation_unit,
1186                      method,
1187                      compiler_options.IsBaseline()
1188                         ? CompilationKind::kBaseline
1189                         : CompilationKind::kOptimized,
1190                      &handles));
1191     }
1192   }
1193   if (codegen.get() != nullptr) {
1194     compiled_method = Emit(&allocator,
1195                            codegen.get(),
1196                            compiled_intrinsic,
1197                            compiled_intrinsic ? nullptr : code_item);
1198 
1199     if (kArenaAllocatorCountAllocations) {
1200       codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
1201       size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
1202       if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
1203         MemStats mem_stats(allocator.GetMemStats());
1204         MemStats peak_stats(arena_stack.GetPeakStats());
1205         LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
1206                   << dex_file.PrettyMethod(method_idx)
1207                   << "\n" << Dumpable<MemStats>(mem_stats)
1208                   << "\n" << Dumpable<MemStats>(peak_stats);
1209       }
1210     }
1211   }
1212 
1213   // TODO(Simulator): Check for $opt$ in method name and that such method is compiled.
1214 #ifndef ART_USE_RESTRICTED_MODE
1215   if (kIsDebugBuild &&
1216       compiler_options.CompileArtTest() &&
1217       IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
1218     // For testing purposes, we put a special marker on method names
1219     // that should be compiled with this compiler (when the
1220     // instruction set is supported). This makes sure we're not
1221     // regressing.
1222     std::string method_name = dex_file.PrettyMethod(method_idx);
1223     bool shouldCompile = method_name.find("$opt$") != std::string::npos;
1224     DCHECK_IMPLIES(compiled_method == nullptr, !shouldCompile) << "Didn't compile " << method_name;
1225   }
1226 #endif  // #ifndef ART_USE_RESTRICTED_MODE
1227 
1228   return compiled_method;
1229 }
1230 
1231 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
1232                                                     const JniCompiledMethod& jni_compiled_method,
1233                                                     size_t code_size,
1234                                                     bool debuggable) {
1235   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
1236   // to stay clear of the frame size limit.
1237   std::unique_ptr<StackMapStream> stack_map_stream(
1238       new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
1239   stack_map_stream->BeginMethod(jni_compiled_method.GetFrameSize(),
1240                                 jni_compiled_method.GetCoreSpillMask(),
1241                                 jni_compiled_method.GetFpSpillMask(),
1242                                 /* num_dex_registers= */ 0,
1243                                 /* baseline= */ false,
1244                                 debuggable);
1245   stack_map_stream->EndMethod(code_size);
1246   return stack_map_stream->Encode();
1247 }
1248 
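// AOT entry point for native methods. When compiling the boot image, a fully intrinsified
// implementation is attempted first; otherwise (or on failure) the generic quick JNI stub is
// generated together with the minimal stack map built above.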
1249 CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
1250                                                uint32_t method_idx,
1251                                                const DexFile& dex_file,
1252                                                Handle<mirror::DexCache> dex_cache) const {
1253   Runtime* runtime = Runtime::Current();
1254   ArenaAllocator allocator(runtime->GetArenaPool());
1255   ArenaStack arena_stack(runtime->GetArenaPool());
1256 
1257   const CompilerOptions& compiler_options = GetCompilerOptions();
1258   if (compiler_options.IsBootImage()) {
1259     ScopedObjectAccess soa(Thread::Current());
1260     ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
1261         method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
1262     // Try to compile a fully intrinsified implementation. Do not try to do this for
1263     // signature polymorphic methods as the InstructionBuilder cannot handle them;
1264     // and it would be useless as they always have a slow path for type conversions.
1265     if (method != nullptr && UNLIKELY(method->IsIntrinsic()) && !method->IsSignaturePolymorphic()) {
1266       VariableSizedHandleScope handles(soa.Self());
1267       ScopedNullHandle<mirror::ClassLoader> class_loader;  // null means boot class path loader.
1268       Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
1269       DexCompilationUnit dex_compilation_unit(
1270           class_loader,
1271           runtime->GetClassLinker(),
1272           dex_file,
1273           /*code_item=*/ nullptr,
1274           /*class_def_idx=*/ DexFile::kDexNoIndex16,
1275           method_idx,
1276           access_flags,
1277           /*verified_method=*/ nullptr,
1278           dex_cache,
1279           compiling_class);
1280       // Go to native so that we don't block GC during compilation.
1281       ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
1282       std::unique_ptr<CodeGenerator> codegen(
1283           TryCompileIntrinsic(&allocator,
1284                               &arena_stack,
1285                               dex_compilation_unit,
1286                               method,
1287                               &handles));
1288       if (codegen != nullptr) {
1289         return Emit(&allocator, codegen.get(), /*is_intrinsic=*/ true, /*item=*/ nullptr);
1290       }
1291     }
1292   }
1293 
1294   JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
1295       compiler_options, dex_file.GetMethodShortyView(method_idx), access_flags, &allocator);
1296   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
1297 
1298   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
1299   ScopedArenaVector<uint8_t> stack_map =
1300       CreateJniStackMap(&stack_map_allocator,
1301                         jni_compiled_method,
1302                         jni_compiled_method.GetCode().size(),
1303                         compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
1304   return GetCompiledCodeStorage()->CreateCompiledMethod(
1305       jni_compiled_method.GetInstructionSet(),
1306       jni_compiled_method.GetCode(),
1307       ArrayRef<const uint8_t>(stack_map),
1308       jni_compiled_method.GetCfi(),
1309       /*patches=*/ ArrayRef<const linker::LinkerPatch>(),
1310       /*is_intrinsic=*/ false);
1311 }
1312 
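// Factory entry point for creating an Optimizing compiler backed by the given code storage.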
1313 Compiler* CreateOptimizingCompiler(const CompilerOptions& compiler_options,
1314                                    CompiledCodeStorage* storage) {
1315   return new OptimizingCompiler(compiler_options, storage);
1316 }
1317 
1318 bool EncodeArtMethodInInlineInfo([[maybe_unused]] ArtMethod* method) {
1319   // Note: the runtime is null only for unit testing.
1320   return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
1321 }
1322 
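// JIT entry point. Native methods are compiled into a generic JNI stub; all other methods go
// through either the fast baseline compiler (when enabled) or the full optimizing pipeline, and
// the resulting code is reserved and committed in the JIT code cache.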
1323 bool OptimizingCompiler::JitCompile(Thread* self,
1324                                     jit::JitCodeCache* code_cache,
1325                                     jit::JitMemoryRegion* region,
1326                                     ArtMethod* method,
1327                                     CompilationKind compilation_kind,
1328                                     jit::JitLogger* jit_logger) {
1329   const CompilerOptions& compiler_options = GetCompilerOptions();
1330   DCHECK(compiler_options.IsJitCompiler());
1331   DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region));
1332   StackHandleScope<3> hs(self);
1333   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
1334       method->GetDeclaringClass()->GetClassLoader()));
1335   Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
1336   DCHECK(method->IsCompilable());
1337 
1338   const DexFile* dex_file = method->GetDexFile();
1339   const uint16_t class_def_idx = method->GetClassDefIndex();
1340   const dex::CodeItem* code_item = method->GetCodeItem();
1341   const uint32_t method_idx = method->GetDexMethodIndex();
1342   const uint32_t access_flags = method->GetAccessFlags();
1343 
1344   Runtime* runtime = Runtime::Current();
1345   ArenaAllocator allocator(runtime->GetJitArenaPool());
1346 
1347   std::vector<uint8_t> debug_info;
1348 
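  // Helper filling the MethodDebugInfo fields common to the JNI, fast-baseline and optimizing
  // paths below; the code address, code size, frame size and CFI are set per path.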
1349   auto create_method_debug_info = [&]() {
1350     debug::MethodDebugInfo method_debug_info = {};
1351     DCHECK(method_debug_info.custom_name.empty());
1352     method_debug_info.dex_file = dex_file;
1353     method_debug_info.class_def_index = class_def_idx;
1354     method_debug_info.dex_method_index = method_idx;
1355     method_debug_info.access_flags = access_flags;
1356     method_debug_info.code_item = code_item;
1357     method_debug_info.isa = compiler_options.GetInstructionSet();
1358     method_debug_info.deduped = false;
1359     method_debug_info.is_native_debuggable = compiler_options.GetNativeDebuggable();
1360     method_debug_info.is_code_address_text_relative = false;
1361     method_debug_info.is_optimized = true;
1362     return method_debug_info;
1363   };
1364 
1365   if (UNLIKELY(method->IsNative())) {
1366     // Use GenericJniTrampoline for critical native methods in debuggable runtimes. We don't
1367     // support calling method entry / exit hooks for critical native methods yet.
1368     // TODO(mythria): Add support for calling method entry / exit hooks in JITed stubs for critical
1369     // native methods too.
1370     if (compiler_options.GetDebuggable() && method->IsCriticalNative()) {
1371       DCHECK(compiler_options.IsJitCompiler());
1372       return false;
1373     }
1374     // Java debuggable runtimes should set compiler options to debuggable, so that we either
1375     // generate method entry / exit hooks or skip JITing. For critical native methods we don't
1376     // generate method entry / exit hooks so we shouldn't JIT them in debuggable runtimes.
1377     DCHECK_IMPLIES(method->IsCriticalNative(), !runtime->IsJavaDebuggable());
1378 
1379     JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
1380         compiler_options, dex_file->GetMethodShortyView(method_idx), access_flags, &allocator);
1381     std::vector<Handle<mirror::Object>> roots;
1382     ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
1383         allocator.Adapter(kArenaAllocCHA));
1384     ArenaStack arena_stack(runtime->GetJitArenaPool());
1385     // StackMapStream is large and it does not fit into this frame, so we need a helper method.
1386     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
1387     ScopedArenaVector<uint8_t> stack_map =
1388         CreateJniStackMap(&stack_map_allocator,
1389                           jni_compiled_method,
1390                           jni_compiled_method.GetCode().size(),
1391                           compiler_options.GetDebuggable() && compiler_options.IsJitCompiler());
1392 
1393     ArrayRef<const uint8_t> reserved_code;
1394     ArrayRef<const uint8_t> reserved_data;
1395     if (!code_cache->Reserve(self,
1396                              region,
1397                              jni_compiled_method.GetCode().size(),
1398                              stack_map.size(),
1399                              /* number_of_roots= */ 0,
1400                              method,
1401                              /*out*/ &reserved_code,
1402                              /*out*/ &reserved_data)) {
1403       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
1404       return false;
1405     }
1406     const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
1407 
1408     // Add debug info after we know the code location but before we update entry-point.
1409     if (compiler_options.GenerateAnyDebugInfo()) {
1410       debug::MethodDebugInfo method_debug_info = create_method_debug_info();
1411       // Simpleperf relies on art_jni_trampoline to detect jni methods.
1412       method_debug_info.custom_name = "art_jni_trampoline";
1413       method_debug_info.code_address = reinterpret_cast<uintptr_t>(code);
1414       method_debug_info.code_size = jni_compiled_method.GetCode().size();
1415       method_debug_info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
1416       method_debug_info.code_info = nullptr;
1417       method_debug_info.cfi = jni_compiled_method.GetCfi();
1418       debug_info = GenerateJitDebugInfo(method_debug_info);
1419     }
1420 
1421     if (!code_cache->Commit(self,
1422                             region,
1423                             method,
1424                             reserved_code,
1425                             jni_compiled_method.GetCode(),
1426                             reserved_data,
1427                             roots,
1428                             ArrayRef<const uint8_t>(stack_map),
1429                             debug_info,
1430                             /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
1431                             compilation_kind,
1432                             cha_single_implementation_list)) {
1433       code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
1434       return false;
1435     }
1436 
1437     Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
1438     if (jit_logger != nullptr) {
1439       jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
1440     }
1441     return true;
1442   }
1443 
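  // Non-native method: try the fast baseline compiler first when it is enabled and applicable,
  // and fall back to the regular TryCompile() pipeline otherwise.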
1444   ArenaStack arena_stack(runtime->GetJitArenaPool());
1445   VariableSizedHandleScope handles(self);
1446 
1447   std::unique_ptr<CodeGenerator> codegen;
1448   std::unique_ptr<FastCompiler> fast_compiler;
1449   Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
1450   DexCompilationUnit dex_compilation_unit(
1451       class_loader,
1452       runtime->GetClassLinker(),
1453       *dex_file,
1454       code_item,
1455       class_def_idx,
1456       method_idx,
1457       access_flags,
1458       /*verified_method=*/ nullptr,
1459       dex_cache,
1460       compiling_class);
1461   {
1462     // Go to native so that we don't block GC during compilation.
1463     ScopedThreadSuspension sts(self, ThreadState::kNative);
1464     if (com::android::art::flags::fast_baseline_compiler() &&
1465         compilation_kind == CompilationKind::kBaseline &&
1466         !compiler_options.GetDebuggable()) {
1467       fast_compiler = FastCompiler::Compile(method,
1468                                             &allocator,
1469                                             &arena_stack,
1470                                             &handles,
1471                                             compiler_options,
1472                                             dex_compilation_unit);
1473     }
1474     if (fast_compiler == nullptr) {
1475       codegen.reset(
1476           TryCompile(&allocator,
1477                      &arena_stack,
1478                      dex_compilation_unit,
1479                      method,
1480                      compilation_kind,
1481                      &handles));
1482       if (codegen.get() == nullptr) {
1483         return false;
1484       }
1485     }
1486   }
1487 
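  // Emit the generated code into the JIT code cache. Both branches follow the same
  // reserve / emit roots / attach debug info / commit sequence, sourcing code and stack maps
  // from the fast compiler or from the codegen respectively.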
1488   if (fast_compiler != nullptr) {
1489     ArrayRef<const uint8_t> reserved_code;
1490     ArrayRef<const uint8_t> reserved_data;
1491     ScopedArenaVector<uint8_t> stack_maps = fast_compiler->BuildStackMaps();
1492     if (!code_cache->Reserve(self,
1493                              region,
1494                              fast_compiler->GetCode().size(),
1495                              stack_maps.size(),
1496                              fast_compiler->GetNumberOfJitRoots(),
1497                              method,
1498                              /*out*/ &reserved_code,
1499                              /*out*/ &reserved_data)) {
1500       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
1501       return false;
1502     }
1503     const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
1504     if (compiler_options.GenerateAnyDebugInfo()) {
1505       debug::MethodDebugInfo method_debug_info = create_method_debug_info();
1506       method_debug_info.code_address = reinterpret_cast<uintptr_t>(code);
1507       method_debug_info.code_size = fast_compiler->GetCode().size();
1508       method_debug_info.frame_size_in_bytes = fast_compiler->GetFrameSize();
1509       method_debug_info.code_info = stack_maps.size() == 0 ? nullptr : stack_maps.data();
1510       method_debug_info.cfi = ArrayRef<const uint8_t>(fast_compiler->GetCfiData());
1511       debug_info = GenerateJitDebugInfo(method_debug_info);
1512     }
1513 
1514     const uint8_t* roots_data = reserved_data.data();
1515     std::vector<Handle<mirror::Object>> roots;
1516     fast_compiler->EmitJitRoots(const_cast<uint8_t*>(fast_compiler->GetCode().data()),
1517                                 roots_data,
1518                                 &roots);
1519     // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
1520     DCHECK(std::all_of(roots.begin(),
1521                        roots.end(),
1522                        [&handles](Handle<mirror::Object> root){
1523                          return handles.Contains(root.GetReference());
1524                        }));
1525     ArenaSet<ArtMethod*> cha_single_implementation_list(allocator.Adapter(kArenaAllocCHA));
1526     if (!code_cache->Commit(self,
1527                             region,
1528                             method,
1529                             reserved_code,
1530                             fast_compiler->GetCode(),
1531                             reserved_data,
1532                             roots,
1533                             ArrayRef<const uint8_t>(stack_maps),
1534                             debug_info,
1535                             /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
1536                             compilation_kind,
1537                             cha_single_implementation_list)) {
1538       code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
1539       return false;
1540     }
1541     if (jit_logger != nullptr) {
1542       jit_logger->WriteLog(code, fast_compiler->GetCode().size(), method);
1543     }
1544     VLOG(jit) << "Fast compiled " << method->PrettyMethod();
1545   } else {
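    // Regular baseline/optimized path: stack maps are built from the code item and the CHA
    // single-implementation list comes from the graph.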
1546     ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
1547     ArrayRef<const uint8_t> reserved_code;
1548     ArrayRef<const uint8_t> reserved_data;
1549     if (!code_cache->Reserve(self,
1550                              region,
1551                              codegen->GetAssembler()->CodeSize(),
1552                              stack_map.size(),
1553                              /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
1554                              method,
1555                              /*out*/ &reserved_code,
1556                              /*out*/ &reserved_data)) {
1557       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
1558       return false;
1559     }
1560     const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
1561     const uint8_t* roots_data = reserved_data.data();
1562 
1563     std::vector<Handle<mirror::Object>> roots;
1564     codegen->EmitJitRoots(const_cast<uint8_t*>(codegen->GetAssembler()->CodeBufferBaseAddress()),
1565                           roots_data,
1566                           &roots);
1567     // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
1568     DCHECK(std::all_of(roots.begin(),
1569                        roots.end(),
1570                        [&handles](Handle<mirror::Object> root){
1571                          return handles.Contains(root.GetReference());
1572                        }));
1573 
1574     // Add debug info after we know the code location but before we update entry-point.
1575     if (compiler_options.GenerateAnyDebugInfo()) {
1576       debug::MethodDebugInfo method_debug_info = create_method_debug_info();
1577       method_debug_info.code_address = reinterpret_cast<uintptr_t>(code);
1578       method_debug_info.code_size = codegen->GetAssembler()->CodeSize();
1579       method_debug_info.frame_size_in_bytes = codegen->GetFrameSize();
1580       method_debug_info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
1581       method_debug_info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
1582       debug_info = GenerateJitDebugInfo(method_debug_info);
1583     }
1584 
1585     if (compilation_kind == CompilationKind::kBaseline &&
1586         !codegen->GetGraph()->IsUsefulOptimizing()) {
1587       // The baseline compilation detected that it has done all the optimizations
1588       // that the full compiler would do. Therefore we set the compilation kind to
1589       // be `kOptimized`.
1590       compilation_kind = CompilationKind::kOptimized;
1591     }
1592 
1593     if (!code_cache->Commit(self,
1594                             region,
1595                             method,
1596                             reserved_code,
1597                             codegen->GetCode(),
1598                             reserved_data,
1599                             roots,
1600                             ArrayRef<const uint8_t>(stack_map),
1601                             debug_info,
1602                             /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
1603                             compilation_kind,
1604                             codegen->GetGraph()->GetCHASingleImplementationList())) {
1605       CHECK_EQ(CodeInfo::HasShouldDeoptimizeFlag(stack_map.data()),
1606                codegen->GetGraph()->HasShouldDeoptimizeFlag());
1607       code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
1608       return false;
1609     }
1610 
1611     if (jit_logger != nullptr) {
1612       jit_logger->WriteLog(code, codegen->GetAssembler()->CodeSize(), method);
1613     }
1614   }
1615 
1616   Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
1617 
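  // Optional arena memory accounting, mirroring the report done in the AOT Compile() path above.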
1618   if (kArenaAllocatorCountAllocations) {
1619     codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
1620     size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
1621     if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
1622       MemStats mem_stats(allocator.GetMemStats());
1623       MemStats peak_stats(arena_stack.GetPeakStats());
1624       LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
1625                 << dex_file->PrettyMethod(method_idx)
1626                 << "\n" << Dumpable<MemStats>(mem_stats)
1627                 << "\n" << Dumpable<MemStats>(peak_stats);
1628     }
1629   }
1630 
1631   return true;
1632 }
1633 
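// Wraps the freshly JITed method in an in-memory ELF file; mini debug info is used unless full
// debug info generation was explicitly requested.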
1634 std::vector<uint8_t> OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
1635   const CompilerOptions& compiler_options = GetCompilerOptions();
1636   if (compiler_options.GenerateAnyDebugInfo()) {
1637     // If both flags are passed, generate full debug info.
1638     const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
1639 
1640     // Create entry for the single method that we just compiled.
1641     InstructionSet isa = compiler_options.GetInstructionSet();
1642     const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
1643     return debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
1644   }
1645   return std::vector<uint8_t>();
1646 }
1647 
1648 }  // namespace art
1649