/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <sstream>

#include <stdint.h>

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "builder.h"
#include "code_generator.h"
#include "compiled_method.h"
#include "compiler.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/dex_file_types.h"
#include "driver/compiled_method_storage.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "inliner.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
#include "jni/quick/jni_compiler.h"
#include "linker/linker_patch.h"
#include "nodes.h"
#include "oat_quick_method_header.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "select_generator.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "stack_map_stream.h"
#include "utils/assembler.h"

namespace art {

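// Note: when kArenaAllocatorCountAllocations is enabled, compilations whose
// total arena usage exceeds this threshold log detailed memory stats
// (see OptimizingCompiler::Compile() below).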
static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;

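// Separator used in qualified pass instance names such as
// "dead_code_elimination$initial"; ConvertPassNameToOptimizationName() below
// strips the suffix to recover the optimization name.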
static constexpr const char* kPassNameSeparator = "$";

/**
 * Used by the code generator to allocate the code in a vector.
 */
class CodeVectorAllocator final : public CodeAllocator {
 public:
  explicit CodeVectorAllocator(ArenaAllocator* allocator)
      : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}

  uint8_t* Allocate(size_t size) override {
    memory_.resize(size);
    return &memory_[0];
  }

  ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
  uint8_t* GetData() { return memory_.data(); }

 private:
  ArenaVector<uint8_t> memory_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};
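// Typical usage (see TryCompile() below): construct the allocator on the
// caller's stack, hand it to CodeGenerator::Compile(), then read the emitted
// instructions back through GetMemory() when building the CompiledMethod.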

/**
 * Filter to apply to the visualizer. Methods whose names contain this filter
 * will be dumped.
 */
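// Note: the empty default matches every method; see IsVerboseMethod() below.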
static constexpr const char kStringFilter[] = "";

class PassScope;

class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               const CompilerOptions& compiler_options)
      : graph_(graph),
        last_seen_graph_size_(0),
        cached_method_name_(),
        timing_logger_enabled_(compiler_options.GetDumpPassTimings()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetAllocator()),
        visualizer_oss_(),
        visualizer_output_(visualizer_output),
        visualizer_enabled_(!compiler_options.GetDumpCfgFileName().empty()),
        visualizer_(&visualizer_oss_, graph, codegen),
        codegen_(codegen),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_options, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
    if (visualizer_enabled_) {
      FlushVisualizer();
    }
    DCHECK(visualizer_oss_.str().empty());
  }

  void DumpDisassembly() {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
      FlushVisualizer();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) {
    VLOG(compiler) << "Starting pass: " << pass_name;
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
      FlushVisualizer();
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void FlushVisualizer() {
    *visualizer_output_ << visualizer_oss_.str();
    visualizer_output_->flush();
    visualizer_oss_.str("");
    visualizer_oss_.clear();
  }

  void EndPass(const char* pass_name, bool pass_change) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
      FlushVisualizer();
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_, codegen_);
        last_seen_graph_size_ = checker.Run(pass_change, last_seen_graph_size_);
        if (!checker.IsValid()) {
          std::ostringstream stream;
          graph_->Dump(stream, codegen_);
          LOG(FATAL_WITHOUT_ABORT) << "Error after " << pass_name << "(" << graph_->PrettyMethod()
                                   << "): " << stream.str();
          LOG(FATAL) << "(" << pass_name << "): " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }

  static bool IsVerboseMethod(const CompilerOptions& compiler_options, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_options.HasVerboseMethods()) {
      return compiler_options.IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code
    // warning when the string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;
  size_t last_seen_graph_size_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  std::ostringstream visualizer_oss_;
  std::ostream* visualizer_output_;
  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;
  CodeGenerator* codegen_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

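// StartPass() and EndPass() are private on purpose: they are only driven
// through the friend class PassScope below, which keeps the two calls paired.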
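// RAII helper bracketing a single optimization pass: the constructor calls
// PassObserver::StartPass() and the destructor calls EndPass(), so the
// observer is notified even when the enclosing scope exits early.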
class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_change_(true),  // assume change
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  void SetPassNotChanged() {
    pass_change_ = false;
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_, pass_change_);
  }

 private:
  const char* const pass_name_;
  bool pass_change_;
  PassObserver* const pass_observer_;
};

class OptimizingCompiler final : public Compiler {
 public:
  explicit OptimizingCompiler(const CompilerOptions& compiler_options,
                              CompiledMethodStorage* storage);
  ~OptimizingCompiler() override;

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;

  CompiledMethod* Compile(const dex::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          Handle<mirror::ClassLoader> class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const override;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file,
                             Handle<mirror::DexCache> dex_cache) const override;

  uintptr_t GetEntryPointOf(ArtMethod* method) const override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerOptions().GetInstructionSet())));
  }

  bool JitCompile(Thread* self,
                  jit::JitCodeCache* code_cache,
                  jit::JitMemoryRegion* region,
                  ArtMethod* method,
                  CompilationKind compilation_kind,
                  jit::JitLogger* jit_logger)
      override
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  bool RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer,
                        const OptimizationDef definitions[],
                        size_t length) const {
    // Convert definitions to optimization passes.
    ArenaVector<HOptimization*> optimizations = ConstructOptimizations(
        definitions,
        length,
        graph->GetAllocator(),
        graph,
        compilation_stats_.get(),
        codegen,
        dex_compilation_unit);
    DCHECK_EQ(length, optimizations.size());
    // Run the optimization passes one by one. Any "depends_on" pass refers back to
    // the most recent occurrence of that pass, skipped or executed.
    std::bitset<static_cast<size_t>(OptimizationPass::kLast) + 1u> pass_changes;
    pass_changes[static_cast<size_t>(OptimizationPass::kNone)] = true;
    bool change = false;
    for (size_t i = 0; i < length; ++i) {
      if (pass_changes[static_cast<size_t>(definitions[i].depends_on)]) {
        // Execute the pass and record whether it changed anything.
        PassScope scope(optimizations[i]->GetPassName(), pass_observer);
        bool pass_change = optimizations[i]->Run();
        pass_changes[static_cast<size_t>(definitions[i].pass)] = pass_change;
        if (pass_change) {
          change = true;
        } else {
          scope.SetPassNotChanged();
        }
      } else {
        // Skip the pass and record that nothing changed.
        pass_changes[static_cast<size_t>(definitions[i].pass)] = false;
      }
    }
    return change;
  }
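  // Example from the default pipeline in RunOptimizations() below:
  // "constant_folding$after_inlining" declares OptimizationPass::kInliner as
  // its depends_on, so it only runs when the most recent inliner pass
  // reported a change.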

  template <size_t length> bool RunOptimizations(
      HGraph* graph,
      CodeGenerator* codegen,
      const DexCompilationUnit& dex_compilation_unit,
      PassObserver* pass_observer,
      const OptimizationDef (&definitions)[length]) const {
    return RunOptimizations(
        graph, codegen, dex_compilation_unit, pass_observer, definitions, length);
  }

  void RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer) const;

 private:
  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* allocator,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       const dex::CodeItem* item) const;

  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including register allocation.
  // 4) Generates code with the `code_allocator` provided.
  CodeGenerator* TryCompile(ArenaAllocator* allocator,
                            ArenaStack* arena_stack,
                            CodeVectorAllocator* code_allocator,
                            const DexCompilationUnit& dex_compilation_unit,
                            ArtMethod* method,
                            CompilationKind compilation_kind,
                            VariableSizedHandleScope* handles) const;

  CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
                                     ArenaStack* arena_stack,
                                     CodeVectorAllocator* code_allocator,
                                     const DexCompilationUnit& dex_compilation_unit,
                                     ArtMethod* method,
                                     VariableSizedHandleScope* handles) const;

  bool RunArchOptimizations(HGraph* graph,
                            CodeGenerator* codegen,
                            const DexCompilationUnit& dex_compilation_unit,
                            PassObserver* pass_observer) const;

  bool RunBaselineOptimizations(HGraph* graph,
                                CodeGenerator* codegen,
                                const DexCompilationUnit& dex_compilation_unit,
                                PassObserver* pass_observer) const;

  std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);

  // This must be called before any other function that dumps data to the cfg.
  void DumpInstructionSetFeaturesToCfg() const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */

OptimizingCompiler::OptimizingCompiler(const CompilerOptions& compiler_options,
                                       CompiledMethodStorage* storage)
    : Compiler(compiler_options, storage, kMaximumCompilationTimeBeforeWarning) {
  // Enable C1visualizer output.
  const std::string& cfg_file_name = compiler_options.GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    std::ios_base::openmode cfg_file_mode =
        compiler_options.GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
    DumpInstructionSetFeaturesToCfg();
  }
  if (compiler_options.GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

void OptimizingCompiler::DumpInstructionSetFeaturesToCfg() const {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
  std::string isa_string =
      std::string("isa:") + GetInstructionSetString(features->GetInstructionSet());
  std::string features_string = "isa_features:" + features->GetFeatureString();
  // It is assumed that visualizer_output_ is empty when calling this function, hence the fake
  // compilation block containing the ISA features will be printed at the beginning of the .cfg
  // file.
  *visualizer_output_
      << HGraphVisualizer::InsertMetaDataAsCompilationBlock(isa_string + ' ' + features_string);
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED) const {
  return true;
}

static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return instruction_set == InstructionSet::kArm
      || instruction_set == InstructionSet::kArm64
      || instruction_set == InstructionSet::kThumb2
      || instruction_set == InstructionSet::kX86
      || instruction_set == InstructionSet::kX86_64;
}
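// Note: kArm is accepted here even though TryCompile() and
// TryCompileIntrinsic() DCHECK that the instruction set is never kArm;
// presumably kArm is normalized to kThumb2 before reaching this compiler
// (see the "Always use the Thumb-2 assembler" comments below).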

bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
                                                  CodeGenerator* codegen,
                                                  const DexCompilationUnit& dex_compilation_unit,
                                                  PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case InstructionSet::kThumb2:
    case InstructionSet::kArm: {
      OptimizationDef arm_optimizations[] = {
        OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
        OptDef(OptimizationPass::kPcRelativeFixupsX86),
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_optimizations);
    }
#endif
    default:
      UNUSED(graph);
      UNUSED(codegen);
      UNUSED(dex_compilation_unit);
      UNUSED(pass_observer);
      return false;
  }
}

bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                              CodeGenerator* codegen,
                                              const DexCompilationUnit& dex_compilation_unit,
                                              PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case InstructionSet::kThumb2:
    case InstructionSet::kArm: {
      OptimizationDef arm_optimizations[] = {
        OptDef(OptimizationPass::kInstructionSimplifierArm),
        OptDef(OptimizationPass::kSideEffectsAnalysis),
        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
        OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
        OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      OptimizationDef arm64_optimizations[] = {
        OptDef(OptimizationPass::kInstructionSimplifierArm64),
        OptDef(OptimizationPass::kSideEffectsAnalysis),
        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
        OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              arm64_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
        OptDef(OptimizationPass::kInstructionSimplifierX86),
        OptDef(OptimizationPass::kSideEffectsAnalysis),
        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
        OptDef(OptimizationPass::kPcRelativeFixupsX86),
        OptDef(OptimizationPass::kX86MemoryOperandGeneration)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_optimizations);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      OptimizationDef x86_64_optimizations[] = {
        OptDef(OptimizationPass::kInstructionSimplifierX86_64),
        OptDef(OptimizationPass::kSideEffectsAnalysis),
        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
        OptDef(OptimizationPass::kX86MemoryOperandGeneration)
      };
      return RunOptimizations(graph,
                              codegen,
                              dex_compilation_unit,
                              pass_observer,
                              x86_64_optimizations);
    }
#endif
    default:
      return false;
  }
}

NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer,
                              RegisterAllocator::Strategy strategy,
                              OptimizingCompilerStats* stats) {
  {
    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
                    pass_observer);
    PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run();
  }
  // Use local allocator shared by SSA liveness analysis and register allocator.
  // (Register allocator creates new objects in the liveness data.)
  ScopedArenaAllocator local_allocator(graph->GetArenaStack());
  SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    std::unique_ptr<RegisterAllocator> register_allocator =
        RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy);
    register_allocator->AllocateRegisters();
  }
}

// Strip pass name suffix to get optimization name.
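// For example, "dead_code_elimination$initial" becomes "dead_code_elimination".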
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
  size_t pos = pass_name.find(kPassNameSeparator);
  return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
}

void OptimizingCompiler::RunOptimizations(HGraph* graph,
                                          CodeGenerator* codegen,
                                          const DexCompilationUnit& dex_compilation_unit,
                                          PassObserver* pass_observer) const {
  const std::vector<std::string>* pass_names = GetCompilerOptions().GetPassesToRun();
  if (pass_names != nullptr) {
    // If passes were defined on command-line, build the optimization
    // passes and run these instead of the built-in optimizations.
    // TODO: a way to define depends_on via command-line?
    const size_t length = pass_names->size();
    std::vector<OptimizationDef> optimizations;
    for (const std::string& pass_name : *pass_names) {
      std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
      optimizations.push_back(OptDef(OptimizationPassByName(opt_name), pass_name.c_str()));
    }
    RunOptimizations(graph,
                     codegen,
                     dex_compilation_unit,
                     pass_observer,
                     optimizations.data(),
                     length);
    return;
  }

  OptimizationDef optimizations[] = {
    // Initial optimizations.
    OptDef(OptimizationPass::kConstantFolding),
    OptDef(OptimizationPass::kInstructionSimplifier),
    OptDef(OptimizationPass::kDeadCodeElimination,
           "dead_code_elimination$initial"),
    // Inlining.
    OptDef(OptimizationPass::kInliner),
    // Simplification (if inlining occurred, or if we analyzed the invoke as "always throwing").
    OptDef(OptimizationPass::kConstantFolding,
           "constant_folding$after_inlining",
           OptimizationPass::kInliner),
    OptDef(OptimizationPass::kInstructionSimplifier,
           "instruction_simplifier$after_inlining",
           OptimizationPass::kInliner),
    OptDef(OptimizationPass::kDeadCodeElimination,
           "dead_code_elimination$after_inlining",
           OptimizationPass::kInliner),
    // GVN.
    OptDef(OptimizationPass::kSideEffectsAnalysis,
           "side_effects$before_gvn"),
    OptDef(OptimizationPass::kGlobalValueNumbering),
    // Simplification (TODO: only if GVN occurred).
    OptDef(OptimizationPass::kSelectGenerator),
    OptDef(OptimizationPass::kConstantFolding,
           "constant_folding$after_gvn"),
    OptDef(OptimizationPass::kInstructionSimplifier,
           "instruction_simplifier$after_gvn"),
    OptDef(OptimizationPass::kDeadCodeElimination,
           "dead_code_elimination$after_gvn"),
    // High-level optimizations.
    OptDef(OptimizationPass::kSideEffectsAnalysis,
           "side_effects$before_licm"),
    OptDef(OptimizationPass::kInvariantCodeMotion),
    OptDef(OptimizationPass::kInductionVarAnalysis),
    OptDef(OptimizationPass::kBoundsCheckElimination),
    OptDef(OptimizationPass::kLoopOptimization),
    // Simplification.
    OptDef(OptimizationPass::kConstantFolding,
           "constant_folding$after_bce"),
    OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
           "instruction_simplifier$after_bce"),
    // Other high-level optimizations.
    OptDef(OptimizationPass::kLoadStoreElimination),
    OptDef(OptimizationPass::kCHAGuardOptimization),
    OptDef(OptimizationPass::kDeadCodeElimination,
           "dead_code_elimination$final"),
    OptDef(OptimizationPass::kCodeSinking),
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy. For example, the code generator does not expect to see a
    // HTypeConversion from a type to the same type.
    OptDef(OptimizationPass::kAggressiveInstructionSimplifier,
           "instruction_simplifier$before_codegen"),
    // Eliminate constructor fences after code sinking to avoid
    // complicated sinking logic to split a fence with many inputs.
    OptDef(OptimizationPass::kConstructorFenceRedundancyElimination)
  };
  RunOptimizations(graph,
                   codegen,
                   dex_compilation_unit,
                   pass_observer,
                   optimizations);

  RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer);
}

static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const linker::LinkerPatch& lhs, const linker::LinkerPatch& rhs) {
    return lhs.LiteralOffset() < rhs.LiteralOffset();
  });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         const dex::CodeItem* code_item_for_osr_check) const {
  ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);

  CompiledMethodStorage* storage = GetCompiledMethodStorage();
  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      storage,
      codegen->GetInstructionSet(),
      code_allocator->GetMemory(),
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const linker::LinkerPatch>(linker_patches));

  for (const linker::LinkerPatch& patch : linker_patches) {
    if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
      ArenaVector<uint8_t> code(allocator->Adapter());
      std::string debug_name;
      codegen->EmitThunkCode(patch, &code, &debug_name);
      storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
    }
  }

  return compiled_method;
}
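// Note: the loop above deduplicates thunks via CompiledMethodStorage: thunk
// code is emitted only when the storage has no thunk for that patch yet.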

CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                              ArenaStack* arena_stack,
                                              CodeVectorAllocator* code_allocator,
                                              const DexCompilationUnit& dex_compilation_unit,
                                              ArtMethod* method,
                                              CompilationKind compilation_kind,
                                              VariableSizedHandleScope* handles) const {
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
  const CompilerOptions& compiler_options = GetCompilerOptions();
  InstructionSet instruction_set = compiler_options.GetInstructionSet();
  const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
  uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
  const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, InstructionSet::kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(compilation_stats_.get(),
                    MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is bigger than 128.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
      && (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
          kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);

  bool dead_reference_safe;
  // For AOT compilation, we may not get a method, for example if its class is erroneous,
  // possibly due to an unavailable superclass.  JIT should always have a method.
  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
  if (method != nullptr) {
    const dex::ClassDef* containing_class;
    {
      ScopedObjectAccess soa(Thread::Current());
      containing_class = &method->GetClassDef();
    }
    // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
    // is currently rarely true.
    dead_reference_safe =
        annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
        && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
  } else {
    // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
    dead_reference_safe = false;
  }

  HGraph* graph = new (allocator) HGraph(
      allocator,
      arena_stack,
      handles,
      dex_file,
      method_idx,
      compiler_options.GetInstructionSet(),
      kInvalidInvokeType,
      dead_reference_safe,
      compiler_options.GetDebuggable(),
      compilation_kind);

  if (method != nullptr) {
    graph->SetArtMethod(method);
  }

  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
    DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr)
        << "Compiling a method baseline should always have a ProfilingInfo";
    graph->SetProfilingInfo(info);
  }

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            compiler_options,
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_options);

  {
    VLOG(compiler) << "Building " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          code_item_accessor,
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          codegen.get(),
                          compilation_stats_.get());
    GraphAnalysisResult result = builder.BuildGraph();
    if (result != kAnalysisSuccess) {
      switch (result) {
        case kAnalysisSkipped: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledSkipped);
          break;
        }
        case kAnalysisInvalidBytecode: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledInvalidBytecode);
          break;
        }
        case kAnalysisFailThrowCatchLoop: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledThrowCatchLoop);
          break;
        }
        case kAnalysisFailAmbiguousArrayOp: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
          break;
        }
        case kAnalysisFailIrreducibleLoopAndStringInit: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
          break;
        }
        case kAnalysisFailPhiEquivalentInOsr: {
          MaybeRecordStat(compilation_stats_.get(),
                          MethodCompilationStat::kNotCompiledPhiEquivalentInOsr);
          break;
        }
        case kAnalysisSuccess:
          UNREACHABLE();
      }
      pass_observer.SetGraphInBadState();
      return nullptr;
    }
  }

  if (compilation_kind == CompilationKind::kBaseline) {
    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  } else {
    RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
  }

  RegisterAllocator::Strategy regalloc_strategy =
    compiler_options.GetRegisterAllocationStrategy();
  AllocateRegisters(graph,
                    codegen.get(),
                    &pass_observer,
                    regalloc_strategy,
                    compilation_stats_.get());

  codegen->Compile(code_allocator);
  pass_observer.DumpDisassembly();

  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
  return codegen.release();
}

CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
    ArenaAllocator* allocator,
    ArenaStack* arena_stack,
    CodeVectorAllocator* code_allocator,
    const DexCompilationUnit& dex_compilation_unit,
    ArtMethod* method,
    VariableSizedHandleScope* handles) const {
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
  const CompilerOptions& compiler_options = GetCompilerOptions();
  InstructionSet instruction_set = compiler_options.GetInstructionSet();
  const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
  uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, InstructionSet::kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    return nullptr;
  }

  HGraph* graph = new (allocator) HGraph(
      allocator,
      arena_stack,
      handles,
      dex_file,
      method_idx,
      compiler_options.GetInstructionSet(),
      kInvalidInvokeType,
      /* dead_reference_safe= */ true,  // Intrinsics don't affect dead reference safety.
      compiler_options.GetDebuggable(),
      CompilationKind::kOptimized);

  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK(method != nullptr);
  graph->SetArtMethod(method);

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            compiler_options,
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_options);

  {
    VLOG(compiler) << "Building intrinsic graph " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          CodeItemDebugInfoAccessor(),  // Null code item.
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          codegen.get(),
                          compilation_stats_.get());
    builder.BuildIntrinsicGraph(method);
  }

  OptimizationDef optimizations[] = {
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy.
    OptDef(OptimizationPass::kInstructionSimplifier),
  };
  RunOptimizations(graph,
                   codegen.get(),
                   dex_compilation_unit,
                   &pass_observer,
                   optimizations);

  RunArchOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);

  AllocateRegisters(graph,
                    codegen.get(),
                    &pass_observer,
                    compiler_options.GetRegisterAllocationStrategy(),
                    compilation_stats_.get());
  if (!codegen->IsLeafMethod()) {
    VLOG(compiler) << "Intrinsic method is not leaf: " << method->GetIntrinsic()
        << " " << graph->PrettyMethod();
    return nullptr;
  }

  codegen->Compile(code_allocator);
  pass_observer.DumpDisassembly();

  VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
      << " " << graph->PrettyMethod();
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
  return codegen.release();
}

CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            Handle<mirror::ClassLoader> jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  DCHECK(compiler_options.IsAotCompiler());
  CompiledMethod* compiled_method = nullptr;
  Runtime* runtime = Runtime::Current();
  DCHECK(runtime->IsAotCompiler());
  ArenaAllocator allocator(runtime->GetArenaPool());
  ArenaStack arena_stack(runtime->GetArenaPool());
  CodeVectorAllocator code_allocator(&allocator);
  std::unique_ptr<CodeGenerator> codegen;
  bool compiled_intrinsic = false;
  {
    ScopedObjectAccess soa(Thread::Current());
    ArtMethod* method =
        runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
    DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
    soa.Self()->ClearException();  // Suppress exception if any.
    VariableSizedHandleScope handles(soa.Self());
    Handle<mirror::Class> compiling_class =
        handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
    DexCompilationUnit dex_compilation_unit(
        jclass_loader,
        runtime->GetClassLinker(),
        dex_file,
        code_item,
        class_def_idx,
        method_idx,
        access_flags,
        /*verified_method=*/ nullptr,  // Not needed by the Optimizing compiler.
        dex_cache,
        compiling_class);
    // All signature polymorphic methods are native.
    DCHECK(method == nullptr || !method->IsSignaturePolymorphic());
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
    // Try to compile a fully intrinsified implementation.
    if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
      DCHECK(compiler_options.IsBootImage());
      codegen.reset(
          TryCompileIntrinsic(&allocator,
                              &arena_stack,
                              &code_allocator,
                              dex_compilation_unit,
                              method,
                              &handles));
      if (codegen != nullptr) {
        compiled_intrinsic = true;
      }
    }
    if (codegen == nullptr) {
      codegen.reset(
          TryCompile(&allocator,
                     &arena_stack,
                     &code_allocator,
                     dex_compilation_unit,
                     method,
                     compiler_options.IsBaseline()
                        ? CompilationKind::kBaseline
                        : CompilationKind::kOptimized,
                     &handles));
    }
  }
  if (codegen.get() != nullptr) {
    compiled_method = Emit(&allocator,
                           &code_allocator,
                           codegen.get(),
                           compiled_intrinsic ? nullptr : code_item);
    if (compiled_intrinsic) {
      compiled_method->MarkAsIntrinsic();
    }

    if (kArenaAllocatorCountAllocations) {
      codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
      size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
      if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
        MemStats mem_stats(allocator.GetMemStats());
        MemStats peak_stats(arena_stack.GetPeakStats());
        LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
                  << dex_file.PrettyMethod(method_idx)
                  << "\n" << Dumpable<MemStats>(mem_stats)
                  << "\n" << Dumpable<MemStats>(peak_stats);
      }
    }
  }

  if (kIsDebugBuild &&
      compiler_options.CompileArtTest() &&
      IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported). This makes sure we're not
    // regressing.
    std::string method_name = dex_file.PrettyMethod(method_idx);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK_IMPLIES(compiled_method == nullptr, !shouldCompile) << "Didn't compile " << method_name;
  }

  return compiled_method;
}

static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                                    const JniCompiledMethod& jni_compiled_method,
                                                    size_t code_size) {
  // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
  // to stay clear of the frame size limit.
  std::unique_ptr<StackMapStream> stack_map_stream(
      new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
  stack_map_stream->BeginMethod(
      jni_compiled_method.GetFrameSize(),
      jni_compiled_method.GetCoreSpillMask(),
      jni_compiled_method.GetFpSpillMask(),
      /* num_dex_registers= */ 0,
      /* baseline= */ false);
  stack_map_stream->EndMethod(code_size);
  return stack_map_stream->Encode();
}
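// The resulting stack map is minimal: a JNI stub records no dex registers
// (note num_dex_registers == 0 above), only the frame information needed to
// walk the stack.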

CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
                                               uint32_t method_idx,
                                               const DexFile& dex_file,
                                               Handle<mirror::DexCache> dex_cache) const {
  Runtime* runtime = Runtime::Current();
  ArenaAllocator allocator(runtime->GetArenaPool());
  ArenaStack arena_stack(runtime->GetArenaPool());

  const CompilerOptions& compiler_options = GetCompilerOptions();
  if (compiler_options.IsBootImage()) {
    ScopedObjectAccess soa(Thread::Current());
    ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
        method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
    // Try to compile a fully intrinsified implementation. Do not try to do this for
    // signature polymorphic methods as the InstructionBuilder cannot handle them;
    // and it would be useless as they always have a slow path for type conversions.
    if (method != nullptr && UNLIKELY(method->IsIntrinsic()) && !method->IsSignaturePolymorphic()) {
      VariableSizedHandleScope handles(soa.Self());
      ScopedNullHandle<mirror::ClassLoader> class_loader;  // null means boot class path loader.
      Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
      DexCompilationUnit dex_compilation_unit(
          class_loader,
          runtime->GetClassLinker(),
          dex_file,
          /*code_item=*/ nullptr,
          /*class_def_idx=*/ DexFile::kDexNoIndex16,
          method_idx,
          access_flags,
          /*verified_method=*/ nullptr,
          dex_cache,
          compiling_class);
      CodeVectorAllocator code_allocator(&allocator);
      // Go to native so that we don't block GC during compilation.
      ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
      std::unique_ptr<CodeGenerator> codegen(
          TryCompileIntrinsic(&allocator,
                              &arena_stack,
                              &code_allocator,
                              dex_compilation_unit,
                              method,
                              &handles));
      if (codegen != nullptr) {
        CompiledMethod* compiled_method = Emit(&allocator,
                                               &code_allocator,
                                               codegen.get(),
                                               /* item= */ nullptr);
        compiled_method->MarkAsIntrinsic();
        return compiled_method;
      }
    }
  }

  JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
      compiler_options, access_flags, method_idx, dex_file, &allocator);
  MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);

  ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
      &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
  return CompiledMethod::SwapAllocCompiledMethod(
      GetCompiledMethodStorage(),
      jni_compiled_method.GetInstructionSet(),
      jni_compiled_method.GetCode(),
      ArrayRef<const uint8_t>(stack_map),
      jni_compiled_method.GetCfi(),
      /* patches= */ ArrayRef<const linker::LinkerPatch>());
}

Compiler* CreateOptimizingCompiler(const CompilerOptions& compiler_options,
                                   CompiledMethodStorage* storage) {
  return new OptimizingCompiler(compiler_options, storage);
}

bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
  // Note: the runtime is null only for unit testing.
  return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}

bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    jit::JitMemoryRegion* region,
                                    ArtMethod* method,
                                    CompilationKind compilation_kind,
                                    jit::JitLogger* jit_logger) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  DCHECK(compiler_options.IsJitCompiler());
  DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region));
  StackHandleScope<3> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
  DCHECK(method->IsCompilable());

  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const dex::CodeItem* code_item = method->GetCodeItem();
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();

  Runtime* runtime = Runtime::Current();
  ArenaAllocator allocator(runtime->GetJitArenaPool());

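  // Native methods take a separate path: there is no dex bytecode to build an
  // HGraph from, so a JNI stub is compiled and committed to the code cache
  // directly, bypassing the optimizing pipeline used below.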
  if (UNLIKELY(method->IsNative())) {
    JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
        compiler_options, access_flags, method_idx, *dex_file, &allocator);
    std::vector<Handle<mirror::Object>> roots;
    ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
        allocator.Adapter(kArenaAllocCHA));
    ArenaStack arena_stack(runtime->GetJitArenaPool());
    // StackMapStream is large and does not fit into this frame, so we need a helper method.
    ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());

    ArrayRef<const uint8_t> reserved_code;
    ArrayRef<const uint8_t> reserved_data;
    if (!code_cache->Reserve(self,
                             region,
                             jni_compiled_method.GetCode().size(),
                             stack_map.size(),
                             /* number_of_roots= */ 0,
                             method,
                             /*out*/ &reserved_code,
                             /*out*/ &reserved_data)) {
      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
      return false;
    }
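    // The reserved code region is expected to begin with the instruction-
    // aligned OatQuickMethodHeader; the executable code itself starts right
    // after it, which is what the offset below accounts for.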
    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();

    // Add debug info after we know the code location but before we update the entry point.
    std::vector<uint8_t> debug_info;
    if (compiler_options.GenerateAnyDebugInfo()) {
      debug::MethodDebugInfo info = {};
      // Simpleperf relies on art_jni_trampoline to detect JNI methods.
      info.custom_name = "art_jni_trampoline";
      info.dex_file = dex_file;
      info.class_def_index = class_def_idx;
      info.dex_method_index = method_idx;
      info.access_flags = access_flags;
      info.code_item = code_item;
      info.isa = jni_compiled_method.GetInstructionSet();
      info.deduped = false;
      info.is_native_debuggable = compiler_options.GetNativeDebuggable();
      info.is_optimized = true;
      info.is_code_address_text_relative = false;
      info.code_address = reinterpret_cast<uintptr_t>(code);
      info.code_size = jni_compiled_method.GetCode().size();
      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
      info.code_info = nullptr;
      info.cfi = jni_compiled_method.GetCfi();
      debug_info = GenerateJitDebugInfo(info);
    }

    if (!code_cache->Commit(self,
                            region,
                            method,
                            reserved_code,
                            jni_compiled_method.GetCode(),
                            reserved_data,
                            roots,
                            ArrayRef<const uint8_t>(stack_map),
                            debug_info,
                            /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                            compilation_kind,
                            /* has_should_deoptimize_flag= */ false,
                            cha_single_implementation_list)) {
      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
      return false;
    }

    Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
    if (jit_logger != nullptr) {
      jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
    }
    return true;
  }

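  // Managed (dex bytecode) path: build and optimize an HGraph for the method,
  // then emit the generated code and its metadata into the JIT code cache.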
  ArenaStack arena_stack(runtime->GetJitArenaPool());
  CodeVectorAllocator code_allocator(&allocator);
  VariableSizedHandleScope handles(self);

  std::unique_ptr<CodeGenerator> codegen;
  {
    Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
    DexCompilationUnit dex_compilation_unit(
        class_loader,
        runtime->GetClassLinker(),
        *dex_file,
        code_item,
        class_def_idx,
        method_idx,
        access_flags,
        /*verified_method=*/ nullptr,
        dex_cache,
        compiling_class);

    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, ThreadState::kNative);
    codegen.reset(
        TryCompile(&allocator,
                   &arena_stack,
                   &code_allocator,
                   dex_compilation_unit,
                   method,
                   compilation_kind,
                   &handles));
    if (codegen.get() == nullptr) {
      return false;
    }
  }

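  // Compilation succeeded: serialize the stack maps, then reserve code cache
  // space for the code, the stack maps, and the JIT roots before copying
  // anything into the cache.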
  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);

  ArrayRef<const uint8_t> reserved_code;
  ArrayRef<const uint8_t> reserved_data;
  if (!code_cache->Reserve(self,
                           region,
                           code_allocator.GetMemory().size(),
                           stack_map.size(),
                           /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
                           method,
                           /*out*/ &reserved_code,
                           /*out*/ &reserved_data)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
    return false;
  }
  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
  const uint8_t* roots_data = reserved_data.data();

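  // Write the JIT roots (objects referenced directly by the generated code)
  // into the reserved data region and collect the corresponding handles; the
  // handles are passed to Commit() below so the code cache can track them.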
  std::vector<Handle<mirror::Object>> roots;
  codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
  // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
  DCHECK(std::all_of(roots.begin(),
                     roots.end(),
                     [&handles](Handle<mirror::Object> root){
                       return handles.Contains(root.GetReference());
                     }));

  // Add debug info after we know the code location but before we update the entry point.
  std::vector<uint8_t> debug_info;
  if (compiler_options.GenerateAnyDebugInfo()) {
    debug::MethodDebugInfo info = {};
    DCHECK(info.custom_name.empty());
    info.dex_file = dex_file;
    info.class_def_index = class_def_idx;
    info.dex_method_index = method_idx;
    info.access_flags = access_flags;
    info.code_item = code_item;
    info.isa = codegen->GetInstructionSet();
    info.deduped = false;
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = reinterpret_cast<uintptr_t>(code);
    info.code_size = code_allocator.GetMemory().size();
    info.frame_size_in_bytes = codegen->GetFrameSize();
    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
    debug_info = GenerateJitDebugInfo(info);
  }

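  // Commit copies the generated code and metadata into the reserved regions
  // and, on success, is expected to make the new code visible to callers; on
  // failure the reservation must be released explicitly via Free().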
  if (!code_cache->Commit(self,
                          region,
                          method,
                          reserved_code,
                          code_allocator.GetMemory(),
                          reserved_data,
                          roots,
                          ArrayRef<const uint8_t>(stack_map),
                          debug_info,
                          /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                          compilation_kind,
                          codegen->GetGraph()->HasShouldDeoptimizeFlag(),
                          codegen->GetGraph()->GetCHASingleImplementationList())) {
    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
    return false;
  }

  Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
  if (jit_logger != nullptr) {
    jit_logger->WriteLog(code, code_allocator.GetMemory().size(), method);
  }

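  // Optional arena accounting: when allocation counting is enabled at build
  // time, report methods whose compilation used an unusually large amount of
  // arena memory.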
  if (kArenaAllocatorCountAllocations) {
    codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
    size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
    if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
      MemStats mem_stats(allocator.GetMemStats());
      MemStats peak_stats(arena_stack.GetPeakStats());
      LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
                << dex_file->PrettyMethod(method_idx)
                << "\n" << Dumpable<MemStats>(mem_stats)
                << "\n" << Dumpable<MemStats>(peak_stats);
    }
  }

  return true;
}

std::vector<uint8_t> OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
  if (compiler_options.GenerateAnyDebugInfo()) {
    // If both flags are passed, generate full debug info.
    const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();

    // Create an entry for the single method that we just compiled.
    InstructionSet isa = compiler_options.GetInstructionSet();
    const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
    return debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
  }
  return std::vector<uint8_t>();
}

}  // namespace art