1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "optimizing_compiler.h"
18 
19 #include <fstream>
20 #include <memory>
21 #include <sstream>
22 
23 #include <stdint.h>
24 
25 #include "art_method-inl.h"
26 #include "base/arena_allocator.h"
27 #include "base/arena_containers.h"
28 #include "base/dumpable.h"
29 #include "base/logging.h"
30 #include "base/macros.h"
31 #include "base/mutex.h"
32 #include "base/scoped_arena_allocator.h"
33 #include "base/timing_logger.h"
34 #include "builder.h"
35 #include "class_root.h"
36 #include "code_generator.h"
37 #include "compiled_method.h"
38 #include "compiler.h"
39 #include "debug/elf_debug_writer.h"
40 #include "debug/method_debug_info.h"
41 #include "dex/dex_file_types.h"
42 #include "dex/verification_results.h"
43 #include "dex/verified_method.h"
44 #include "driver/compiled_method_storage.h"
45 #include "driver/compiler_options.h"
46 #include "driver/dex_compilation_unit.h"
47 #include "graph_checker.h"
48 #include "graph_visualizer.h"
49 #include "inliner.h"
50 #include "jit/debugger_interface.h"
51 #include "jit/jit.h"
52 #include "jit/jit_code_cache.h"
53 #include "jit/jit_logger.h"
54 #include "jni/quick/jni_compiler.h"
55 #include "linker/linker_patch.h"
56 #include "nodes.h"
57 #include "oat_quick_method_header.h"
58 #include "prepare_for_register_allocation.h"
59 #include "reference_type_propagation.h"
60 #include "register_allocator_linear_scan.h"
61 #include "select_generator.h"
62 #include "ssa_builder.h"
63 #include "ssa_liveness_analysis.h"
64 #include "ssa_phi_elimination.h"
65 #include "stack_map_stream.h"
66 #include "utils/assembler.h"
67 #include "verifier/verifier_compiler_binding.h"
68 
69 namespace art {
70 
71 static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;
72 
73 static constexpr const char* kPassNameSeparator = "$";
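// Pass instance names combine an optimization name and a suffix around this separator,
// e.g. "dead_code_elimination$initial"; ConvertPassNameToOptimizationName() below strips
// the suffix again when passes are specified on the command line.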
74 
75 /**
76  * Used by the code generator to allocate the code in a vector.
77  */
78 class CodeVectorAllocator final : public CodeAllocator {
79  public:
80   explicit CodeVectorAllocator(ArenaAllocator* allocator)
81       : memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
82 
83   uint8_t* Allocate(size_t size) override {
84     memory_.resize(size);
85     return &memory_[0];
86   }
87 
88   ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
89   uint8_t* GetData() { return memory_.data(); }
90 
91  private:
92   ArenaVector<uint8_t> memory_;
93 
94   DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
95 };
96 
97 /**
98  * Filter to apply to the visualizer. Methods whose name contains that filter will
99  * be dumped.
100  */
101 static constexpr const char kStringFilter[] = "";
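// Note: an empty filter matches every method. If --verbose-methods is set, it takes
// precedence over this filter; see IsVerboseMethod() below.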
102 
103 class PassScope;
104 
105 class PassObserver : public ValueObject {
106  public:
107   PassObserver(HGraph* graph,
108                CodeGenerator* codegen,
109                std::ostream* visualizer_output,
110                const CompilerOptions& compiler_options,
111                Mutex& dump_mutex)
112       : graph_(graph),
113         last_seen_graph_size_(0),
114         cached_method_name_(),
115         timing_logger_enabled_(compiler_options.GetDumpPassTimings()),
116         timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
117         disasm_info_(graph->GetAllocator()),
118         visualizer_oss_(),
119         visualizer_output_(visualizer_output),
120         visualizer_enabled_(!compiler_options.GetDumpCfgFileName().empty()),
121         visualizer_(&visualizer_oss_, graph, *codegen),
122         visualizer_dump_mutex_(dump_mutex),
123         graph_in_bad_state_(false) {
124     if (timing_logger_enabled_ || visualizer_enabled_) {
125       if (!IsVerboseMethod(compiler_options, GetMethodName())) {
126         timing_logger_enabled_ = visualizer_enabled_ = false;
127       }
128       if (visualizer_enabled_) {
129         visualizer_.PrintHeader(GetMethodName());
130         codegen->SetDisassemblyInformation(&disasm_info_);
131       }
132     }
133   }
134 
135   ~PassObserver() {
136     if (timing_logger_enabled_) {
137       LOG(INFO) << "TIMINGS " << GetMethodName();
138       LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
139     }
140     DCHECK(visualizer_oss_.str().empty());
141   }
142 
143   void DumpDisassembly() REQUIRES(!visualizer_dump_mutex_) {
144     if (visualizer_enabled_) {
145       visualizer_.DumpGraphWithDisassembly();
146       FlushVisualizer();
147     }
148   }
149 
150   void SetGraphInBadState() { graph_in_bad_state_ = true; }
151 
152   const char* GetMethodName() {
153     // PrettyMethod() is expensive, so we delay calling it until we actually have to.
154     if (cached_method_name_.empty()) {
155       cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
156     }
157     return cached_method_name_.c_str();
158   }
159 
160  private:
161   void StartPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
162     VLOG(compiler) << "Starting pass: " << pass_name;
163     // Dump graph first, then start timer.
164     if (visualizer_enabled_) {
165       visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
166       FlushVisualizer();
167     }
168     if (timing_logger_enabled_) {
169       timing_logger_.StartTiming(pass_name);
170     }
171   }
172 
173   void FlushVisualizer() REQUIRES(!visualizer_dump_mutex_) {
174     MutexLock mu(Thread::Current(), visualizer_dump_mutex_);
175     *visualizer_output_ << visualizer_oss_.str();
176     visualizer_output_->flush();
177     visualizer_oss_.str("");
178     visualizer_oss_.clear();
179   }
180 
181   void EndPass(const char* pass_name, bool pass_change) REQUIRES(!visualizer_dump_mutex_) {
182     // Pause timer first, then dump graph.
183     if (timing_logger_enabled_) {
184       timing_logger_.EndTiming();
185     }
186     if (visualizer_enabled_) {
187       visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
188       FlushVisualizer();
189     }
190 
191     // Validate the HGraph if running in debug mode.
192     if (kIsDebugBuild) {
193       if (!graph_in_bad_state_) {
194         GraphChecker checker(graph_);
195         last_seen_graph_size_ = checker.Run(pass_change, last_seen_graph_size_);
196         if (!checker.IsValid()) {
197           LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
198         }
199       }
200     }
201   }
202 
203   static bool IsVerboseMethod(const CompilerOptions& compiler_options, const char* method_name) {
204     // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
205     // empty kStringFilter matching all methods.
206     if (compiler_options.HasVerboseMethods()) {
207       return compiler_options.IsVerboseMethod(method_name);
208     }
209 
210     // Test the kStringFilter sub-string. A constexpr helper variable silences the
211     // unreachable-code warning when the string is empty.
212     constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
213     if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
214       return true;
215     }
216 
217     return false;
218   }
219 
220   HGraph* const graph_;
221   size_t last_seen_graph_size_;
222 
223   std::string cached_method_name_;
224 
225   bool timing_logger_enabled_;
226   TimingLogger timing_logger_;
227 
228   DisassemblyInformation disasm_info_;
229 
230   std::ostringstream visualizer_oss_;
231   std::ostream* visualizer_output_;
232   bool visualizer_enabled_;
233   HGraphVisualizer visualizer_;
234   Mutex& visualizer_dump_mutex_;
235 
236   // Flag to be set by the compiler if the pass failed and the graph is not
237   // expected to validate.
238   bool graph_in_bad_state_;
239 
240   friend PassScope;
241 
242   DISALLOW_COPY_AND_ASSIGN(PassObserver);
243 };
244 
245 class PassScope : public ValueObject {
246  public:
247   PassScope(const char *pass_name, PassObserver* pass_observer)
248       : pass_name_(pass_name),
249         pass_change_(true),  // assume change
250         pass_observer_(pass_observer) {
251     pass_observer_->StartPass(pass_name_);
252   }
253 
254   void SetPassNotChanged() {
255     pass_change_ = false;
256   }
257 
258   ~PassScope() {
259     pass_observer_->EndPass(pass_name_, pass_change_);
260   }
261 
262  private:
263   const char* const pass_name_;
264   bool pass_change_;
265   PassObserver* const pass_observer_;
266 };
267 
268 class OptimizingCompiler final : public Compiler {
269  public:
270   explicit OptimizingCompiler(const CompilerOptions& compiler_options,
271                               CompiledMethodStorage* storage);
272   ~OptimizingCompiler() override;
273 
274   bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
275 
276   CompiledMethod* Compile(const dex::CodeItem* code_item,
277                           uint32_t access_flags,
278                           InvokeType invoke_type,
279                           uint16_t class_def_idx,
280                           uint32_t method_idx,
281                           Handle<mirror::ClassLoader> class_loader,
282                           const DexFile& dex_file,
283                           Handle<mirror::DexCache> dex_cache) const override;
284 
285   CompiledMethod* JniCompile(uint32_t access_flags,
286                              uint32_t method_idx,
287                              const DexFile& dex_file,
288                              Handle<mirror::DexCache> dex_cache) const override;
289 
290   uintptr_t GetEntryPointOf(ArtMethod* method) const override
291       REQUIRES_SHARED(Locks::mutator_lock_) {
292     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
293         InstructionSetPointerSize(GetCompilerOptions().GetInstructionSet())));
294   }
295 
296   bool JitCompile(Thread* self,
297                   jit::JitCodeCache* code_cache,
298                   ArtMethod* method,
299                   bool baseline,
300                   bool osr,
301                   jit::JitLogger* jit_logger)
302       override
303       REQUIRES_SHARED(Locks::mutator_lock_);
304 
305  private:
306   bool RunOptimizations(HGraph* graph,
307                         CodeGenerator* codegen,
308                         const DexCompilationUnit& dex_compilation_unit,
309                         PassObserver* pass_observer,
310                         VariableSizedHandleScope* handles,
311                         const OptimizationDef definitions[],
312                         size_t length) const {
313     // Convert definitions to optimization passes.
314     ArenaVector<HOptimization*> optimizations = ConstructOptimizations(
315         definitions,
316         length,
317         graph->GetAllocator(),
318         graph,
319         compilation_stats_.get(),
320         codegen,
321         dex_compilation_unit,
322         handles);
323     DCHECK_EQ(length, optimizations.size());
324     // Run the optimization passes one by one. Any "depends_on" pass refers back to
325     // the most recent occurrence of that pass, skipped or executed.
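    // For example, "constant_folding$after_inlining" (see the OptDef list in
    // RunOptimizations below) depends on OptimizationPass::kInliner and is skipped
    // unless the inliner reported a change.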
326     std::bitset<static_cast<size_t>(OptimizationPass::kLast) + 1u> pass_changes;
327     pass_changes[static_cast<size_t>(OptimizationPass::kNone)] = true;
328     bool change = false;
329     for (size_t i = 0; i < length; ++i) {
330       if (pass_changes[static_cast<size_t>(definitions[i].depends_on)]) {
331         // Execute the pass and record whether it changed anything.
332         PassScope scope(optimizations[i]->GetPassName(), pass_observer);
333         bool pass_change = optimizations[i]->Run();
334         pass_changes[static_cast<size_t>(definitions[i].pass)] = pass_change;
335         if (pass_change) {
336           change = true;
337         } else {
338           scope.SetPassNotChanged();
339         }
340       } else {
341         // Skip the pass and record that nothing changed.
342         pass_changes[static_cast<size_t>(definitions[i].pass)] = false;
343       }
344     }
345     return change;
346   }
347 
348   template <size_t length> bool RunOptimizations(
349       HGraph* graph,
350       CodeGenerator* codegen,
351       const DexCompilationUnit& dex_compilation_unit,
352       PassObserver* pass_observer,
353       VariableSizedHandleScope* handles,
354       const OptimizationDef (&definitions)[length]) const {
355     return RunOptimizations(
356         graph, codegen, dex_compilation_unit, pass_observer, handles, definitions, length);
357   }
358 
359   void RunOptimizations(HGraph* graph,
360                         CodeGenerator* codegen,
361                         const DexCompilationUnit& dex_compilation_unit,
362                         PassObserver* pass_observer,
363                         VariableSizedHandleScope* handles) const;
364 
365  private:
366   // Create a 'CompiledMethod' for an optimized graph.
367   CompiledMethod* Emit(ArenaAllocator* allocator,
368                        CodeVectorAllocator* code_allocator,
369                        CodeGenerator* codegen,
370                        const dex::CodeItem* item) const;
371 
372   // Try compiling a method and return the code generator used for
373   // compiling it.
374   // This method:
375   // 1) Builds the graph. Returns null if it failed to build it.
376   // 2) Transforms the graph to SSA. Returns null if it failed.
377   // 3) Runs optimizations on the graph, including register allocator.
378   // 4) Generates code with the `code_allocator` provided.
379   CodeGenerator* TryCompile(ArenaAllocator* allocator,
380                             ArenaStack* arena_stack,
381                             CodeVectorAllocator* code_allocator,
382                             const DexCompilationUnit& dex_compilation_unit,
383                             ArtMethod* method,
384                             bool baseline,
385                             bool osr,
386                             VariableSizedHandleScope* handles) const;
387 
388   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
389                                      ArenaStack* arena_stack,
390                                      CodeVectorAllocator* code_allocator,
391                                      const DexCompilationUnit& dex_compilation_unit,
392                                      ArtMethod* method,
393                                      VariableSizedHandleScope* handles) const;
394 
395   bool RunArchOptimizations(HGraph* graph,
396                             CodeGenerator* codegen,
397                             const DexCompilationUnit& dex_compilation_unit,
398                             PassObserver* pass_observer,
399                             VariableSizedHandleScope* handles) const;
400 
401   bool RunBaselineOptimizations(HGraph* graph,
402                                 CodeGenerator* codegen,
403                                 const DexCompilationUnit& dex_compilation_unit,
404                                 PassObserver* pass_observer,
405                                 VariableSizedHandleScope* handles) const;
406 
407   void GenerateJitDebugInfo(ArtMethod* method,
408                             const debug::MethodDebugInfo& method_debug_info)
409       REQUIRES_SHARED(Locks::mutator_lock_);
410 
411   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
412 
413   std::unique_ptr<std::ostream> visualizer_output_;
414 
415   mutable Mutex dump_mutex_;  // To synchronize visualizer writing.
416 
417   DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
418 };
419 
420 static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
421 
422 OptimizingCompiler::OptimizingCompiler(const CompilerOptions& compiler_options,
423                                        CompiledMethodStorage* storage)
424     : Compiler(compiler_options, storage, kMaximumCompilationTimeBeforeWarning),
425       dump_mutex_("Visualizer dump lock") {
426   // Enable C1visualizer output.
427   const std::string& cfg_file_name = compiler_options.GetDumpCfgFileName();
428   if (!cfg_file_name.empty()) {
429     std::ios_base::openmode cfg_file_mode =
430         compiler_options.GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
431     visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
432   }
433   if (compiler_options.GetDumpStats()) {
434     compilation_stats_.reset(new OptimizingCompilerStats());
435   }
436 }
437 
438 OptimizingCompiler::~OptimizingCompiler() {
439   if (compilation_stats_.get() != nullptr) {
440     compilation_stats_->Log();
441   }
442 }
443 
444 bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
445                                           const DexFile& dex_file ATTRIBUTE_UNUSED) const {
446   return true;
447 }
448 
449 static bool IsInstructionSetSupported(InstructionSet instruction_set) {
450   return instruction_set == InstructionSet::kArm
451       || instruction_set == InstructionSet::kArm64
452       || instruction_set == InstructionSet::kThumb2
453       || instruction_set == InstructionSet::kMips
454       || instruction_set == InstructionSet::kMips64
455       || instruction_set == InstructionSet::kX86
456       || instruction_set == InstructionSet::kX86_64;
457 }
458 
459 bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
460                                                   CodeGenerator* codegen,
461                                                   const DexCompilationUnit& dex_compilation_unit,
462                                                   PassObserver* pass_observer,
463                                                   VariableSizedHandleScope* handles) const {
464   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
465 #ifdef ART_ENABLE_CODEGEN_mips
466     case InstructionSet::kMips: {
467       OptimizationDef mips_optimizations[] = {
468         OptDef(OptimizationPass::kPcRelativeFixupsMips)
469       };
470       return RunOptimizations(graph,
471                               codegen,
472                               dex_compilation_unit,
473                               pass_observer,
474                               handles,
475                               mips_optimizations);
476     }
477 #endif
478 #ifdef ART_ENABLE_CODEGEN_x86
479     case InstructionSet::kX86: {
480       OptimizationDef x86_optimizations[] = {
481         OptDef(OptimizationPass::kPcRelativeFixupsX86),
482       };
483       return RunOptimizations(graph,
484                               codegen,
485                               dex_compilation_unit,
486                               pass_observer,
487                               handles,
488                               x86_optimizations);
489     }
490 #endif
491     default:
492       UNUSED(graph);
493       UNUSED(codegen);
494       UNUSED(dex_compilation_unit);
495       UNUSED(pass_observer);
496       UNUSED(handles);
497       return false;
498   }
499 }
500 
501 bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
502                                               CodeGenerator* codegen,
503                                               const DexCompilationUnit& dex_compilation_unit,
504                                               PassObserver* pass_observer,
505                                               VariableSizedHandleScope* handles) const {
506   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
507 #if defined(ART_ENABLE_CODEGEN_arm)
508     case InstructionSet::kThumb2:
509     case InstructionSet::kArm: {
510       OptimizationDef arm_optimizations[] = {
511         OptDef(OptimizationPass::kInstructionSimplifierArm),
512         OptDef(OptimizationPass::kSideEffectsAnalysis),
513         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
514         OptDef(OptimizationPass::kScheduling)
515       };
516       return RunOptimizations(graph,
517                               codegen,
518                               dex_compilation_unit,
519                               pass_observer,
520                               handles,
521                               arm_optimizations);
522     }
523 #endif
524 #ifdef ART_ENABLE_CODEGEN_arm64
525     case InstructionSet::kArm64: {
526       OptimizationDef arm64_optimizations[] = {
527         OptDef(OptimizationPass::kInstructionSimplifierArm64),
528         OptDef(OptimizationPass::kSideEffectsAnalysis),
529         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
530         OptDef(OptimizationPass::kScheduling)
531       };
532       return RunOptimizations(graph,
533                               codegen,
534                               dex_compilation_unit,
535                               pass_observer,
536                               handles,
537                               arm64_optimizations);
538     }
539 #endif
540 #ifdef ART_ENABLE_CODEGEN_mips
541     case InstructionSet::kMips: {
542       OptimizationDef mips_optimizations[] = {
543         OptDef(OptimizationPass::kInstructionSimplifierMips),
544         OptDef(OptimizationPass::kSideEffectsAnalysis),
545         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
546         OptDef(OptimizationPass::kPcRelativeFixupsMips)
547       };
548       return RunOptimizations(graph,
549                               codegen,
550                               dex_compilation_unit,
551                               pass_observer,
552                               handles,
553                               mips_optimizations);
554     }
555 #endif
556 #ifdef ART_ENABLE_CODEGEN_mips64
557     case InstructionSet::kMips64: {
558       OptimizationDef mips64_optimizations[] = {
559         OptDef(OptimizationPass::kSideEffectsAnalysis),
560         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch")
561       };
562       return RunOptimizations(graph,
563                               codegen,
564                               dex_compilation_unit,
565                               pass_observer,
566                               handles,
567                               mips64_optimizations);
568     }
569 #endif
570 #ifdef ART_ENABLE_CODEGEN_x86
571     case InstructionSet::kX86: {
572       OptimizationDef x86_optimizations[] = {
573         OptDef(OptimizationPass::kInstructionSimplifierX86),
574         OptDef(OptimizationPass::kSideEffectsAnalysis),
575         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
576         OptDef(OptimizationPass::kPcRelativeFixupsX86),
577         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
578       };
579       return RunOptimizations(graph,
580                               codegen,
581                               dex_compilation_unit,
582                               pass_observer,
583                               handles,
584                               x86_optimizations);
585     }
586 #endif
587 #ifdef ART_ENABLE_CODEGEN_x86_64
588     case InstructionSet::kX86_64: {
589       OptimizationDef x86_64_optimizations[] = {
590         OptDef(OptimizationPass::kInstructionSimplifierX86_64),
591         OptDef(OptimizationPass::kSideEffectsAnalysis),
592         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
593         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
594       };
595       return RunOptimizations(graph,
596                               codegen,
597                               dex_compilation_unit,
598                               pass_observer,
599                               handles,
600                               x86_64_optimizations);
601     }
602 #endif
603     default:
604       return false;
605   }
606 }
607 
608 NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
609 static void AllocateRegisters(HGraph* graph,
610                               CodeGenerator* codegen,
611                               PassObserver* pass_observer,
612                               RegisterAllocator::Strategy strategy,
613                               OptimizingCompilerStats* stats) {
614   {
615     PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
616                     pass_observer);
617     PrepareForRegisterAllocation(graph, codegen->GetCompilerOptions(), stats).Run();
618   }
619   // Use local allocator shared by SSA liveness analysis and register allocator.
620   // (Register allocator creates new objects in the liveness data.)
621   ScopedArenaAllocator local_allocator(graph->GetArenaStack());
622   SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
623   {
624     PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
625     liveness.Analyze();
626   }
627   {
628     PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
629     std::unique_ptr<RegisterAllocator> register_allocator =
630         RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy);
631     register_allocator->AllocateRegisters();
632   }
633 }
634 
635 // Strip pass name suffix to get optimization name.
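// For example, "side_effects$before_gvn" maps back to the "side_effects" optimization name.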
636 static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
637   size_t pos = pass_name.find(kPassNameSeparator);
638   return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
639 }
640 
641 void OptimizingCompiler::RunOptimizations(HGraph* graph,
642                                           CodeGenerator* codegen,
643                                           const DexCompilationUnit& dex_compilation_unit,
644                                           PassObserver* pass_observer,
645                                           VariableSizedHandleScope* handles) const {
646   const std::vector<std::string>* pass_names = GetCompilerOptions().GetPassesToRun();
647   if (pass_names != nullptr) {
648     // If passes were defined on command-line, build the optimization
649     // passes and run these instead of the built-in optimizations.
650     // TODO: a way to define depends_on via command-line?
651     const size_t length = pass_names->size();
652     std::vector<OptimizationDef> optimizations;
653     for (const std::string& pass_name : *pass_names) {
654       std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
655       optimizations.push_back(OptDef(OptimizationPassByName(opt_name), pass_name.c_str()));
656     }
657     RunOptimizations(graph,
658                      codegen,
659                      dex_compilation_unit,
660                      pass_observer,
661                      handles,
662                      optimizations.data(),
663                      length);
664     return;
665   }
666 
667   OptimizationDef optimizations[] = {
668     // Initial optimizations.
669     OptDef(OptimizationPass::kConstantFolding),
670     OptDef(OptimizationPass::kInstructionSimplifier),
671     OptDef(OptimizationPass::kDeadCodeElimination,
672            "dead_code_elimination$initial"),
673     // Inlining.
674     OptDef(OptimizationPass::kInliner),
675     // Simplification (only if inlining occurred).
676     OptDef(OptimizationPass::kConstantFolding,
677            "constant_folding$after_inlining",
678            OptimizationPass::kInliner),
679     OptDef(OptimizationPass::kInstructionSimplifier,
680            "instruction_simplifier$after_inlining",
681            OptimizationPass::kInliner),
682     OptDef(OptimizationPass::kDeadCodeElimination,
683            "dead_code_elimination$after_inlining",
684            OptimizationPass::kInliner),
685     // GVN.
686     OptDef(OptimizationPass::kSideEffectsAnalysis,
687            "side_effects$before_gvn"),
688     OptDef(OptimizationPass::kGlobalValueNumbering),
689     // Simplification (TODO: only if GVN occurred).
690     OptDef(OptimizationPass::kSelectGenerator),
691     OptDef(OptimizationPass::kConstantFolding,
692            "constant_folding$after_gvn"),
693     OptDef(OptimizationPass::kInstructionSimplifier,
694            "instruction_simplifier$after_gvn"),
695     OptDef(OptimizationPass::kDeadCodeElimination,
696            "dead_code_elimination$after_gvn"),
697     // High-level optimizations.
698     OptDef(OptimizationPass::kSideEffectsAnalysis,
699            "side_effects$before_licm"),
700     OptDef(OptimizationPass::kInvariantCodeMotion),
701     OptDef(OptimizationPass::kInductionVarAnalysis),
702     OptDef(OptimizationPass::kBoundsCheckElimination),
703     OptDef(OptimizationPass::kLoopOptimization),
704     // Simplification.
705     OptDef(OptimizationPass::kConstantFolding,
706            "constant_folding$after_bce"),
707     OptDef(OptimizationPass::kInstructionSimplifier,
708            "instruction_simplifier$after_bce"),
709     // Other high-level optimizations.
710     OptDef(OptimizationPass::kSideEffectsAnalysis,
711            "side_effects$before_lse"),
712     OptDef(OptimizationPass::kLoadStoreAnalysis),
713     OptDef(OptimizationPass::kLoadStoreElimination),
714     OptDef(OptimizationPass::kCHAGuardOptimization),
715     OptDef(OptimizationPass::kDeadCodeElimination,
716            "dead_code_elimination$final"),
717     OptDef(OptimizationPass::kCodeSinking),
718     // The codegen has a few assumptions that only the instruction simplifier
719     // can satisfy. For example, the code generator does not expect to see a
720     // HTypeConversion from a type to the same type.
721     OptDef(OptimizationPass::kInstructionSimplifier,
722            "instruction_simplifier$before_codegen"),
723     // Eliminate constructor fences after code sinking to avoid
724     // complicated sinking logic to split a fence with many inputs.
725     OptDef(OptimizationPass::kConstructorFenceRedundancyElimination)
726   };
727   RunOptimizations(graph,
728                    codegen,
729                    dex_compilation_unit,
730                    pass_observer,
731                    handles,
732                    optimizations);
733 
734   RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer, handles);
735 }
736 
737 static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
738   ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
739   codegen->EmitLinkerPatches(&linker_patches);
740 
741   // Sort patches by literal offset. Required for .oat_patches encoding.
742   std::sort(linker_patches.begin(), linker_patches.end(),
743             [](const linker::LinkerPatch& lhs, const linker::LinkerPatch& rhs) {
744     return lhs.LiteralOffset() < rhs.LiteralOffset();
745   });
746 
747   return linker_patches;
748 }
749 
750 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
751                                          CodeVectorAllocator* code_allocator,
752                                          CodeGenerator* codegen,
753                                          const dex::CodeItem* code_item_for_osr_check) const {
754   ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
755   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
756 
757   CompiledMethodStorage* storage = GetCompiledMethodStorage();
758   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
759       storage,
760       codegen->GetInstructionSet(),
761       code_allocator->GetMemory(),
762       ArrayRef<const uint8_t>(stack_map),
763       ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
764       ArrayRef<const linker::LinkerPatch>(linker_patches));
765 
766   for (const linker::LinkerPatch& patch : linker_patches) {
767     if (codegen->NeedsThunkCode(patch) && storage->GetThunkCode(patch).empty()) {
768       ArenaVector<uint8_t> code(allocator->Adapter());
769       std::string debug_name;
770       codegen->EmitThunkCode(patch, &code, &debug_name);
771       storage->SetThunkCode(patch, ArrayRef<const uint8_t>(code), debug_name);
772     }
773   }
774 
775   return compiled_method;
776 }
777 
778 CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
779                                               ArenaStack* arena_stack,
780                                               CodeVectorAllocator* code_allocator,
781                                               const DexCompilationUnit& dex_compilation_unit,
782                                               ArtMethod* method,
783                                               bool baseline,
784                                               bool osr,
785                                               VariableSizedHandleScope* handles) const {
786   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
787   const CompilerOptions& compiler_options = GetCompilerOptions();
788   InstructionSet instruction_set = compiler_options.GetInstructionSet();
789   const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
790   uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
791   const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
792 
793   // Always use the Thumb-2 assembler: some runtime functionality
794   // (like implicit stack overflow checks) assumes Thumb-2.
795   DCHECK_NE(instruction_set, InstructionSet::kArm);
796 
797   // Do not attempt to compile on architectures we do not support.
798   if (!IsInstructionSetSupported(instruction_set)) {
799     MaybeRecordStat(compilation_stats_.get(),
800                     MethodCompilationStat::kNotCompiledUnsupportedIsa);
801     return nullptr;
802   }
803 
804   if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
805     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
806     return nullptr;
807   }
808 
809   // Implementation of the space filter: do not compile a code item whose size in
810   // code units is bigger than 128.
811   static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
812   if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
813       && (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
814           kSpaceFilterOptimizingThreshold)) {
815     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
816     return nullptr;
817   }
818 
819   CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
820 
821   bool dead_reference_safe;
822   ArrayRef<const uint8_t> interpreter_metadata;
823   // For AOT compilation, we may not get a method, for example if its class is erroneous,
824   // possibly due to an unavailable superclass.  JIT should always have a method.
825   DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
826   if (method != nullptr) {
827     const dex::ClassDef* containing_class;
828     {
829       ScopedObjectAccess soa(Thread::Current());
830       containing_class = &method->GetClassDef();
831       interpreter_metadata = method->GetQuickenedInfo();
832     }
833     // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
834     // is currently rarely true.
835     dead_reference_safe =
836         annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
837         && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
838   } else {
839     // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
840     dead_reference_safe = false;
841   }
842 
843   HGraph* graph = new (allocator) HGraph(
844       allocator,
845       arena_stack,
846       dex_file,
847       method_idx,
848       compiler_options.GetInstructionSet(),
849       kInvalidInvokeType,
850       dead_reference_safe,
851       compiler_options.GetDebuggable(),
852       /* osr= */ osr);
853 
854   if (method != nullptr) {
855     graph->SetArtMethod(method);
856   }
857 
858   std::unique_ptr<CodeGenerator> codegen(
859       CodeGenerator::Create(graph,
860                             compiler_options,
861                             compilation_stats_.get()));
862   if (codegen.get() == nullptr) {
863     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
864     return nullptr;
865   }
866   codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
867 
868   PassObserver pass_observer(graph,
869                              codegen.get(),
870                              visualizer_output_.get(),
871                              compiler_options,
872                              dump_mutex_);
873 
874   {
875     VLOG(compiler) << "Building " << pass_observer.GetMethodName();
876     PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
877     HGraphBuilder builder(graph,
878                           code_item_accessor,
879                           &dex_compilation_unit,
880                           &dex_compilation_unit,
881                           codegen.get(),
882                           compilation_stats_.get(),
883                           interpreter_metadata,
884                           handles);
885     GraphAnalysisResult result = builder.BuildGraph();
886     if (result != kAnalysisSuccess) {
887       switch (result) {
888         case kAnalysisSkipped: {
889           MaybeRecordStat(compilation_stats_.get(),
890                           MethodCompilationStat::kNotCompiledSkipped);
891           break;
892         }
893         case kAnalysisInvalidBytecode: {
894           MaybeRecordStat(compilation_stats_.get(),
895                           MethodCompilationStat::kNotCompiledInvalidBytecode);
896           break;
897         }
898         case kAnalysisFailThrowCatchLoop: {
899           MaybeRecordStat(compilation_stats_.get(),
900                           MethodCompilationStat::kNotCompiledThrowCatchLoop);
901           break;
902         }
903         case kAnalysisFailAmbiguousArrayOp: {
904           MaybeRecordStat(compilation_stats_.get(),
905                           MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
906           break;
907         }
908         case kAnalysisFailIrreducibleLoopAndStringInit: {
909           MaybeRecordStat(compilation_stats_.get(),
910                           MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
911           break;
912         }
913         case kAnalysisSuccess:
914           UNREACHABLE();
915       }
916       pass_observer.SetGraphInBadState();
917       return nullptr;
918     }
919   }
920 
921   if (baseline) {
922     RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
923   } else {
924     RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
925   }
926 
927   RegisterAllocator::Strategy regalloc_strategy =
928     compiler_options.GetRegisterAllocationStrategy();
929   AllocateRegisters(graph,
930                     codegen.get(),
931                     &pass_observer,
932                     regalloc_strategy,
933                     compilation_stats_.get());
934 
935   codegen->Compile(code_allocator);
936   pass_observer.DumpDisassembly();
937 
938   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
939   return codegen.release();
940 }
941 
942 CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
943     ArenaAllocator* allocator,
944     ArenaStack* arena_stack,
945     CodeVectorAllocator* code_allocator,
946     const DexCompilationUnit& dex_compilation_unit,
947     ArtMethod* method,
948     VariableSizedHandleScope* handles) const {
949   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
950   const CompilerOptions& compiler_options = GetCompilerOptions();
951   InstructionSet instruction_set = compiler_options.GetInstructionSet();
952   const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
953   uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
954 
955   // Always use the Thumb-2 assembler: some runtime functionality
956   // (like implicit stack overflow checks) assume Thumb-2.
957   DCHECK_NE(instruction_set, InstructionSet::kArm);
958 
959   // Do not attempt to compile on architectures we do not support.
960   if (!IsInstructionSetSupported(instruction_set)) {
961     return nullptr;
962   }
963 
964   HGraph* graph = new (allocator) HGraph(
965       allocator,
966       arena_stack,
967       dex_file,
968       method_idx,
969       compiler_options.GetInstructionSet(),
970       kInvalidInvokeType,
971       /* dead_reference_safe= */ true,  // Intrinsics don't affect dead reference safety.
972       compiler_options.GetDebuggable(),
973       /* osr= */ false);
974 
975   DCHECK(Runtime::Current()->IsAotCompiler());
976   DCHECK(method != nullptr);
977   graph->SetArtMethod(method);
978 
979   std::unique_ptr<CodeGenerator> codegen(
980       CodeGenerator::Create(graph,
981                             compiler_options,
982                             compilation_stats_.get()));
983   if (codegen.get() == nullptr) {
984     return nullptr;
985   }
986   codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
987 
988   PassObserver pass_observer(graph,
989                              codegen.get(),
990                              visualizer_output_.get(),
991                              compiler_options,
992                              dump_mutex_);
993 
994   {
995     VLOG(compiler) << "Building intrinsic graph " << pass_observer.GetMethodName();
996     PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
997     HGraphBuilder builder(graph,
998                           CodeItemDebugInfoAccessor(),  // Null code item.
999                           &dex_compilation_unit,
1000                           &dex_compilation_unit,
1001                           codegen.get(),
1002                           compilation_stats_.get(),
1003                           /* interpreter_metadata= */ ArrayRef<const uint8_t>(),
1004                           handles);
1005     builder.BuildIntrinsicGraph(method);
1006   }
1007 
1008   OptimizationDef optimizations[] = {
1009     // The codegen has a few assumptions that only the instruction simplifier
1010     // can satisfy.
1011     OptDef(OptimizationPass::kInstructionSimplifier),
1012   };
1013   RunOptimizations(graph,
1014                    codegen.get(),
1015                    dex_compilation_unit,
1016                    &pass_observer,
1017                    handles,
1018                    optimizations);
1019 
1020   RunArchOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
1021 
1022   AllocateRegisters(graph,
1023                     codegen.get(),
1024                     &pass_observer,
1025                     compiler_options.GetRegisterAllocationStrategy(),
1026                     compilation_stats_.get());
1027   if (!codegen->IsLeafMethod()) {
1028     VLOG(compiler) << "Intrinsic method is not leaf: " << method->GetIntrinsic()
1029         << " " << graph->PrettyMethod();
1030     return nullptr;
1031   }
1032 
1033   codegen->Compile(code_allocator);
1034   pass_observer.DumpDisassembly();
1035 
1036   VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
1037       << " " << graph->PrettyMethod();
1038   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
1039   return codegen.release();
1040 }
1041 
1042 CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
1043                                             uint32_t access_flags,
1044                                             InvokeType invoke_type,
1045                                             uint16_t class_def_idx,
1046                                             uint32_t method_idx,
1047                                             Handle<mirror::ClassLoader> jclass_loader,
1048                                             const DexFile& dex_file,
1049                                             Handle<mirror::DexCache> dex_cache) const {
1050   const CompilerOptions& compiler_options = GetCompilerOptions();
1051   CompiledMethod* compiled_method = nullptr;
1052   Runtime* runtime = Runtime::Current();
1053   DCHECK(runtime->IsAotCompiler());
1054   const VerifiedMethod* verified_method = compiler_options.GetVerifiedMethod(&dex_file, method_idx);
1055   DCHECK(!verified_method->HasRuntimeThrow());
1056   if (compiler_options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
1057       verifier::CanCompilerHandleVerificationFailure(
1058           verified_method->GetEncounteredVerificationFailures())) {
1059     ArenaAllocator allocator(runtime->GetArenaPool());
1060     ArenaStack arena_stack(runtime->GetArenaPool());
1061     CodeVectorAllocator code_allocator(&allocator);
1062     std::unique_ptr<CodeGenerator> codegen;
1063     bool compiled_intrinsic = false;
1064     {
1065       ScopedObjectAccess soa(Thread::Current());
1066       ArtMethod* method =
1067           runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
1068               method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
1069       DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
1070       soa.Self()->ClearException();  // Suppress exception if any.
1071       VariableSizedHandleScope handles(soa.Self());
1072       Handle<mirror::Class> compiling_class =
1073           handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
1074       DexCompilationUnit dex_compilation_unit(
1075           jclass_loader,
1076           runtime->GetClassLinker(),
1077           dex_file,
1078           code_item,
1079           class_def_idx,
1080           method_idx,
1081           access_flags,
1082           /*verified_method=*/ nullptr,  // Not needed by the Optimizing compiler.
1083           dex_cache,
1084           compiling_class);
1085       // Go to native so that we don't block GC during compilation.
1086       ScopedThreadSuspension sts(soa.Self(), kNative);
1087       if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
1088         DCHECK(compiler_options.IsBootImage());
1089         codegen.reset(
1090             TryCompileIntrinsic(&allocator,
1091                                 &arena_stack,
1092                                 &code_allocator,
1093                                 dex_compilation_unit,
1094                                 method,
1095                                 &handles));
1096         if (codegen != nullptr) {
1097           compiled_intrinsic = true;
1098         }
1099       }
1100       if (codegen == nullptr) {
1101         codegen.reset(
1102             TryCompile(&allocator,
1103                        &arena_stack,
1104                        &code_allocator,
1105                        dex_compilation_unit,
1106                        method,
1107                        compiler_options.IsBaseline(),
1108                        /* osr= */ false,
1109                        &handles));
1110       }
1111     }
1112     if (codegen.get() != nullptr) {
1113       compiled_method = Emit(&allocator,
1114                              &code_allocator,
1115                              codegen.get(),
1116                              compiled_intrinsic ? nullptr : code_item);
1117       if (compiled_intrinsic) {
1118         compiled_method->MarkAsIntrinsic();
1119       }
1120 
1121       if (kArenaAllocatorCountAllocations) {
1122         codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
1123         size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
1124         if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
1125           MemStats mem_stats(allocator.GetMemStats());
1126           MemStats peak_stats(arena_stack.GetPeakStats());
1127           LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
1128                     << dex_file.PrettyMethod(method_idx)
1129                     << "\n" << Dumpable<MemStats>(mem_stats)
1130                     << "\n" << Dumpable<MemStats>(peak_stats);
1131         }
1132       }
1133     }
1134   } else {
1135     MethodCompilationStat method_stat;
1136     if (compiler_options.VerifyAtRuntime()) {
1137       method_stat = MethodCompilationStat::kNotCompiledVerifyAtRuntime;
1138     } else {
1139       method_stat = MethodCompilationStat::kNotCompiledVerificationError;
1140     }
1141     MaybeRecordStat(compilation_stats_.get(), method_stat);
1142   }
1143 
1144   if (kIsDebugBuild &&
1145       compiler_options.CompilingWithCoreImage() &&
1146       IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
1147     // For testing purposes, we put a special marker on method names
1148     // that should be compiled with this compiler (when the
1149     // instruction set is supported). This makes sure we're not
1150     // regressing.
1151     std::string method_name = dex_file.PrettyMethod(method_idx);
1152     bool shouldCompile = method_name.find("$opt$") != std::string::npos;
1153     DCHECK((compiled_method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
1154   }
1155 
1156   return compiled_method;
1157 }
1158 
1159 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
1160                                                     const JniCompiledMethod& jni_compiled_method) {
1161   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
1162   // to stay clear of the frame size limit.
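  // The resulting map records only the method's frame information (frame size and
  // spill masks) and no dex registers, via the BeginMethod()/EndMethod() calls below.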
1163   std::unique_ptr<StackMapStream> stack_map_stream(
1164       new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
1165   stack_map_stream->BeginMethod(
1166       jni_compiled_method.GetFrameSize(),
1167       jni_compiled_method.GetCoreSpillMask(),
1168       jni_compiled_method.GetFpSpillMask(),
1169       /* num_dex_registers= */ 0);
1170   stack_map_stream->EndMethod();
1171   return stack_map_stream->Encode();
1172 }
1173 
1174 CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
1175                                                uint32_t method_idx,
1176                                                const DexFile& dex_file,
1177                                                Handle<mirror::DexCache> dex_cache) const {
1178   Runtime* runtime = Runtime::Current();
1179   ArenaAllocator allocator(runtime->GetArenaPool());
1180   ArenaStack arena_stack(runtime->GetArenaPool());
1181 
1182   const CompilerOptions& compiler_options = GetCompilerOptions();
1183   if (compiler_options.IsBootImage()) {
1184     ScopedObjectAccess soa(Thread::Current());
1185     ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
1186         method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
1187     if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
1188       VariableSizedHandleScope handles(soa.Self());
1189       ScopedNullHandle<mirror::ClassLoader> class_loader;  // null means boot class path loader.
1190       Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
1191       DexCompilationUnit dex_compilation_unit(
1192           class_loader,
1193           runtime->GetClassLinker(),
1194           dex_file,
1195           /*code_item=*/ nullptr,
1196           /*class_def_idx=*/ DexFile::kDexNoIndex16,
1197           method_idx,
1198           access_flags,
1199           /*verified_method=*/ nullptr,
1200           dex_cache,
1201           compiling_class);
1202       CodeVectorAllocator code_allocator(&allocator);
1203       // Go to native so that we don't block GC during compilation.
1204       ScopedThreadSuspension sts(soa.Self(), kNative);
1205       std::unique_ptr<CodeGenerator> codegen(
1206           TryCompileIntrinsic(&allocator,
1207                               &arena_stack,
1208                               &code_allocator,
1209                               dex_compilation_unit,
1210                               method,
1211                               &handles));
1212       if (codegen != nullptr) {
1213         CompiledMethod* compiled_method = Emit(&allocator,
1214                                                &code_allocator,
1215                                                codegen.get(),
1216                                                /* item= */ nullptr);
1217         compiled_method->MarkAsIntrinsic();
1218         return compiled_method;
1219       }
1220     }
1221   }
1222 
1223   JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
1224       compiler_options, access_flags, method_idx, dex_file);
1225   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
1226 
1227   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
1228   ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
1229                                                            jni_compiled_method);
1230   return CompiledMethod::SwapAllocCompiledMethod(
1231       GetCompiledMethodStorage(),
1232       jni_compiled_method.GetInstructionSet(),
1233       jni_compiled_method.GetCode(),
1234       ArrayRef<const uint8_t>(stack_map),
1235       jni_compiled_method.GetCfi(),
1236       /* patches= */ ArrayRef<const linker::LinkerPatch>());
1237 }
1238 
1239 Compiler* CreateOptimizingCompiler(const CompilerOptions& compiler_options,
1240                                    CompiledMethodStorage* storage) {
1241   return new OptimizingCompiler(compiler_options, storage);
1242 }
1243 
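// Returns whether inline info may embed raw ArtMethod pointers. This holds for the
// JIT (and for unit tests with no runtime), but not for the AOT compiler, presumably
// because AOT output cannot reference pointers local to the current runtime.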
1244 bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
1245   // Note: the runtime is null only for unit testing.
1246   return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
1247 }
1248 
1249 bool OptimizingCompiler::JitCompile(Thread* self,
1250                                     jit::JitCodeCache* code_cache,
1251                                     ArtMethod* method,
1252                                     bool baseline,
1253                                     bool osr,
1254                                     jit::JitLogger* jit_logger) {
1255   StackHandleScope<3> hs(self);
1256   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
1257       method->GetDeclaringClass()->GetClassLoader()));
1258   Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
1259   DCHECK(method->IsCompilable());
1260 
1261   const DexFile* dex_file = method->GetDexFile();
1262   const uint16_t class_def_idx = method->GetClassDefIndex();
1263   const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
1264   const uint32_t method_idx = method->GetDexMethodIndex();
1265   const uint32_t access_flags = method->GetAccessFlags();
1266 
1267   Runtime* runtime = Runtime::Current();
1268   ArenaAllocator allocator(runtime->GetJitArenaPool());
1269 
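  // Fast path for native methods: only the JNI stub is compiled. It gets the minimal
  // stack map from CreateJniStackMap(), is committed to the code cache with no JIT
  // roots, and optionally gets JIT debug info, roughly mirroring the managed path below.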
1270   if (UNLIKELY(method->IsNative())) {
1271     const CompilerOptions& compiler_options = GetCompilerOptions();
1272     JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
1273         compiler_options, access_flags, method_idx, *dex_file);
1274     std::vector<Handle<mirror::Object>> roots;
1275     ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
1276         allocator.Adapter(kArenaAllocCHA));
1277     ArenaStack arena_stack(runtime->GetJitArenaPool());
1278     // StackMapStream is large and does not fit into this frame, so we need a helper method.
1279     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
1280     ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
1281                                                              jni_compiled_method);
1282     uint8_t* stack_map_data = nullptr;
1283     uint8_t* roots_data = nullptr;
1284     uint32_t data_size = code_cache->ReserveData(self,
1285                                                  stack_map.size(),
1286                                                  /* number_of_roots= */ 0,
1287                                                  method,
1288                                                  &stack_map_data,
1289                                                  &roots_data);
1290     if (stack_map_data == nullptr || roots_data == nullptr) {
1291       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
1292       return false;
1293     }
1294     memcpy(stack_map_data, stack_map.data(), stack_map.size());
1295 
1296     const void* code = code_cache->CommitCode(
1297         self,
1298         method,
1299         stack_map_data,
1300         roots_data,
1301         jni_compiled_method.GetCode().data(),
1302         jni_compiled_method.GetCode().size(),
1303         data_size,
1304         osr,
1305         roots,
1306         /* has_should_deoptimize_flag= */ false,
1307         cha_single_implementation_list);
1308     if (code == nullptr) {
1309       return false;
1310     }
1311 
1312     if (compiler_options.GenerateAnyDebugInfo()) {
1313       const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
1314       const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
1315       debug::MethodDebugInfo info = {};
1316       info.custom_name = "art_jni_trampoline";
1317       info.dex_file = dex_file;
1318       info.class_def_index = class_def_idx;
1319       info.dex_method_index = method_idx;
1320       info.access_flags = access_flags;
1321       info.code_item = code_item;
1322       info.isa = jni_compiled_method.GetInstructionSet();
1323       info.deduped = false;
1324       info.is_native_debuggable = compiler_options.GetNativeDebuggable();
1325       info.is_optimized = true;
1326       info.is_code_address_text_relative = false;
1327       info.code_address = code_address;
1328       info.code_size = jni_compiled_method.GetCode().size();
1329       info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
1330       info.code_info = nullptr;
1331       info.cfi = jni_compiled_method.GetCfi();
1332       GenerateJitDebugInfo(method, info);
1333     }
1334 
1335     Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
1336     if (jit_logger != nullptr) {
1337       jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
1338     }
1339     return true;
1340   }
1341 
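  // Managed (dex bytecode) path: build and optimize the method via TryCompile(), then
  // commit the generated code, stack map and JIT roots to the code cache.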
1342   ArenaStack arena_stack(runtime->GetJitArenaPool());
1343   CodeVectorAllocator code_allocator(&allocator);
1344   VariableSizedHandleScope handles(self);
1345 
1346   std::unique_ptr<CodeGenerator> codegen;
1347   {
1348     Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
1349     DexCompilationUnit dex_compilation_unit(
1350         class_loader,
1351         runtime->GetClassLinker(),
1352         *dex_file,
1353         code_item,
1354         class_def_idx,
1355         method_idx,
1356         access_flags,
1357         /*verified_method=*/ nullptr,
1358         dex_cache,
1359         compiling_class);
1360 
1361     // Go to native so that we don't block GC during compilation.
1362     ScopedThreadSuspension sts(self, kNative);
1363     codegen.reset(
1364         TryCompile(&allocator,
1365                    &arena_stack,
1366                    &code_allocator,
1367                    dex_compilation_unit,
1368                    method,
1369                    baseline,
1370                    osr,
1371                    &handles));
1372     if (codegen.get() == nullptr) {
1373       return false;
1374     }
1375   }
1376 
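  // Committing to the JIT code cache is a two-step sequence: ReserveData() hands back
  // writable buffers for the stack map and the root table, we copy the stack map and
  // let the codegen emit the JIT roots, and CommitCode() then publishes the code along
  // with the roots and the CHA single-implementation dependencies.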
1377   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
1378   size_t number_of_roots = codegen->GetNumberOfJitRoots();
1379   uint8_t* stack_map_data = nullptr;
1380   uint8_t* roots_data = nullptr;
1381   uint32_t data_size = code_cache->ReserveData(self,
1382                                                stack_map.size(),
1383                                                number_of_roots,
1384                                                method,
1385                                                &stack_map_data,
1386                                                &roots_data);
1387   if (stack_map_data == nullptr || roots_data == nullptr) {
1388     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
1389     return false;
1390   }
1391   memcpy(stack_map_data, stack_map.data(), stack_map.size());
1392   std::vector<Handle<mirror::Object>> roots;
1393   codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
1394   // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
1395   DCHECK(std::all_of(roots.begin(),
1396                      roots.end(),
1397                      [&handles](Handle<mirror::Object> root){
1398                        return handles.Contains(root.GetReference());
1399                      }));
1400 
1401   const void* code = code_cache->CommitCode(
1402       self,
1403       method,
1404       stack_map_data,
1405       roots_data,
1406       code_allocator.GetMemory().data(),
1407       code_allocator.GetMemory().size(),
1408       data_size,
1409       osr,
1410       roots,
1411       codegen->GetGraph()->HasShouldDeoptimizeFlag(),
1412       codegen->GetGraph()->GetCHASingleImplementationList());
1413 
1414   if (code == nullptr) {
1415     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
1416     code_cache->ClearData(self, stack_map_data, roots_data);
1417     return false;
1418   }
1419 
1420   const CompilerOptions& compiler_options = GetCompilerOptions();
1421   if (compiler_options.GenerateAnyDebugInfo()) {
1422     const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
1423     const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
1424     debug::MethodDebugInfo info = {};
1425     DCHECK(info.custom_name.empty());
1426     info.dex_file = dex_file;
1427     info.class_def_index = class_def_idx;
1428     info.dex_method_index = method_idx;
1429     info.access_flags = access_flags;
1430     info.code_item = code_item;
1431     info.isa = codegen->GetInstructionSet();
1432     info.deduped = false;
1433     info.is_native_debuggable = compiler_options.GetNativeDebuggable();
1434     info.is_optimized = true;
1435     info.is_code_address_text_relative = false;
1436     info.code_address = code_address;
1437     info.code_size = code_allocator.GetMemory().size();
1438     info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
1439     info.code_info = stack_map.size() == 0 ? nullptr : stack_map_data;
1440     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
1441     GenerateJitDebugInfo(method, info);
1442   }
1443 
1444   Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
1445   if (jit_logger != nullptr) {
1446     jit_logger->WriteLog(code, code_allocator.GetMemory().size(), method);
1447   }
1448 
1449   if (kArenaAllocatorCountAllocations) {
1450     codegen.reset();  // Release codegen's ScopedArenaAllocator for memory accounting.
1451     size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
1452     if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
1453       MemStats mem_stats(allocator.GetMemStats());
1454       MemStats peak_stats(arena_stack.GetPeakStats());
1455       LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling "
1456                 << dex_file->PrettyMethod(method_idx)
1457                 << "\n" << Dumpable<MemStats>(mem_stats)
1458                 << "\n" << Dumpable<MemStats>(peak_stats);
1459     }
1460   }
1461 
1462   return true;
1463 }
1464 
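// Builds an in-memory ELF file with debug info for the single method that was just
// JIT-compiled and registers it with the runtime's native debug interface, so that
// native debuggers and profilers can symbolize the JIT code.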
1465 void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
1466                                               const debug::MethodDebugInfo& info) {
1467   const CompilerOptions& compiler_options = GetCompilerOptions();
1468   DCHECK(compiler_options.GenerateAnyDebugInfo());
1469   TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit));
1470   {
1471     TimingLogger::ScopedTiming st("Generate JIT debug info", &logger);
1472 
1473     // If both debug info flags are passed, full debug info wins and mini debug info is not used.
1474     const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
1475 
1476     // Create entry for the single method that we just compiled.
1477     std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
1478         compiler_options.GetInstructionSet(),
1479         compiler_options.GetInstructionSetFeatures(),
1480         mini_debug_info,
1481         info);
1482     AddNativeDebugInfoForJit(Thread::Current(),
1483                              reinterpret_cast<const void*>(info.code_address),
1484                              elf_file,
1485                              debug::PackElfFileForJIT,
1486                              compiler_options.GetInstructionSet(),
1487                              compiler_options.GetInstructionSetFeatures());
1488   }
1489   Runtime::Current()->GetJit()->AddTimingLogger(logger);
1490 }
1491 
1492 }  // namespace art
1493