/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "base/casts.h"
#include "base/leb128.h"
#include "class_linker.h"
#include "class_root-inl.h"
#include "compiled_method.h"
#include "dex/bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
#include "graph_visualizer.h"
#include "image.h"
#include "gc/space/image_space.h"
#include "intern_table.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "stack_map.h"
#include "stack_map_stream.h"
#include "string_builder_append.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"
namespace art {

// Return whether a location is consistent with a type.
static bool CheckType(DataType::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
  } else if (location.IsRegisterPair()) {
    return type == DataType::Type::kInt64;
  } else if (location.IsFpuRegisterPair()) {
    return type == DataType::Type::kFloat64;
  } else if (location.IsStackSlot()) {
    return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
           || (type == DataType::Type::kFloat32)
           || (type == DataType::Type::kReference);
  } else if (location.IsDoubleStackSlot()) {
    return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == DataType::Type::kReference;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == DataType::Type::kInt64;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == DataType::Type::kFloat32;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == DataType::Type::kFloat64);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}
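// For example, a kFloat32 value is consistent with an FPU register, a float
// constant, or a 32-bit stack slot, but not with a core register or a
// register pair.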

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  HConstInputsRef inputs = instruction->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
      << inputs[i]->GetType() << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      DataType::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
        << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
        << environment->GetLocationAt(i);
    }
  }
  return true;
}
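// Note that CheckTypeConsistency() always returns true: it reports problems
// through the DCHECKs above and is itself only invoked from inside a DCHECK,
// so the whole check compiles away in release builds.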

class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 public:
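  // Note: the new CodeGenerationData is carved out of the same scoped arena
  // allocator that is then moved into the object itself, so the object and
  // all of its containers share one arena.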
  static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
                                                    InstructionSet instruction_set) {
    ScopedArenaAllocator allocator(arena_stack);
    void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
    return std::unique_ptr<CodeGenerationData>(
        ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
  }

  ScopedArenaAllocator* GetScopedAllocator() {
    return &allocator_;
  }

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
  }

  ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
    return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
  }

  StackMapStream* GetStackMapStream() { return &stack_map_stream_; }

  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
    jit_string_roots_.Overwrite(string_reference,
                                reinterpret_cast64<uint64_t>(string.GetReference()));
  }

  uint64_t GetJitStringRootIndex(StringReference string_reference) const {
    return jit_string_roots_.Get(string_reference);
  }

  size_t GetNumberOfJitStringRoots() const {
    return jit_string_roots_.size();
  }

  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
    jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
  }

  uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
    return jit_class_roots_.Get(type_reference);
  }

  size_t GetNumberOfJitClassRoots() const {
    return jit_class_roots_.size();
  }

  size_t GetNumberOfJitRoots() const {
    return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
  }

  void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
      : allocator_(std::move(allocator)),
        stack_map_stream_(&allocator_, instruction_set),
        slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
        jit_string_roots_(StringReferenceValueComparator(),
                          allocator_.Adapter(kArenaAllocCodeGenerator)),
        jit_class_roots_(TypeReferenceValueComparator(),
                         allocator_.Adapter(kArenaAllocCodeGenerator)) {
    slow_paths_.reserve(kDefaultSlowPathsCapacity);
  }

  static constexpr size_t kDefaultSlowPathsCapacity = 8;

  ScopedArenaAllocator allocator_;
  StackMapStream stack_map_stream_;
  ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;

  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
  ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;

  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
  ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
};

void CodeGenerator::CodeGenerationData::EmitJitRoots(
    /*out*/std::vector<Handle<mirror::Object>>* roots) {
  DCHECK(roots->empty());
  roots->reserve(GetNumberOfJitRoots());
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  size_t index = 0;
  for (auto& entry : jit_string_roots_) {
    // Update `roots` with the string, and replace the temporarily stored
    // address with the index in the table.
    uint64_t address = entry.second;
    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
    DCHECK(roots->back() != nullptr);
    DCHECK(roots->back()->IsString());
    entry.second = index;
    // Ensure the string is strongly interned. This is a requirement on how the JIT
    // handles strings. b/32995596
    class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
    ++index;
  }
  for (auto& entry : jit_class_roots_) {
    // Update `roots` with the class, and replace the temporarily stored
    // address with the index in the table.
    uint64_t address = entry.second;
    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
    DCHECK(roots->back() != nullptr);
    DCHECK(roots->back()->IsClass());
    entry.second = index;
    ++index;
  }
}
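
// After EmitJitRoots() has run, the values stored in the maps are literal
// table indices rather than handle addresses, so the GetJit*RootIndex()
// accessors return final table slots from then on.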

ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetScopedAllocator();
}

StackMapStream* CodeGenerator::GetStackMapStream() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetStackMapStream();
}

void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
                                         Handle<mirror::String> string) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitStringRoot(string_reference, string);
}

uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitStringRootIndex(string_reference);
}

void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
}

uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitClassRootIndex(type_reference);
}

void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
                                       const uint8_t* roots_data ATTRIBUTE_UNUSED) {
  DCHECK(code_generation_data_ != nullptr);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
}

uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
  return array_length->IsStringLength()
      ? mirror::String::CountOffset().Uint32Value()
      : mirror::Array::LengthOffset().Uint32Value();
}

uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
  DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
  return array_get->IsStringCharAt()
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ((*block_order_)[current_block_index_], current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
    HBasicBlock* block = (*block_order_)[i];
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessors()[0];
  }
  return block;
}

class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};


void CodeGenerator::GenerateSlowPaths() {
  DCHECK(code_generation_data_ != nullptr);
  size_t code_start = 0;
  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
    SlowPathCode* slow_path = slow_path_ptr.get();
    current_slow_path_ = slow_path;
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    // Record the dex pc at the start of the slow path (required for Java line number mapping).
    MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
    slow_path->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    }
  }
  current_slow_path_ = nullptr;
}
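
// Slow paths are emitted out of line, after the code for all normal blocks,
// so the fast path stays compact; a slow path is only reached through an
// explicit branch from the main code.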

void CodeGenerator::InitializeCodeGenerationData() {
  DCHECK(code_generation_data_ == nullptr);
  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
}

void CodeGenerator::Compile(CodeAllocator* allocator) {
  InitializeCodeGenerationData();

  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();

  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

  GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
                                   core_spill_mask_,
                                   fpu_spill_mask_,
                                   GetGraph()->GetNumberOfVRegs(),
                                   GetGraph()->IsCompilingBaseline());

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = (*block_order_)[current_block_index_];
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
    MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (current->HasEnvironment()) {
        // Create a stack map for HNativeDebugInfo or any instruction which calls native code.
        // Note that we need a correct mapping for the native PC of the call instruction,
        // so the runtime's stack map is not sufficient since it is at the PC after the call.
        MaybeRecordNativeDebugInfo(current, block->GetDexPc());
      }
      DisassemblyScope disassembly_scope(current, *this);
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  GenerateSlowPaths();

  // Emit catch stack maps at the end of the stack map stream as expected by the
  // runtime exception handler.
  if (graph_->HasTryCatch()) {
    RecordCatchBlockInfo();
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);

  GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
}
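
// In short, emission proceeds in a fixed order: begin the stack map stream,
// emit the frame entry, emit each non-empty block in `block_order_`, append
// the out-of-line slow paths, record catch stack maps, finalize the
// assembler output, and end the stack map stream.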

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::EmitLinkerPatches(
    ArenaVector<linker::LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
  // No linker patches by default.
}

bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const {
  // Code generators that create patches requiring thunk compilation should override this function.
  return false;
}

void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED,
                                  /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED,
                                  /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) {
  // Code generators that create patches requiring thunk compilation should override this function.
  LOG(FATAL) << "Unexpected call to EmitThunkCode().";
}

void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_safepoint_spill_size,
                                             size_t number_of_out_slots,
                                             const ArenaVector<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(!block_order.empty());
  DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = RoundUp(
      (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_safepoint_spill_size, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        first_register_slot_in_slow_path_
        + maximum_safepoint_spill_size
        + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}
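
// The frame size thus accounts for, in the order of the terms above: the
// outgoing argument and spill slots, the safepoint register save area used
// by slow paths, an optional ShouldDeoptimize flag, and the registers
// spilled in the frame entry, rounded up to the stack alignment.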

void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnMainOnly);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    MethodLoadKind method_load_kind = call->GetMethodLoadKind();
    CodePtrLocation code_ptr_location = call->GetCodePtrLocation();
    if (code_ptr_location == CodePtrLocation::kCallCriticalNative) {
      locations->AddTemp(Location::RequiresRegister());  // For target method.
    }
    if (code_ptr_location == CodePtrLocation::kCallCriticalNative ||
        method_load_kind == MethodLoadKind::kRecursive) {
      // For `kCallCriticalNative` we need the current method as the hidden argument
      // if we reach the dlsym lookup stub for @CriticalNative.
      locations->SetInAt(call->GetCurrentMethodIndex(), visitor->GetMethodLocation());
    } else {
      locations->AddTemp(visitor->GetMethodLocation());
      if (method_load_kind == MethodLoadKind::kRuntimeCall) {
        locations->SetInAt(call->GetCurrentMethodIndex(), Location::RequiresRegister());
      }
    }
  } else if (!invoke->IsInvokePolymorphic()) {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

void CodeGenerator::PrepareCriticalNativeArgumentMoves(
    HInvokeStaticOrDirect* invoke,
    /*inout*/InvokeDexCallingConventionVisitor* visitor,
    /*out*/HParallelMove* parallel_move) {
  LocationSummary* locations = invoke->GetLocations();
  for (size_t i = 0, num = invoke->GetNumberOfArguments(); i != num; ++i) {
    Location in_location = locations->InAt(i);
    DataType::Type type = invoke->InputAt(i)->GetType();
    DCHECK_NE(type, DataType::Type::kReference);
    Location out_location = visitor->GetNextLocation(type);
    if (out_location.IsStackSlot() || out_location.IsDoubleStackSlot()) {
      // Stack arguments will need to be moved after adjusting the SP.
      parallel_move->AddMove(in_location, out_location, type, /*instruction=*/ nullptr);
    } else {
      // Register arguments should have been assigned their final locations for register allocation.
      DCHECK(out_location.Equals(in_location)) << in_location << " -> " << out_location;
    }
  }
}
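
// The stack-argument moves collected above are only emitted later, by
// FinishCriticalNativeFrameSetup() below, once the stack pointer has been
// adjusted to make room for the outgoing arguments.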

void CodeGenerator::FinishCriticalNativeFrameSetup(size_t out_frame_size,
                                                   /*inout*/HParallelMove* parallel_move) {
  DCHECK_NE(out_frame_size, 0u);
  IncreaseFrame(out_frame_size);
  // Adjust the source stack offsets by `out_frame_size`, i.e. the additional
  // frame size needed for outgoing stack arguments.
  for (size_t i = 0, num = parallel_move->NumMoves(); i != num; ++i) {
    MoveOperands* operands = parallel_move->MoveOperandsAt(i);
    Location source = operands->GetSource();
    if (operands->GetSource().IsStackSlot()) {
      operands->SetSource(Location::StackSlot(source.GetStackIndex() + out_frame_size));
    } else if (operands->GetSource().IsDoubleStackSlot()) {
      operands->SetSource(Location::DoubleStackSlot(source.GetStackIndex() + out_frame_size));
    }
  }
  // Emit the moves.
  GetMoveResolver()->EmitNativeCode(parallel_move);
}

const char* CodeGenerator::GetCriticalNativeShorty(HInvokeStaticOrDirect* invoke,
                                                   uint32_t* shorty_len) {
  ScopedObjectAccess soa(Thread::Current());
  DCHECK(invoke->GetResolvedMethod()->IsCriticalNative());
  return invoke->GetResolvedMethod()->GetShorty(shorty_len);
}

void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
  MethodReference method_reference(invoke->GetMethodReference());
  MoveConstant(temp, method_reference.index);

  // The access check is unnecessary but we do not want to introduce
  // extra entrypoints for the codegens that do not support some
  // invoke type and fall back to the runtime call.

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
    case kInterface:
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }

  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
  MethodReference method_reference(invoke->GetMethodReference());
  MoveConstant(invoke->GetLocations()->GetTemp(0), method_reference.index);

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kVirtual:
      entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kInterface:
      entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke,
                                                  SlowPathCode* slow_path) {
  // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
  // method index) since it requires several pieces of information from the instruction (registers
  // A, B, H). Not using the reservation has no effect on the registers used in the runtime call.
  QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
  QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
                                                       Location out) {
  ArenaAllocator* allocator = GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
  locations->SetOut(out);
  instruction->GetLocations()->SetInAt(instruction->FormatIndex(),
                                       Location::ConstantLocation(instruction->GetFormat()));

  uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
  uint32_t f = format;
  PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
  size_t stack_offset = static_cast<size_t>(pointer_size);  // Start after the ArtMethod*.
  for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
    StringBuilderAppend::Argument arg_type =
        static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
    switch (arg_type) {
      case StringBuilderAppend::Argument::kStringBuilder:
      case StringBuilderAppend::Argument::kString:
      case StringBuilderAppend::Argument::kCharArray:
        static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
        FALLTHROUGH_INTENDED;
      case StringBuilderAppend::Argument::kBoolean:
      case StringBuilderAppend::Argument::kChar:
      case StringBuilderAppend::Argument::kInt:
      case StringBuilderAppend::Argument::kFloat:
        locations->SetInAt(i, Location::StackSlot(stack_offset));
        break;
      case StringBuilderAppend::Argument::kLong:
      case StringBuilderAppend::Argument::kDouble:
        stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
        locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
        // Skip the low word, let the common code skip the high word.
        stack_offset += sizeof(uint32_t);
        break;
      default:
        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
            << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
        UNREACHABLE();
    }
    f >>= StringBuilderAppend::kBitsPerArg;
    stack_offset += sizeof(uint32_t);
  }
  DCHECK_EQ(f, 0u);

  size_t param_size = stack_offset - static_cast<size_t>(pointer_size);
  DCHECK_ALIGNED(param_size, kVRegSize);
  size_t num_vregs = param_size / kVRegSize;
  graph_->UpdateMaximumNumberOfOutVRegs(num_vregs);
}
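
// The format constant above packs one StringBuilderAppend::Argument kind per
// kBitsPerArg-bit group, with the first argument in the least significant
// bits; the loop peels off one group per iteration, which is why `f` must be
// zero once all arguments have been processed.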

void CodeGenerator::CreateUnresolvedFieldLocationSummary(
    HInstruction* field_access,
    DataType::Type field_type,
    const FieldAccessCallingConvention& calling_convention) {
  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);

  locations->AddTemp(calling_convention.GetFieldIndexLocation());

  if (is_instance) {
    // Add the `this` object for instance field accesses.
    locations->SetInAt(0, calling_convention.GetObjectLocation());
  }

  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that we are forced to special-case
  // the access to floating point values.
  if (is_get) {
    if (DataType::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the register
      // allocator expects it in a floating point register.
      // Note: we don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap with
      // the input or the field index.
      // The transfer between the two will be done at codegen level.
      locations->SetOut(calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetOut(calling_convention.GetReturnLocation(field_type));
    }
  } else {
    size_t set_index = is_instance ? 1 : 0;
    if (DataType::IsFloatingPointType(field_type)) {
      // The set value comes from a float location while the calling convention
      // expects it in a regular register location. Allocate a temp for it and
      // make the transfer at codegen.
      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetInAt(set_index,
          calling_convention.GetSetValueLocation(field_type, is_instance));
    }
  }
}

void CodeGenerator::GenerateUnresolvedFieldAccess(
    HInstruction* field_access,
    DataType::Type field_type,
    uint32_t field_index,
    uint32_t dex_pc,
    const FieldAccessCallingConvention& calling_convention) {
  LocationSummary* locations = field_access->GetLocations();

  MoveConstant(locations->GetTemp(0), field_index);

  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  if (!is_get && DataType::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using the temp location directly is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
                 locations->InAt(is_instance ? 1 : 0),
                 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
                                                    : DataType::Type::kInt32));
  }

  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
  switch (field_type) {
    case DataType::Type::kBool:
      entrypoint = is_instance
          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt8:
      entrypoint = is_instance
          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
      break;
    case DataType::Type::kUint16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
      break;
    case DataType::Type::kInt32:
    case DataType::Type::kFloat32:
      entrypoint = is_instance
          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
          : (is_get ? kQuickGet32Static : kQuickSet32Static);
      break;
    case DataType::Type::kReference:
      entrypoint = is_instance
          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
      break;
    case DataType::Type::kInt64:
    case DataType::Type::kFloat64:
      entrypoint = is_instance
          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
          : (is_get ? kQuickGet64Static : kQuickSet64Static);
      break;
    default:
      LOG(FATAL) << "Invalid type " << field_type;
  }
  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);

  if (is_get && DataType::IsFloatingPointType(field_type)) {
    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
  }
}

void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK_EQ(cls->InputCount(), 1u);
  LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
      cls, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_type_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK(!cls->MustGenerateClinitCheck());
  LocationSummary* locations = cls->GetLocations();
  MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
  if (cls->NeedsAccessCheck()) {
    CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
    InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
  } else {
    CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
    InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
  }
}

void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
    HLoadMethodHandle* method_handle,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_handle->InputCount(), 1u);
  LocationSummary* locations =
      new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_handle, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
  LocationSummary* locations = method_handle->GetLocations();
  MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
  CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
}

void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
    HLoadMethodType* method_type,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_type->InputCount(), 1u);
  LocationSummary* locations =
      new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_type, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
  LocationSummary* locations = method_type->GetLocations();
  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
  CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
}

static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
  Runtime* runtime = Runtime::Current();
  const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
      runtime->GetHeap()->GetBootImageSpaces();
  // Check that the `object` is in the expected section of one of the boot image files.
  DCHECK(std::any_of(boot_image_spaces.begin(),
                     boot_image_spaces.end(),
                     [object, section](gc::space::ImageSpace* space) {
                       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
                       uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
                       return space->GetImageHeader().GetImageSection(section).Contains(offset);
                     }));
  uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
  uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
  return dchecked_integral_cast<uint32_t>(offset);
}
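
// Note that the returned offset is computed relative to the beginning of the
// *first* boot image space even when the object lives in a later space, so
// it identifies the object within the whole boot image address range.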

uint32_t CodeGenerator::GetBootImageOffset(ObjPtr<mirror::Object> object) {
  return GetBootImageOffsetImpl(object.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
  DCHECK(klass != nullptr);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::String> string = load_string->GetString().Get();
  DCHECK(string != nullptr);
  return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}

uint32_t CodeGenerator::GetBootImageOffset(HInvoke* invoke) {
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image objects are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(ClassRoot class_root) NO_THREAD_SAFETY_ANALYSIS {
  ObjPtr<mirror::Class> klass = GetClassRoot<kWithoutReadBarrier>(class_root);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffsetOfIntrinsicDeclaringClass(HInvoke* invoke)
    NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_NE(invoke->GetIntrinsic(), Intrinsics::kNone);
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass<kWithoutReadBarrier>();
  return GetBootImageOffsetImpl(declaring_class.Ptr(), ImageHeader::kSectionObjects);
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKs below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special-case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    env->AllocateLocations();
  }
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr) {
      if (locations->CanCall()) {
        MarkNotLeaf();
        if (locations->NeedsSuspendCheckEntry()) {
          MarkNeedsSuspendCheckEntry();
        }
      } else if (locations->Intrinsified() &&
                 instruction->IsInvokeStaticOrDirect() &&
                 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
        return;
      }
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                     const CompilerOptions& compiler_options,
                                                     OptimizingCompilerStats* stats) {
  ArenaAllocator* allocator = graph->GetAllocator();
  switch (compiler_options.GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
    }
#endif
    default:
      return nullptr;
  }
}
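
// Note that Create() returns null when the requested instruction set was not
// enabled at build time (see the ART_ENABLE_CODEGEN_* guards above), so
// callers must be prepared to handle a null code generator.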

CodeGenerator::CodeGenerator(HGraph* graph,
                             size_t number_of_core_registers,
                             size_t number_of_fpu_registers,
                             size_t number_of_register_pairs,
                             uint32_t core_callee_save_mask,
                             uint32_t fpu_callee_save_mask,
                             const CompilerOptions& compiler_options,
                             OptimizingCompilerStats* stats)
    : frame_size_(0),
      core_spill_mask_(0),
      fpu_spill_mask_(0),
      first_register_slot_in_slow_path_(0),
      allocated_registers_(RegisterSet::Empty()),
      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
                                                                      kArenaAllocCodeGenerator)),
      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
                                                                     kArenaAllocCodeGenerator)),
      number_of_core_registers_(number_of_core_registers),
      number_of_fpu_registers_(number_of_fpu_registers),
      number_of_register_pairs_(number_of_register_pairs),
      core_callee_save_mask_(core_callee_save_mask),
      fpu_callee_save_mask_(fpu_callee_save_mask),
      block_order_(nullptr),
      disasm_info_(nullptr),
      stats_(stats),
      graph_(graph),
      compiler_options_(compiler_options),
      current_slow_path_(nullptr),
      current_block_index_(0),
      is_leaf_(true),
      needs_suspend_check_entry_(false),
      requires_current_method_(false),
      code_generation_data_() {
  if (GetGraph()->IsCompilingOsr()) {
    // Make OSR methods have all registers spilled; this simplifies the logic of
    // jumping to the compiled code directly.
    for (size_t i = 0; i < number_of_core_registers_; ++i) {
      if (IsCoreCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::RegisterLocation(i));
      }
    }
    for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
      if (IsFloatingPointCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::FpuRegisterLocation(i));
      }
    }
  }
  if (GetGraph()->IsCompilingBaseline()) {
    // We need the current method in case we reach the hotness threshold. As a
    // side effect this makes the frame non-empty.
    SetRequiresCurrentMethod();
  }
}

CodeGenerator::~CodeGenerator() {}

size_t CodeGenerator::GetNumberOfJitRoots() const {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetNumberOfJitRoots();
}

static void CheckCovers(uint32_t dex_pc,
                        const HGraph& graph,
                        const CodeInfo& code_info,
                        const ArenaVector<HSuspendCheck*>& loop_headers,
                        ArenaVector<size_t>* covered) {
  for (size_t i = 0; i < loop_headers.size(); ++i) {
    if (loop_headers[i]->GetDexPc() == dex_pc) {
      if (graph.IsCompilingOsr()) {
        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
      }
      ++(*covered)[i];
    }
  }
}

// Debug helper to ensure loop entries in compiled code are matched by
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                            const CodeInfo& code_info,
                                            const dex::CodeItem& code_item) {
  if (graph.HasTryCatch()) {
    // One can write loops through try/catch, which we do not support for OSR anyway.
    return;
  }
  ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (HBasicBlock* block : graph.GetReversePostOrder()) {
    if (block->IsLoopHeader()) {
      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
      if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
        loop_headers.push_back(suspend_check);
      }
    }
  }
  ArenaVector<size_t> covered(
      loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
                                                                      &code_item)) {
    const uint32_t dex_pc = pair.DexPc();
    const Instruction& instruction = pair.Inst();
    if (instruction.IsBranch()) {
      uint32_t target = dex_pc + instruction.GetTargetOffset();
      CheckCovers(target, graph, code_info, loop_headers, &covered);
    } else if (instruction.IsSwitch()) {
      DexSwitchTable table(instruction, dex_pc);
      uint16_t num_entries = table.GetNumEntries();
      size_t offset = table.GetFirstValueIndex();

      // Use a larger loop counter type to avoid overflow issues.
      for (size_t i = 0; i < num_entries; ++i) {
        // The target of the case.
        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
        CheckCovers(target, graph, code_info, loop_headers, &covered);
      }
    }
  }

  for (size_t i = 0; i < covered.size(); ++i) {
    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
  }
}

ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
  ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
  if (kIsDebugBuild && code_item != nullptr) {
    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
  }
  return stack_map;
}

// Returns whether stack map dex register info is needed for the instruction.
//
// The following cases mandate having a dex register map:
//  * Deoptimization
//    when we need to obtain the values to restore actual vregisters for the interpreter.
//  * Debuggability
//    when we want to observe the values / asynchronously deoptimize.
//  * Monitor operations
//    to allow dumping the locked dex registers in a stack trace for non-debuggable code.
//  * On-stack-replacement (OSR)
//    when entering code compiled for OSR from the interpreter, we need to initialize the
//    compiled code's values with the values from the vregisters.
//  * Method local catch blocks
//    a catch block must see the environment of the instruction from the same method that can
//    throw to this block.
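//
// For example, an HDivZeroCheck inside a try block can throw into a catch
// block of the same method, so its stack map carries a dex register map even
// in non-debuggable, non-OSR compilations.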
static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
  HGraph* graph = instruction->GetBlock()->GetGraph();
  return instruction->IsDeoptimize() ||
         graph->IsDebuggable() ||
         graph->HasMonitorOperations() ||
         osr ||
         instruction->CanThrowIntoCatchBlock();
}

void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
}

void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 uint32_t native_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  if (instruction != nullptr) {
    // The code generated for some type conversions
    // may call the runtime, thus normally requiring a subsequent
    // call to this method. However, the method verifier does not
    // produce PC information for certain instructions, which are
    // considered "atomic" (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions.  As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      DataType::Type type = instruction->AsRem()->GetResultType();
      if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
        return;
      }
    }
  }

  StackMapStream* stack_map_stream = GetStackMapStream();
  if (instruction == nullptr) {
    // For stack overflow checks and native-debug-info entries without dex register
    // mapping (i.e. start of basic block or start of slow path).
    stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
    stack_map_stream->EndStackMapEntry();
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  uint32_t register_mask = locations->GetRegisterMask();
  DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
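  // Example: with register_mask == 0b0110 (two object-holding registers) and
  // slow-path spills == 0b0010, the recorded mask becomes 0b0100; the spilled
  // register's object is tracked instead through the stack mask bit set in
  // SlowPathCode::SaveLiveRegisters.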
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of a slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the spilled caller-saves from the
    // mask, since they will be overwritten by the callee.
    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
    register_mask &= ~spills;
  } else {
    // The register mask must be a subset of the callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t inlining_depth = 0;
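  // Walk up the chain of inlined environments: e.g. if this instruction's
  // method was inlined into a caller that was itself inlined, inlining_depth
  // becomes 2 and outer_dex_pc is the dex pc of the invoke in the outermost
  // method.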
  HEnvironment* const environment = instruction->GetEnvironment();
  if (environment != nullptr) {
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      ++inlining_depth;
    }
    outer_dex_pc = outer_environment->GetDexPc();
  }

  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
  bool osr =
      instruction->IsSuspendCheck() &&
      (info != nullptr) &&
      graph_->IsCompilingOsr() &&
      (inlining_depth == 0);
  StackMap::Kind kind = native_debug_info
      ? StackMap::Kind::Debug
      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
  bool needs_vreg_info = NeedsVregInfo(instruction, osr);
  stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       kind,
                                       needs_vreg_info);

  EmitEnvironment(environment, slow_path, needs_vreg_info);
  stack_map_stream->EndStackMapEntry();

  if (osr) {
    DCHECK_EQ(info->GetSuspendCheck(), instruction);
    DCHECK(info->IsIrreducible());
    DCHECK(environment != nullptr);
    if (kIsDebugBuild) {
      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
        HInstruction* in_environment = environment->GetInstructionAt(i);
        if (in_environment != nullptr) {
          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
          Location location = environment->GetLocationAt(i);
          DCHECK(location.IsStackSlot() ||
                 location.IsDoubleStackSlot() ||
                 location.IsConstant() ||
                 location.IsInvalid());
          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
          }
        }
      }
    }
  }
}
bool CodeGenerator::HasStackMapAtCurrentPc() {
  uint32_t pc = GetAssembler()->CodeSize();
  StackMapStream* stack_map_stream = GetStackMapStream();
  size_t count = stack_map_stream->GetNumberOfStackMaps();
  if (count == 0) {
    return false;
  }
  return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
}

void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                               uint32_t dex_pc,
                                               SlowPathCode* slow_path) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
  }
}

void CodeGenerator::RecordCatchBlockInfo() {
  StackMapStream* stack_map_stream = GetStackMapStream();

  for (HBasicBlock* block : *block_order_) {
    if (!block->IsCatchBlock()) {
      continue;
    }

    uint32_t dex_pc = block->GetDexPc();
    uint32_t num_vregs = graph_->GetNumberOfVRegs();
    uint32_t native_pc = GetAddressOf(block);

    stack_map_stream->BeginStackMapEntry(dex_pc,
                                         native_pc,
                                         /* register_mask= */ 0,
                                         /* sp_mask= */ nullptr,
                                         StackMap::Kind::Catch);

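    // Walk the catch phis (sorted by vreg number) in lockstep with the vreg
    // indices; any vreg without a live catch phi is recorded as kNone.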
    HInstruction* current_phi = block->GetFirstPhi();
    for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
        HInstruction* next_phi = current_phi->GetNext();
        DCHECK(next_phi == nullptr ||
               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
        current_phi = next_phi;
      }

      if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      } else {
        Location location = current_phi->GetLocations()->Out();
        switch (location.GetKind()) {
          case Location::kStackSlot: {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            break;
          }
          case Location::kDoubleStackSlot: {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
            ++vreg;
            DCHECK_LT(vreg, num_vregs);
            break;
          }
          default: {
            // All catch phis must be allocated to a stack slot.
            LOG(FATAL) << "Unexpected kind " << location.GetKind();
            UNREACHABLE();
          }
        }
      }
    }

    stack_map_stream->EndStackMapEntry();
  }
}

void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->AddSlowPath(slow_path);
}

void CodeGenerator::EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    using Kind = DexRegisterLocation::Kind;
    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
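        // Note: wide (64-bit) constants occupy two consecutive dex registers,
        // so the long and double cases below emit two entries and skip the
        // next vreg with ++i.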
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        stack_map_stream->AddDexRegisterEntry(
            Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}

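// Emits the environment chain attached to a stack map entry. The recursion
// below emits the outermost environment first, so the entry reads as: outer
// dex registers, then one inline-info entry (with its own dex registers) per
// inlined frame.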
void CodeGenerator::EmitEnvironment(HEnvironment* environment,
                                    SlowPathCode* slow_path,
                                    bool needs_vreg_info) {
  if (environment == nullptr) return;

  StackMapStream* stack_map_stream = GetStackMapStream();
  bool emit_inline_info = environment->GetParent() != nullptr;

  if (emit_inline_info) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path, needs_vreg_info);
    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           needs_vreg_info ? environment->Size() : 0,
                                           &graph_->GetDexFile(),
                                           this);
  }

  if (needs_vreg_info) {
    // If a dex register map is not required we just won't emit it.
    EmitVRegInfo(environment, slow_path);
  }

  if (emit_inline_info) {
    stack_map_stream->EndInlineInfoEntry();
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  return null_check->IsEmittedAtUseSite();
}

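// Records a stack map for an instruction carrying an implicit null check.
// For example, an HArrayGet that absorbed its HNullCheck (emitted at use site)
// records the null check's dex pc at the native pc of the faulting memory
// access, letting the runtime's fault handler turn the resulting SIGSEGV into
// a NullPointerException at the right bytecode.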
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  HNullCheck* null_check = instr->GetImplicitNullCheck();
  if (null_check != nullptr) {
    RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as a leaf (and eliminates the
  // HSuspendCheck from the entry block). However, it will still get a valid stack frame
  // because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers and we also need to set up the stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
                                                          HParallelMove* spills) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());
  DCHECK(block->GetFirstInstruction() == spills);

  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
    Location dest = spills->MoveOperandsAt(i)->GetDestination();
    // All parallel moves in loop headers are spills.
    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
    // Clear the stack bit marking a reference. Do not bother to check whether the spill is
    // actually a reference spill; clearing bits that are already zero is harmless.
    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
  }
}

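// Emits two moves as a single parallel move. Used by slow paths that need to
// shuffle a pair of values at once, e.g. a bounds-check slow path passing the
// index and length to the runtime calling convention; the move resolver
// handles any overlap between the two moves' sources and destinations.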
void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      DataType::Type type1,
                                      Location from2,
                                      Location to2,
                                      DataType::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetAllocator());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      // 'CanTriggerGC' side effect is used to restrict optimization of instructions which depend
      // on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points. However
      // if execution never returns to the compiled code from a GC point this restriction is
      // unnecessary - in particular for fatal slow paths which might trigger GC.
      DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
             instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (kEmitCompilerReadBarrier &&
              !kUseBakerReadBarrier &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsPredicatedInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription() << std::endl
          << "Instruction and args: " << instruction->DumpWithArgs();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might still have
    // it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsPredicatedInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvoke() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

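// Restores the registers spilled by SaveLiveRegisters above; it walks the same
// spill masks in the same low-to-high order, so the computed stack offsets
// match the ones recorded at save time.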
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
  // Check for known failure cases that will force us to bail out to the
  // runtime, and just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // The length must be >= 0.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 const uint8_t* roots_data,
                                 /*out*/std::vector<Handle<mirror::Object>>* roots) {
  code_generation_data_->EmitJitRoots(roots);
  EmitJitRootPatches(code, roots_data);
}

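// Selects the allocation entrypoint by element width: component size shifts
// 0/1/2/3 correspond to 8/16/32/64-bit array elements, respectively.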
QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
  switch (new_array->GetComponentSizeShift()) {
    case 0: return kQuickAllocArrayResolved8;
    case 1: return kQuickAllocArrayResolved16;
    case 2: return kQuickAllocArrayResolved32;
    case 3: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

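// Returns the scale factor for scaled-index addressing modes; the scale
// matches the type's size in bytes (TIMES_1 through TIMES_8).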
ScaleFactor CodeGenerator::ScaleFactorForType(DataType::Type type) {
  switch (type) {
    case DataType::Type::kBool:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
      return TIMES_1;
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
      return TIMES_2;
    case DataType::Type::kInt32:
    case DataType::Type::kUint32:
    case DataType::Type::kFloat32:
    case DataType::Type::kReference:
      return TIMES_4;
    case DataType::Type::kInt64:
    case DataType::Type::kUint64:
    case DataType::Type::kFloat64:
      return TIMES_8;
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
}

}  // namespace art