/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"
#include "base/globals.h"
#include "mirror/method_type.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_riscv64
#include "code_generator_riscv64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "base/casts.h"
#include "base/leb128.h"
#include "class_linker.h"
#include "class_root-inl.h"
#include "code_generation_data.h"
#include "dex/bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
#include "graph_visualizer.h"
#include "gc/space/image_space.h"
#include "intern_table.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "oat/image.h"
#include "oat/stack_map.h"
#include "stack_map_stream.h"
#include "string_builder_append.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"

namespace art HIDDEN {

// Return whether a location is consistent with a type.
static bool CheckType(DataType::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
  } else if (location.IsRegisterPair()) {
    return type == DataType::Type::kInt64;
  } else if (location.IsFpuRegisterPair()) {
    return type == DataType::Type::kFloat64;
  } else if (location.IsStackSlot()) {
    return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
           || (type == DataType::Type::kFloat32)
           || (type == DataType::Type::kReference);
  } else if (location.IsDoubleStackSlot()) {
    return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == DataType::Type::kReference;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == DataType::Type::kInt64;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == DataType::Type::kFloat32;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == DataType::Type::kFloat64);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}
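
// Illustrative sketch (not part of the original file): CheckType() pairs value
// types with compatible location kinds. For example, a kFloat32 value is
// consistent with an FPU register or a single stack slot, but not with a core
// register, and a kInt64 value needs a double-width slot:
//
//   CheckType(DataType::Type::kFloat32, Location::FpuRegisterLocation(0));  // true
//   CheckType(DataType::Type::kFloat32, Location::RegisterLocation(0));     // false
//   CheckType(DataType::Type::kInt64, Location::DoubleStackSlot(8));        // true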

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  HConstInputsRef inputs = instruction->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
      << inputs[i]->GetType() << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      DataType::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
        << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
        << environment->GetLocationAt(i);
    }
  }
  return true;
}

bool CodeGenerator::EmitReadBarrier() const {
  return GetCompilerOptions().EmitReadBarrier();
}

bool CodeGenerator::EmitBakerReadBarrier() const {
  return kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
}

bool CodeGenerator::EmitNonBakerReadBarrier() const {
  return !kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
}

ReadBarrierOption CodeGenerator::GetCompilerReadBarrierOption() const {
  return EmitReadBarrier() ? kWithReadBarrier : kWithoutReadBarrier;
}

bool CodeGenerator::ShouldCheckGCCard(DataType::Type type,
                                      HInstruction* value,
                                      WriteBarrierKind write_barrier_kind) const {
  const CompilerOptions& options = GetCompilerOptions();
  const bool result =
      // Check the GC card in debug mode,
      options.EmitRunTimeChecksInDebugMode() &&
      // only for CC GC,
      options.EmitReadBarrier() &&
      // and if we eliminated the write barrier in WBE.
      !StoreNeedsWriteBarrier(type, value, write_barrier_kind) &&
      CodeGenerator::StoreNeedsWriteBarrier(type, value);

  DCHECK_IMPLIES(result, write_barrier_kind == WriteBarrierKind::kDontEmit);
  DCHECK_IMPLIES(
      result, !(GetGraph()->IsCompilingBaseline() && compiler_options_.ProfileBranches()));

  return result;
}
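
// Illustrative sketch (not part of the original file): a backend would
// typically consult ShouldCheckGCCard() when generating a reference store
// whose write barrier was elided by write barrier elimination (WBE). The
// names below (`codegen`, `field_type`, `value`, `write_barrier_kind`) are
// hypothetical stand-ins for the caller's context:
//
//   if (codegen->ShouldCheckGCCard(field_type, value, write_barrier_kind)) {
//     // Emit a debug-mode check that the card for the holder object is
//     // already marked, validating the WBE decision at run time.
//   }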

ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetScopedAllocator();
}

StackMapStream* CodeGenerator::GetStackMapStream() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetStackMapStream();
}

void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
                                         Handle<mirror::String> string) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitStringRoot(string_reference, string);
}

uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitStringRootIndex(string_reference);
}

void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
}

uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitClassRootIndex(type_reference);
}

void CodeGenerator::ReserveJitMethodTypeRoot(ProtoReference proto_reference,
                                             Handle<mirror::MethodType> method_type) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitMethodTypeRoot(proto_reference, method_type);
}

uint64_t CodeGenerator::GetJitMethodTypeRootIndex(ProtoReference proto_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitMethodTypeRootIndex(proto_reference);
}

void CodeGenerator::EmitJitRootPatches([[maybe_unused]] uint8_t* code,
                                       [[maybe_unused]] const uint8_t* roots_data) {
  DCHECK(code_generation_data_ != nullptr);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitMethodTypeRoots(), 0u);
}

uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
  return array_length->IsStringLength()
      ? mirror::String::CountOffset().Uint32Value()
      : mirror::Array::LengthOffset().Uint32Value();
}

uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
  DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
  return array_get->IsStringCharAt()
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
}
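
// Illustrative sketch (not part of the original file): these helpers let a
// backend fold the object header into its addressing mode. An ordinary int[]
// load uses mirror::Array::DataOffset(4), while a String.charAt() load uses
// mirror::String::ValueOffset(); either way the generated code computes
// something like
//
//   address = array_register + data_offset + (index << shift)
//
// where `shift` is log2 of the element size.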

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ((*block_order_)[current_block_index_], current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
    HBasicBlock* block = (*block_order_)[i];
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessors()[0];
  }
  return block;
}

class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};
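
// Usage note (illustrative, not part of the original file): DisassemblyScope
// is a RAII helper. Constructing it records the current assembler offset, and
// its destructor records the end offset, so wrapping the code emission for one
// HInstruction attributes the emitted byte range to that instruction, as
// Compile() does below:
//
//   DisassemblyScope disassembly_scope(current, *this);
//   current->Accept(instruction_visitor);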


void CodeGenerator::GenerateSlowPaths() {
  DCHECK(code_generation_data_ != nullptr);
  size_t code_start = 0;
  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
    SlowPathCode* slow_path = slow_path_ptr.get();
    current_slow_path_ = slow_path;
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    // Record the dex pc at the start of the slow path (required for Java line number mapping).
    MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
    slow_path->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    }
  }
  current_slow_path_ = nullptr;
}

void CodeGenerator::InitializeCodeGenerationData() {
  DCHECK(code_generation_data_ == nullptr);
  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
}

void CodeGenerator::Compile() {
  InitializeCodeGenerationData();

  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();

  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

  GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
                                   core_spill_mask_,
                                   fpu_spill_mask_,
                                   GetGraph()->GetNumberOfVRegs(),
                                   GetGraph()->IsCompilingBaseline(),
                                   GetGraph()->IsDebuggable(),
                                   GetGraph()->HasShouldDeoptimizeFlag());

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = (*block_order_)[current_block_index_];
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
    MaybeRecordNativeDebugInfoForBlockEntry(block->GetDexPc());
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (current->HasEnvironment()) {
        // Catch StackMaps are dealt with later on in `RecordCatchBlockInfo`.
        if (block->IsCatchBlock() && block->GetFirstInstruction() == current) {
          DCHECK(current->IsNop());
          continue;
        }

        // Create a stack map for HNop or any instruction which calls native code.
        // Note that we need a correct mapping for the native PC of the call instruction,
        // so the runtime's stack map is not sufficient since it is at the PC after the call.
        MaybeRecordNativeDebugInfo(current, block->GetDexPc());
      }
      DisassemblyScope disassembly_scope(current, *this);
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  GenerateSlowPaths();

  // Emit catch stack maps at the end of the stack map stream as expected by the
  // runtime exception handler.
  if (graph_->HasTryCatch()) {
    RecordCatchBlockInfo();
  }

  // Finalize instructions in the assembler.
  Finalize();

  GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
}

void CodeGenerator::Finalize() {
  GetAssembler()->FinalizeCode();
}

void CodeGenerator::EmitLinkerPatches(
    [[maybe_unused]] ArenaVector<linker::LinkerPatch>* linker_patches) {
  // No linker patches by default.
}

bool CodeGenerator::NeedsThunkCode([[maybe_unused]] const linker::LinkerPatch& patch) const {
  // Code generators that create patches requiring thunk compilation should override this function.
  return false;
}

void CodeGenerator::EmitThunkCode([[maybe_unused]] const linker::LinkerPatch& patch,
                                  [[maybe_unused]] /*out*/ ArenaVector<uint8_t>* code,
                                  [[maybe_unused]] /*out*/ std::string* debug_name) {
  // Code generators that create patches requiring thunk compilation should override this function.
  LOG(FATAL) << "Unexpected call to EmitThunkCode().";
}

void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_safepoint_spill_size,
                                             size_t number_of_out_slots,
                                             const ArenaVector<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(!block_order.empty());
  DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = RoundUp(
      (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_safepoint_spill_size, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        first_register_slot_in_slow_path_
        + maximum_safepoint_spill_size
        + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}
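
// Worked example (illustrative, with assumed numbers, not part of the
// original file): with kVRegSize == 4, 16-byte kStackAlignment, 3 out slots,
// 2 spill slots, a 32-byte safepoint spill area, no should-deoptimize flag
// and a 12-byte frame entry spill, the computation above gives:
//
//   first_register_slot_in_slow_path_ = RoundUp((3 + 2) * 4, slots_alignment)
//   frame size = RoundUp(first_register_slot_in_slow_path_ + 32 + 0 + 12, 16)
//
// Only a method with no spill slots, no allocated callee-save registers, leaf
// status and no current-method requirement gets the minimal frame (just the
// pushed PC, if the call instruction pushes one).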

void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnMainOnly);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    MethodLoadKind method_load_kind = call->GetMethodLoadKind();
    CodePtrLocation code_ptr_location = call->GetCodePtrLocation();
    if (code_ptr_location == CodePtrLocation::kCallCriticalNative) {
      locations->AddTemp(Location::RequiresRegister());  // For target method.
    }
    if (code_ptr_location == CodePtrLocation::kCallCriticalNative ||
        method_load_kind == MethodLoadKind::kRecursive) {
      // For `kCallCriticalNative` we need the current method as the hidden argument
      // if we reach the dlsym lookup stub for @CriticalNative.
      locations->SetInAt(call->GetCurrentMethodIndex(), visitor->GetMethodLocation());
    } else {
      locations->AddTemp(visitor->GetMethodLocation());
      if (method_load_kind == MethodLoadKind::kRuntimeCall) {
        locations->SetInAt(call->GetCurrentMethodIndex(), Location::RequiresRegister());
      }
    }
  } else if (!invoke->IsInvokePolymorphic()) {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

void CodeGenerator::PrepareCriticalNativeArgumentMoves(
    HInvokeStaticOrDirect* invoke,
    /*inout*/InvokeDexCallingConventionVisitor* visitor,
    /*out*/HParallelMove* parallel_move) {
  LocationSummary* locations = invoke->GetLocations();
  for (size_t i = 0, num = invoke->GetNumberOfArguments(); i != num; ++i) {
    Location in_location = locations->InAt(i);
    DataType::Type type = invoke->InputAt(i)->GetType();
    DCHECK_NE(type, DataType::Type::kReference);
    Location out_location = visitor->GetNextLocation(type);
    if (out_location.IsStackSlot() || out_location.IsDoubleStackSlot()) {
      // Stack arguments will need to be moved after adjusting the SP.
      parallel_move->AddMove(in_location, out_location, type, /*instruction=*/ nullptr);
    } else {
      // Register arguments should have been assigned their final locations for register allocation.
      DCHECK(out_location.Equals(in_location)) << in_location << " -> " << out_location;
    }
  }
}

void CodeGenerator::FinishCriticalNativeFrameSetup(size_t out_frame_size,
                                                   /*inout*/HParallelMove* parallel_move) {
  DCHECK_NE(out_frame_size, 0u);
  IncreaseFrame(out_frame_size);
  // Adjust the source stack offsets by `out_frame_size`, i.e. the additional
  // frame size needed for outgoing stack arguments.
  for (size_t i = 0, num = parallel_move->NumMoves(); i != num; ++i) {
    MoveOperands* operands = parallel_move->MoveOperandsAt(i);
    Location source = operands->GetSource();
    if (operands->GetSource().IsStackSlot()) {
      operands->SetSource(Location::StackSlot(source.GetStackIndex() + out_frame_size));
    } else if (operands->GetSource().IsDoubleStackSlot()) {
      operands->SetSource(Location::DoubleStackSlot(source.GetStackIndex() + out_frame_size));
    }
  }
  // Emit the moves.
  GetMoveResolver()->EmitNativeCode(parallel_move);
}
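
// Illustrative example (assumed numbers, not part of the original file): if a
// @CriticalNative call needs 16 bytes of outgoing stack arguments, the SP is
// dropped by 16 first, so a source that was at stack slot 8 before the
// adjustment is at 8 + 16 = 24 relative to the new SP. The loop above rewrites
// each stack source accordingly before the parallel move resolver emits the
// actual copies.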

std::string_view CodeGenerator::GetCriticalNativeShorty(HInvokeStaticOrDirect* invoke) {
  ScopedObjectAccess soa(Thread::Current());
  DCHECK(invoke->GetResolvedMethod()->IsCriticalNative());
  return invoke->GetResolvedMethod()->GetShortyView();
}

void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
  MethodReference method_reference(invoke->GetMethodReference());
  MoveConstant(temp, method_reference.index);

  // The access check is unnecessary but we do not want to introduce
  // extra entrypoints for the codegens that do not support some
  // invoke type and fall back to the runtime call.

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
    case kInterface:
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }

  InvokeRuntime(entrypoint, invoke, slow_path);
}

void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
  MethodReference method_reference(invoke->GetMethodReference());
  MoveConstant(invoke->GetLocations()->GetTemp(0), method_reference.index);

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kVirtual:
      entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kInterface:
      entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }
  InvokeRuntime(entrypoint, invoke);
}

void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke,
                                                  SlowPathCode* slow_path) {
  // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
  // method index) since it requires multiple pieces of information from the instruction
  // (registers A, B, H). Not using the reservation has no effect on the registers used in the
  // runtime call.
  QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
  InvokeRuntime(entrypoint, invoke, slow_path);
}

void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
  QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
  InvokeRuntime(entrypoint, invoke);
}

void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
                                                       Location out) {
  ArenaAllocator* allocator = GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
  locations->SetOut(out);
  instruction->GetLocations()->SetInAt(instruction->FormatIndex(),
                                       Location::ConstantLocation(instruction->GetFormat()));

  uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
  uint32_t f = format;
  PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
  size_t stack_offset = static_cast<size_t>(pointer_size);  // Start after the ArtMethod*.
  for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
    StringBuilderAppend::Argument arg_type =
        static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
    switch (arg_type) {
      case StringBuilderAppend::Argument::kStringBuilder:
      case StringBuilderAppend::Argument::kString:
      case StringBuilderAppend::Argument::kCharArray:
        static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
        FALLTHROUGH_INTENDED;
      case StringBuilderAppend::Argument::kBoolean:
      case StringBuilderAppend::Argument::kChar:
      case StringBuilderAppend::Argument::kInt:
      case StringBuilderAppend::Argument::kFloat:
        locations->SetInAt(i, Location::StackSlot(stack_offset));
        break;
      case StringBuilderAppend::Argument::kLong:
      case StringBuilderAppend::Argument::kDouble:
        stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
        locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
        // Skip the low word, let the common code skip the high word.
        stack_offset += sizeof(uint32_t);
        break;
      default:
        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
            << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
        UNREACHABLE();
    }
    f >>= StringBuilderAppend::kBitsPerArg;
    stack_offset += sizeof(uint32_t);
  }
  DCHECK_EQ(f, 0u);
  DCHECK_EQ(stack_offset,
            static_cast<size_t>(pointer_size) + kVRegSize * instruction->GetNumberOfOutVRegs());
}
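
// Worked example (illustrative, not part of the original file): the append
// format packs one argument kind per kBitsPerArg-bit group, consumed from the
// low bits first. For an append of (StringBuilder, String, long) on a 64-bit
// target, the loop above assigns slots starting right after the ArtMethod*:
//
//   arg 0 (StringBuilder): slot at offset 8, offset advances to 12
//   arg 1 (String):        slot at offset 12, offset advances to 16
//   arg 2 (long):          already 8-aligned, double slot at 16, advances to 24
//
// which matches the final DCHECK: 8 + 4 vregs * kVRegSize == 24.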

void CodeGenerator::CreateUnresolvedFieldLocationSummary(
    HInstruction* field_access,
    DataType::Type field_type,
    const FieldAccessCallingConvention& calling_convention) {
  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  ArenaAllocator* allocator = GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);

  locations->AddTemp(calling_convention.GetFieldIndexLocation());

  if (is_instance) {
    // Add the `this` object for instance field accesses.
    locations->SetInAt(0, calling_convention.GetObjectLocation());
  }

  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that we are forced to special-case
  // the accesses to floating-point values.
  if (is_get) {
    if (DataType::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the register
      // allocator expects it in a floating-point register.
      // Note: we don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap with
      // the input or field index.
      // The transfer between the two will be done at codegen level.
      locations->SetOut(calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetOut(calling_convention.GetReturnLocation(field_type));
    }
  } else {
    size_t set_index = is_instance ? 1 : 0;
    if (DataType::IsFloatingPointType(field_type)) {
      // The set value comes from a float location while the calling convention
      // expects it in a regular register location. Allocate a temp for it and
      // make the transfer at codegen.
      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetInAt(set_index,
          calling_convention.GetSetValueLocation(field_type, is_instance));
    }
  }
}

void CodeGenerator::GenerateUnresolvedFieldAccess(
    HInstruction* field_access,
    DataType::Type field_type,
    uint32_t field_index,
    const FieldAccessCallingConvention& calling_convention) {
  LocationSummary* locations = field_access->GetLocations();

  MoveConstant(locations->GetTemp(0), field_index);

  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  if (!is_get && DataType::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using the temp location directly is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
                 locations->InAt(is_instance ? 1 : 0),
                 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
                                                    : DataType::Type::kInt32));
  }

  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
  switch (field_type) {
    case DataType::Type::kBool:
      entrypoint = is_instance
          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt8:
      entrypoint = is_instance
          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
      break;
    case DataType::Type::kUint16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
      break;
    case DataType::Type::kInt32:
    case DataType::Type::kFloat32:
      entrypoint = is_instance
          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
          : (is_get ? kQuickGet32Static : kQuickSet32Static);
      break;
    case DataType::Type::kReference:
      entrypoint = is_instance
          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
      break;
    case DataType::Type::kInt64:
    case DataType::Type::kFloat64:
      entrypoint = is_instance
          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
          : (is_get ? kQuickGet64Static : kQuickSet64Static);
      break;
    default:
      LOG(FATAL) << "Invalid type " << field_type;
  }
  InvokeRuntime(entrypoint, field_access);

  if (is_get && DataType::IsFloatingPointType(field_type)) {
    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
  }
}

void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK_EQ(cls->InputCount(), 1u);
  LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
      cls, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_type_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK(!cls->MustGenerateClinitCheck());
  LocationSummary* locations = cls->GetLocations();
  MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
  if (cls->NeedsAccessCheck()) {
    CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
    InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls);
  } else {
    CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
    InvokeRuntime(kQuickResolveType, cls);
  }
}

void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
    HLoadMethodHandle* method_handle,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_handle->InputCount(), 1u);
  LocationSummary* locations =
      new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_handle, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
  LocationSummary* locations = method_handle->GetLocations();
  MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
  CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodHandle, method_handle);
}

void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
    HLoadMethodType* method_type,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_type->InputCount(), 1u);
  LocationSummary* locations =
      new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_type, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
  LocationSummary* locations = method_type->GetLocations();
  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
  CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodType, method_type);
}

static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
  Runtime* runtime = Runtime::Current();
  const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
      runtime->GetHeap()->GetBootImageSpaces();
  // Check that the `object` is in the expected section of one of the boot image files.
  DCHECK(std::any_of(boot_image_spaces.begin(),
                     boot_image_spaces.end(),
                     [object, section](gc::space::ImageSpace* space) {
                       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
                       uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
                       return space->GetImageHeader().GetImageSection(section).Contains(offset);
                     }));
  uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
  uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
  return dchecked_integral_cast<uint32_t>(offset);
}

uint32_t CodeGenerator::GetBootImageOffset(ObjPtr<mirror::Object> object) {
  return GetBootImageOffsetImpl(object.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
  DCHECK(klass != nullptr);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::String> string = load_string->GetString().Get();
  DCHECK(string != nullptr);
  return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}

uint32_t CodeGenerator::GetBootImageOffset(HInvoke* invoke) {
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image objects are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(ClassRoot class_root) NO_THREAD_SAFETY_ANALYSIS {
  ObjPtr<mirror::Class> klass = GetClassRoot<kWithoutReadBarrier>(class_root);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffsetOfIntrinsicDeclaringClass(HInvoke* invoke)
    NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_NE(invoke->GetIntrinsic(), Intrinsics::kNone);
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass<kWithoutReadBarrier>();
  return GetBootImageOffsetImpl(declaring_class.Ptr(), ImageHeader::kSectionObjects);
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKs below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  ArenaAllocator* allocator = GetGraph()->GetAllocator();
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    env->AllocateLocations(allocator);
  }
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr) {
      if (locations->CanCall()) {
        MarkNotLeaf();
        if (locations->NeedsSuspendCheckEntry()) {
          MarkNeedsSuspendCheckEntry();
        }
      } else if (locations->Intrinsified() &&
                 instruction->IsInvokeStaticOrDirect() &&
                 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
        return;
      }
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                     const CompilerOptions& compiler_options,
                                                     OptimizingCompilerStats* stats) {
  ArenaAllocator* allocator = graph->GetAllocator();
  switch (compiler_options.GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_riscv64
    case InstructionSet::kRiscv64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) riscv64::CodeGeneratorRISCV64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
    }
#endif
    default:
      UNUSED(allocator);
      UNUSED(graph);
      UNUSED(stats);
      return nullptr;
  }
}

CodeGenerator::CodeGenerator(HGraph* graph,
                             size_t number_of_core_registers,
                             size_t number_of_fpu_registers,
                             size_t number_of_register_pairs,
                             uint32_t core_callee_save_mask,
                             uint32_t fpu_callee_save_mask,
                             const CompilerOptions& compiler_options,
                             OptimizingCompilerStats* stats,
                             const art::ArrayRef<const bool>& unimplemented_intrinsics)
    : frame_size_(0),
      core_spill_mask_(0),
      fpu_spill_mask_(0),
      first_register_slot_in_slow_path_(0),
      allocated_registers_(RegisterSet::Empty()),
      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
                                                                      kArenaAllocCodeGenerator)),
      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
                                                                     kArenaAllocCodeGenerator)),
      number_of_core_registers_(number_of_core_registers),
      number_of_fpu_registers_(number_of_fpu_registers),
      number_of_register_pairs_(number_of_register_pairs),
      core_callee_save_mask_(core_callee_save_mask),
      fpu_callee_save_mask_(fpu_callee_save_mask),
      block_order_(nullptr),
      disasm_info_(nullptr),
      stats_(stats),
      graph_(graph),
      compiler_options_(compiler_options),
      current_slow_path_(nullptr),
      current_block_index_(0),
      is_leaf_(true),
      needs_suspend_check_entry_(false),
      requires_current_method_(false),
      code_generation_data_(),
      unimplemented_intrinsics_(unimplemented_intrinsics) {
  if (GetGraph()->IsCompilingOsr()) {
    // Make OSR methods have all registers spilled; this simplifies the logic of
    // jumping to the compiled code directly.
    for (size_t i = 0; i < number_of_core_registers_; ++i) {
      if (IsCoreCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::RegisterLocation(i));
      }
    }
    for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
      if (IsFloatingPointCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::FpuRegisterLocation(i));
      }
    }
  }
  if (GetGraph()->IsCompilingBaseline()) {
    // We need the current method in case we reach the hotness threshold. As a
    // side effect this makes the frame non-empty.
    SetRequiresCurrentMethod();
  }
}

CodeGenerator::~CodeGenerator() {}

size_t CodeGenerator::GetNumberOfJitRoots() const {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetNumberOfJitRoots();
}

static void CheckCovers(uint32_t dex_pc,
                        const HGraph& graph,
                        const CodeInfo& code_info,
                        const ArenaVector<HSuspendCheck*>& loop_headers,
                        ArenaVector<size_t>* covered) {
  for (size_t i = 0; i < loop_headers.size(); ++i) {
    if (loop_headers[i]->GetDexPc() == dex_pc) {
      if (graph.IsCompilingOsr()) {
        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
      }
      ++(*covered)[i];
    }
  }
}

// Debug helper to ensure loop entries in compiled code are matched by
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                            const CodeInfo& code_info,
                                            const dex::CodeItem& code_item) {
  if (graph.HasTryCatch()) {
    // One can write loops through try/catch, which we do not support for OSR anyway.
    return;
  }
  ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (HBasicBlock* block : graph.GetReversePostOrder()) {
    if (block->IsLoopHeader()) {
      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
      if (suspend_check != nullptr && !suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
        loop_headers.push_back(suspend_check);
      }
    }
  }
  ArenaVector<size_t> covered(
      loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
                                                                      &code_item)) {
    const uint32_t dex_pc = pair.DexPc();
    const Instruction& instruction = pair.Inst();
    if (instruction.IsBranch()) {
      uint32_t target = dex_pc + instruction.GetTargetOffset();
      CheckCovers(target, graph, code_info, loop_headers, &covered);
    } else if (instruction.IsSwitch()) {
      DexSwitchTable table(instruction, dex_pc);
      uint16_t num_entries = table.GetNumEntries();
      size_t offset = table.GetFirstValueIndex();

      // Use a larger loop counter type to avoid overflow issues.
      for (size_t i = 0; i < num_entries; ++i) {
        // The target of the case.
        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
        CheckCovers(target, graph, code_info, loop_headers, &covered);
      }
    }
  }

  for (size_t i = 0; i < covered.size(); ++i) {
    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
  }
}

ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
  ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
  if (kIsDebugBuild && code_item != nullptr) {
    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
  }
  return stack_map;
}

// Returns whether stack map dex register info is needed for the instruction.
//
// The following cases mandate having a dex register map:
//  * Deoptimization
//    when we need to obtain the values to restore the actual vregisters for the interpreter.
//  * Debuggability
//    when we want to observe the values / asynchronously deoptimize.
//  * Monitor operations
//    to allow dumping locked dex registers in a stack trace for non-debuggable code.
//  * On-stack-replacement (OSR)
//    when entering code compiled for OSR from the interpreter, we need to initialize the
//    compiled-code values with the values from the vregisters.
//  * Method-local catch blocks
//    a catch block must see the environment of the instruction from the same method that can
//    throw to this block.
static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
  HGraph* graph = instruction->GetBlock()->GetGraph();
  return instruction->IsDeoptimize() ||
         graph->IsDebuggable() ||
         graph->HasMonitorOperations() ||
         osr ||
         instruction->CanThrowIntoCatchBlock();
}
1139 
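// Records a minimal stack map (no dex register or inline info) at the
// assembler's current code position; used for the method frame entry and for
// block entries.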
void CodeGenerator::RecordPcInfoForFrameOrBlockEntry(uint32_t dex_pc) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  stack_map_stream->BeginStackMapEntry(dex_pc, GetAssembler()->CodePosition());
  stack_map_stream->EndStackMapEntry();
}

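// Convenience overload that records the stack map at the assembler's current
// code position.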
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  // Only for native-debuggable apps do we take the dex_pc from the instruction itself. In
  // the regular case, we retrieve the dex_pc from the instruction's environment.
  DCHECK_IMPLIES(native_debug_info, GetCompilerOptions().GetNativeDebuggable());
  DCHECK_IMPLIES(!native_debug_info, instruction->HasEnvironment()) << *instruction;
  RecordPcInfo(instruction,
               native_debug_info ? instruction->GetDexPc() : kNoDexPc,
               GetAssembler()->CodePosition(),
               slow_path,
               native_debug_info);
}

void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 uint32_t native_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  DCHECK(instruction != nullptr);
  // Only for native-debuggable apps do we take the dex_pc from the instruction itself. In
  // the regular case, we retrieve the dex_pc from the instruction's environment.
  DCHECK_IMPLIES(native_debug_info, GetCompilerOptions().GetNativeDebuggable());
  DCHECK_IMPLIES(!native_debug_info, instruction->HasEnvironment()) << *instruction;

  LocationSummary* locations = instruction->GetLocations();
  uint32_t register_mask = locations->GetRegisterMask();
  DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of a slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the spilled caller-saves from the
    // mask, since they will be overwritten by the callee.
    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
    register_mask &= ~spills;
  } else {
    // The register mask must be a subset of the callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t inlining_depth = 0;
  HEnvironment* const environment = instruction->GetEnvironment();
  if (environment != nullptr) {
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      ++inlining_depth;
    }
    outer_dex_pc = outer_environment->GetDexPc();
  }

  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
  bool osr =
      instruction->IsSuspendCheck() &&
      (info != nullptr) &&
      graph_->IsCompilingOsr() &&
      (inlining_depth == 0);
  StackMap::Kind kind = native_debug_info
      ? StackMap::Kind::Debug
      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
  bool needs_vreg_info = NeedsVregInfo(instruction, osr);
  StackMapStream* stack_map_stream = GetStackMapStream();
  stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       kind,
                                       needs_vreg_info);

  EmitEnvironment(environment, slow_path, needs_vreg_info);
  stack_map_stream->EndStackMapEntry();

  if (osr) {
    DCHECK_EQ(info->GetSuspendCheck(), instruction);
    DCHECK(info->IsIrreducible());
    DCHECK(environment != nullptr);
    if (kIsDebugBuild) {
      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
        HInstruction* in_environment = environment->GetInstructionAt(i);
        if (in_environment != nullptr) {
          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
          Location location = environment->GetLocationAt(i);
          DCHECK(location.IsStackSlot() ||
                 location.IsDoubleStackSlot() ||
                 location.IsConstant() ||
                 location.IsInvalid());
          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
          }
        }
      }
    }
  }
}

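// Returns whether the most recently recorded stack map is attached to the
// current native pc, i.e. whether emitting another stack map here would
// collide with it.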
bool CodeGenerator::HasStackMapAtCurrentPc() {
  uint32_t pc = GetAssembler()->CodeSize();
  StackMapStream* stack_map_stream = GetStackMapStream();
  size_t count = stack_map_stream->GetNumberOfStackMaps();
  if (count == 0) {
    return false;
  }
  return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
}

void CodeGenerator::MaybeRecordNativeDebugInfoForBlockEntry(uint32_t dex_pc) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfoForFrameOrBlockEntry(dex_pc);
  }
}

void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                               uint32_t dex_pc,
                                               SlowPathCode* slow_path) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfo(instruction, slow_path, /* native_debug_info= */ true);
  }
}

void CodeGenerator::RecordCatchBlockInfo() {
  StackMapStream* stack_map_stream = GetStackMapStream();

  for (HBasicBlock* block : *block_order_) {
    if (!block->IsCatchBlock()) {
      continue;
    }

    // Get the outer dex_pc. In debug builds we also save the full dex_pc list for
    // DCHECK verification purposes.
    std::vector<uint32_t> dex_pc_list_for_verification;
    if (kIsDebugBuild) {
      dex_pc_list_for_verification.push_back(block->GetDexPc());
    }
    DCHECK(block->GetFirstInstruction()->IsNop());
    DCHECK(block->GetFirstInstruction()->AsNop()->NeedsEnvironment());
    HEnvironment* const environment = block->GetFirstInstruction()->GetEnvironment();
    DCHECK(environment != nullptr);
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      if (kIsDebugBuild) {
        dex_pc_list_for_verification.push_back(outer_environment->GetDexPc());
      }
    }

    if (kIsDebugBuild) {
      // dex_pc_list_for_verification is filled from innermost to outermost. Reverse it,
      // since we are expected to pass it from outermost to innermost.
      std::reverse(dex_pc_list_for_verification.begin(), dex_pc_list_for_verification.end());
      DCHECK_EQ(dex_pc_list_for_verification.front(), outer_environment->GetDexPc());
    }

    uint32_t native_pc = GetAddressOf(block);
    stack_map_stream->BeginStackMapEntry(outer_environment->GetDexPc(),
                                         native_pc,
                                         /* register_mask= */ 0,
                                         /* sp_mask= */ nullptr,
                                         StackMap::Kind::Catch,
                                         /* needs_vreg_info= */ true,
                                         dex_pc_list_for_verification);

    EmitEnvironment(environment,
                    /* slow_path= */ nullptr,
                    /* needs_vreg_info= */ true,
                    /* is_for_catch_handler= */ true);

    stack_map_stream->EndStackMapEntry();
  }
}

void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->AddSlowPath(slow_path);
}

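// Records the location of every dex register in `environment`. Wide (64-bit)
// values occupy two consecutive vreg entries, which is why the loop index is
// advanced an extra step for them.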
void CodeGenerator::EmitVRegInfo(HEnvironment* environment,
                                 SlowPathCode* slow_path,
                                 bool is_for_catch_handler) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    using Kind = DexRegisterLocation::Kind;
    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        stack_map_stream->AddDexRegisterEntry(
            Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister : {
        DCHECK(!is_for_catch_handler);
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister : {
        DCHECK(!is_for_catch_handler);
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair : {
        DCHECK(!is_for_catch_handler);
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair : {
        DCHECK(!is_for_catch_handler);
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}

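// On entry to a catch handler only the catch phis hold meaningful values;
// every other vreg is recorded as kNone. Catch phis are always allocated to
// stack slots, so their locations are valid regardless of which throwing site
// transferred control here.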
void CodeGenerator::EmitVRegInfoOnlyCatchPhis(HEnvironment* environment) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  DCHECK(environment->GetHolder()->GetBlock()->IsCatchBlock());
  DCHECK_EQ(environment->GetHolder()->GetBlock()->GetFirstInstruction(), environment->GetHolder());
  HInstruction* current_phi = environment->GetHolder()->GetBlock()->GetFirstPhi();
  for (size_t vreg = 0; vreg < environment->Size(); ++vreg) {
    while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
      HInstruction* next_phi = current_phi->GetNext();
      DCHECK(next_phi == nullptr ||
             current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
          << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
      current_phi = next_phi;
    }

    if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
    } else {
      Location location = current_phi->GetLocations()->Out();
      switch (location.GetKind()) {
        case Location::kStackSlot: {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
                                                location.GetStackIndex());
          break;
        }
        case Location::kDoubleStackSlot: {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
                                                location.GetStackIndex());
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
                                                location.GetHighStackIndex(kVRegSize));
          ++vreg;
          DCHECK_LT(vreg, environment->Size());
          break;
        }
        default: {
          LOG(FATAL) << "All catch phis must be allocated to a stack slot. Unexpected kind "
                     << location.GetKind();
          UNREACHABLE();
        }
      }
    }
  }
}

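// Emits dex register and inline info for `environment` and all of its parent
// environments. Parents are emitted first, so inline info entries are opened
// from the outermost frame to the innermost one.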
void CodeGenerator::EmitEnvironment(HEnvironment* environment,
                                    SlowPathCode* slow_path,
                                    bool needs_vreg_info,
                                    bool is_for_catch_handler,
                                    bool innermost_environment) {
  if (environment == nullptr) return;

  StackMapStream* stack_map_stream = GetStackMapStream();
  bool emit_inline_info = environment->GetParent() != nullptr;

  if (emit_inline_info) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(),
                    slow_path,
                    needs_vreg_info,
                    is_for_catch_handler,
                    /* innermost_environment= */ false);
    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           needs_vreg_info ? environment->Size() : 0,
                                           &graph_->GetDexFile(),
                                           this);
  }

  // If a dex register map is not required, we just won't emit it.
  if (needs_vreg_info) {
    if (innermost_environment && is_for_catch_handler) {
      EmitVRegInfoOnlyCatchPhis(environment);
    } else {
      EmitVRegInfo(environment, slow_path, is_for_catch_handler);
    }
  }

  if (emit_inline_info) {
    stack_map_stream->EndInlineInfoEntry();
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  return null_check->IsEmittedAtUseSite();
}

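// If `instr` has an implicit null check folded into it, record a stack map at
// the current pc so the runtime can map the faulting pc back to a dex pc and
// environment when the implicit check faults.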
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  HNullCheck* null_check = instr->GetImplicitNullCheck();
  if (null_check != nullptr) {
    DCHECK(compiler_options_.GetImplicitNullChecks());
    RecordPcInfo(null_check);
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as a leaf (and to eliminate the
  // HSuspendCheck from the entry block). However, it will still get a valid stack frame
  // because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers, and we also need to set up the stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
                                                          HParallelMove* spills) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());
  DCHECK(block->GetFirstInstruction() == spills);

  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
    Location dest = spills->MoveOperandsAt(i)->GetDestination();
    // All parallel moves in loop headers are spills.
    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
    // Clear the stack bit marking a reference. Do not bother to check whether the spill is
    // actually a reference spill; clearing bits that are already zero is harmless.
    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
  }
}

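// Bundles two independent moves into a single HParallelMove so the move
// resolver can sequence them correctly (e.g. via a swap or a temporary) when
// the source of one move is the destination of the other.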
void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      DataType::Type type1,
                                      Location from2,
                                      Location to2,
                                      DataType::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetAllocator());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

bool CodeGenerator::StoreNeedsWriteBarrier(DataType::Type type,
                                           HInstruction* value,
                                           WriteBarrierKind write_barrier_kind) const {
  // Check that a null value is not represented as an integer constant.
  DCHECK_IMPLIES(type == DataType::Type::kReference, !value->IsIntConstant());
  // Branch profiling currently doesn't support running optimizations, so the
  // precomputed `write_barrier_kind` cannot be relied on in that configuration.
  return (GetGraph()->IsCompilingBaseline() && compiler_options_.ProfileBranches())
            ? CodeGenerator::StoreNeedsWriteBarrier(type, value)
            : write_barrier_kind != WriteBarrierKind::kDontEmit;
}

void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      // The 'CanTriggerGC' side effect is used to restrict optimization of instructions which
      // depend on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points.
      // However, if execution never returns to the compiled code from a GC point this restriction
      // is unnecessary - in particular for fatal slow paths which might trigger GC.
      DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
             instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (EmitNonBakerReadBarrier() &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription() << std::endl
          << "Instruction and args: " << instruction->DumpWithArgs();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might still have
    // it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadMethodType() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvoke() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

LocationSummary* CodeGenerator::CreateSystemArrayCopyLocationSummary(
    HInvoke* invoke, int32_t length_threshold, size_t num_temps) {
  // Check for known failures that will force us to bail out to the runtime;
  // in those cases, just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstantOrNull();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstantOrNull();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return nullptr;
  }

  // The length must be >= 0. If a positive `length_threshold` is provided, lengths
  // greater than or equal to the threshold are also handled by the normal implementation.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstantOrNull();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0 || (length_threshold > 0 && len >= length_threshold)) {
      // Just call as normal.
      return nullptr;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return nullptr;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return nullptr;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  if (num_temps != 0u) {
    locations->AddRegisterTemps(num_temps);
  }
  return locations;
}

void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 const uint8_t* roots_data,
                                 /*out*/std::vector<Handle<mirror::Object>>* roots) {
  code_generation_data_->EmitJitRoots(roots);
  EmitJitRootPatches(code, roots_data);
}

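// Selects the allocation entrypoint matching the array's component size.
// For example, a `new long[n]` has a component size shift of 3 and therefore
// uses kQuickAllocArrayResolved64.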
QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
  switch (new_array->GetComponentSizeShift()) {
    case 0: return kQuickAllocArrayResolved8;
    case 1: return kQuickAllocArrayResolved16;
    case 2: return kQuickAllocArrayResolved32;
    case 3: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

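// Maps a data type to the addressing-mode scale factor corresponding to its
// size in memory: 1, 2, 4, or 8 bytes.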
ScaleFactor CodeGenerator::ScaleFactorForType(DataType::Type type) {
  switch (type) {
    case DataType::Type::kBool:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
      return TIMES_1;
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
      return TIMES_2;
    case DataType::Type::kInt32:
    case DataType::Type::kUint32:
    case DataType::Type::kFloat32:
    case DataType::Type::kReference:
      return TIMES_4;
    case DataType::Type::kInt64:
    case DataType::Type::kUint64:
    case DataType::Type::kFloat64:
      return TIMES_8;
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
}

}  // namespace art