/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips
#include "code_generator_mips.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
#endif

#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/compiler_driver.h"
#include "graph_visualizer.h"
#include "intern_table.h"
#include "intrinsics.h"
#include "leb128.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "ssa_liveness_analysis.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"

namespace art {

// If true, we record the static and direct invokes in the invoke infos.
static constexpr bool kEnableDexLayoutOptimizations = false;

// Return whether a location is consistent with a type.
static bool CheckType(Primitive::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return Primitive::IsIntegralType(type) || (type == Primitive::kPrimNot);
  } else if (location.IsRegisterPair()) {
    return type == Primitive::kPrimLong;
  } else if (location.IsFpuRegisterPair()) {
    return type == Primitive::kPrimDouble;
  } else if (location.IsStackSlot()) {
    return (Primitive::IsIntegralType(type) && type != Primitive::kPrimLong)
           || (type == Primitive::kPrimFloat)
           || (type == Primitive::kPrimNot);
  } else if (location.IsDoubleStackSlot()) {
    return (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return Primitive::IsIntegralType(type) && (type != Primitive::kPrimLong);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == Primitive::kPrimNot;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == Primitive::kPrimLong;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == Primitive::kPrimFloat;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == Primitive::kPrimDouble);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  HConstInputsRef inputs = instruction->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
      << inputs[i]->GetType() << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      Primitive::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
        << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
        << environment->GetLocationAt(i);
    }
  }
  return true;
}

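// Returns the byte offset of the `index`-th GcRoot<mirror::Object> entry in a cache
// object array.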
size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return sizeof(GcRoot<mirror::Object>) * index;
}

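// Returns the byte offset of the `index`-th pointer-sized entry, using the target
// instruction set's pointer size.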
size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
  PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
  return static_cast<size_t>(pointer_size) * index;
}

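// An HArrayLength may denote either an array's length or a string's count; return the
// offset of whichever field matches.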
uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
  return array_length->IsStringLength()
      ? mirror::String::CountOffset().Uint32Value()
      : mirror::Array::LengthOffset().Uint32Value();
}

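// An HArrayGet flagged as String.charAt reads the string's value array; such loads
// must be char-typed (see the DCHECK below).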
uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
  DCHECK(array_get->GetType() == Primitive::kPrimChar || !array_get->IsStringCharAt());
  return array_get->IsStringCharAt()
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(Primitive::ComponentSize(array_get->GetType())).Uint32Value();
}

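// Returns whether `next` (skipping empty blocks) is the block emitted right after
// `current`, in which case the branch at the end of `current` can typically be elided.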
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ((*block_order_)[current_block_index_], current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

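// Returns the next block in the emission order that is not a trivial jump, or nullptr
// if there is none.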
HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
    HBasicBlock* block = (*block_order_)[i];
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

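// Skips over blocks that contain nothing but a jump to their single successor.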
HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessors()[0];
  }
  return block;
}

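// RAII helper recording the native code interval generated for a single HInstruction,
// so that disassembly output can be annotated per instruction.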
class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};


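// Emits the native code of all slow paths collected during the main code generation
// pass. Slow paths are placed out of line, after the main code.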
void CodeGenerator::GenerateSlowPaths() {
  size_t code_start = 0;
  for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
    SlowPathCode* slow_path = slow_path_unique_ptr.get();
    current_slow_path_ = slow_path;
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    // Record the dex pc at the start of the slow path (required for Java line number mapping).
    MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
    slow_path->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    }
  }
  current_slow_path_ = nullptr;
}

void CodeGenerator::Compile(CodeAllocator* allocator) {
  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();

  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = (*block_order_)[current_block_index_];
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
    MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (current->HasEnvironment()) {
        // Create a stack map for HNativeDebugInfo or any instruction that calls native code.
        // Note that we need a correct mapping for the native PC of the call instruction,
        // so the runtime's stack map is not sufficient since it is at the PC after the call.
        MaybeRecordNativeDebugInfo(current, block->GetDexPc());
      }
      DisassemblyScope disassembly_scope(current, *this);
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  GenerateSlowPaths();

  // Emit catch stack maps at the end of the stack map stream, as expected by the
  // runtime exception handler.
  if (graph_->HasTryCatch()) {
    RecordCatchBlockInfo();
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
  // No linker patches by default.
}

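// Computes the spill mask and the total frame size. Leaf methods that spill nothing
// and do not need the current method can get away with an empty frame (or just the
// slot for the return address pushed by the call, if any).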
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_safepoint_spill_size,
                                             size_t number_of_out_slots,
                                             const ArenaVector<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(!block_order.empty());
  DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = RoundUp(
      (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_safepoint_spill_size, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        first_register_slot_in_slow_path_
        + maximum_safepoint_spill_size
        + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

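// Builds the LocationSummary common to all invokes: argument and return locations
// follow the calling convention, and a temp is reserved for the method pointer unless
// the callee is the method being compiled (kRecursive).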
void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnMainOnly);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    switch (call->GetMethodLoadKind()) {
      case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
        locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation());
        break;
      case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
        locations->AddTemp(visitor->GetMethodLocation());
        locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister());
        break;
      default:
        locations->AddTemp(visitor->GetMethodLocation());
        break;
    }
  } else {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
  MoveConstant(temp, invoke->GetDexMethodIndex());

  // The access check is unnecessary, but we do not want to introduce
  // extra entrypoints for the codegens that do not support some
  // invoke type and fall back to the runtime call.

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
    case kInterface:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }

  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kVirtual:
      entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kInterface:
      entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
  }
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
  QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

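// Unresolved field accesses are implemented as runtime calls; this sets up their
// locations, and GenerateUnresolvedFieldAccess below emits the matching code.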
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
    HInstruction* field_access,
    Primitive::Type field_type,
    const FieldAccessCallingConvention& calling_convention) {
  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
  LocationSummary* locations =
      new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);

  locations->AddTemp(calling_convention.GetFieldIndexLocation());

  if (is_instance) {
    // Add the `this` object for instance field accesses.
    locations->SetInAt(0, calling_convention.GetObjectLocation());
  }

  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that we are forced to special-case
  // the access to floating point values.
  if (is_get) {
    if (Primitive::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the register
      // allocator expects it in a floating point register.
      // Note: We don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap with
      // the input or field index.
      // The transfer between the two will be done at codegen level.
      locations->SetOut(calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetOut(calling_convention.GetReturnLocation(field_type));
    }
  } else {
    size_t set_index = is_instance ? 1 : 0;
    if (Primitive::IsFloatingPointType(field_type)) {
      // The set value comes from a float location while the calling convention
      // expects it in a regular register location. Allocate a temp for it and
      // make the transfer at codegen.
      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetInAt(set_index,
          calling_convention.GetSetValueLocation(field_type, is_instance));
    }
  }
}

void CodeGenerator::GenerateUnresolvedFieldAccess(
    HInstruction* field_access,
    Primitive::Type field_type,
    uint32_t field_index,
    uint32_t dex_pc,
    const FieldAccessCallingConvention& calling_convention) {
  LocationSummary* locations = field_access->GetLocations();

  MoveConstant(locations->GetTemp(0), field_index);

  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  if (!is_get && Primitive::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using directly the temp location is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
                 locations->InAt(is_instance ? 1 : 0),
                 (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
  }

  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
  switch (field_type) {
    case Primitive::kPrimBoolean:
      entrypoint = is_instance
          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
      break;
    case Primitive::kPrimByte:
      entrypoint = is_instance
          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
      break;
    case Primitive::kPrimShort:
      entrypoint = is_instance
          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
      break;
    case Primitive::kPrimChar:
      entrypoint = is_instance
          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      entrypoint = is_instance
          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
          : (is_get ? kQuickGet32Static : kQuickSet32Static);
      break;
    case Primitive::kPrimNot:
      entrypoint = is_instance
          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      entrypoint = is_instance
          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
          : (is_get ? kQuickGet64Static : kQuickSet64Static);
      break;
    default:
      LOG(FATAL) << "Invalid type " << field_type;
  }
  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);

  if (is_get && Primitive::IsFloatingPointType(field_type)) {
    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
  }
}

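// The two helpers below handle HLoadClass with the kRuntimeCall load kind, which
// resolves the type through a runtime entrypoint.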
void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK_EQ(cls->InputCount(), 1u);
  LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
      cls, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_type_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  LocationSummary* locations = cls->GetLocations();
  MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
  if (cls->NeedsAccessCheck()) {
    CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
    InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
  } else if (cls->MustGenerateClinitCheck()) {
    CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
  } else {
    CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
  }
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKs below check that a register is not specified twice in
  // the location summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

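// Runs the location builder for one instruction and updates method-wide properties:
// any location summary that can call marks the method as non-leaf, and instructions
// needing the current method (other than fully intrinsified static calls) force it
// to be kept available.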
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    env->AllocateLocations();
  }
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr) {
      if (locations->CanCall()) {
        MarkNotLeaf();
      } else if (locations->Intrinsified() &&
                 instruction->IsInvokeStaticOrDirect() &&
                 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
        return;
      }
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

void CodeGenerator::MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count) const {
  if (stats_ != nullptr) {
    stats_->RecordStat(compilation_stat, count);
  }
}

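// Factory method instantiating the code generator for the target instruction set,
// or returning nullptr if that backend was not compiled in.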
std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                     InstructionSet instruction_set,
                                                     const InstructionSetFeatures& isa_features,
                                                     const CompilerOptions& compiler_options,
                                                     OptimizingCompilerStats* stats) {
  ArenaAllocator* arena = graph->GetArena();
  switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kArm:
    case kThumb2: {
      return std::unique_ptr<CodeGenerator>(
          new (arena) arm::CodeGeneratorARMVIXL(graph,
                                                *isa_features.AsArmInstructionSetFeatures(),
                                                compiler_options,
                                                stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      return std::unique_ptr<CodeGenerator>(
          new (arena) arm64::CodeGeneratorARM64(graph,
                                                *isa_features.AsArm64InstructionSetFeatures(),
                                                compiler_options,
                                                stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips: {
      return std::unique_ptr<CodeGenerator>(
          new (arena) mips::CodeGeneratorMIPS(graph,
                                              *isa_features.AsMipsInstructionSetFeatures(),
                                              compiler_options,
                                              stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    case kMips64: {
      return std::unique_ptr<CodeGenerator>(
          new (arena) mips64::CodeGeneratorMIPS64(graph,
                                                  *isa_features.AsMips64InstructionSetFeatures(),
                                                  compiler_options,
                                                  stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      return std::unique_ptr<CodeGenerator>(
          new (arena) x86::CodeGeneratorX86(graph,
                                            *isa_features.AsX86InstructionSetFeatures(),
                                            compiler_options,
                                            stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case kX86_64: {
      return std::unique_ptr<CodeGenerator>(
          new (arena) x86_64::CodeGeneratorX86_64(graph,
                                                  *isa_features.AsX86_64InstructionSetFeatures(),
                                                  compiler_options,
                                                  stats));
    }
#endif
    default:
      return nullptr;
  }
}

void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
                                                     size_t* method_info_size) {
  DCHECK(stack_map_size != nullptr);
  DCHECK(method_info_size != nullptr);
  *stack_map_size = stack_map_stream_.PrepareForFillIn();
  *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
}

static void CheckCovers(uint32_t dex_pc,
                        const HGraph& graph,
                        const CodeInfo& code_info,
                        const ArenaVector<HSuspendCheck*>& loop_headers,
                        ArenaVector<size_t>* covered) {
  CodeInfoEncoding encoding = code_info.ExtractEncoding();
  for (size_t i = 0; i < loop_headers.size(); ++i) {
    if (loop_headers[i]->GetDexPc() == dex_pc) {
      if (graph.IsCompilingOsr()) {
        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
      }
      ++(*covered)[i];
    }
  }
}

// Debug helper to ensure loop entries in compiled code are matched by
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                            const CodeInfo& code_info,
                                            const DexFile::CodeItem& code_item) {
  if (graph.HasTryCatch()) {
    // One can write loops through try/catch, which we do not support for OSR anyway.
    return;
  }
  ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
  for (HBasicBlock* block : graph.GetReversePostOrder()) {
    if (block->IsLoopHeader()) {
      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
      if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
        loop_headers.push_back(suspend_check);
      }
    }
  }
  ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
  const uint16_t* code_ptr = code_item.insns_;
  const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;

  size_t dex_pc = 0;
  while (code_ptr < code_end) {
    const Instruction& instruction = *Instruction::At(code_ptr);
    if (instruction.IsBranch()) {
      uint32_t target = dex_pc + instruction.GetTargetOffset();
      CheckCovers(target, graph, code_info, loop_headers, &covered);
    } else if (instruction.IsSwitch()) {
      DexSwitchTable table(instruction, dex_pc);
      uint16_t num_entries = table.GetNumEntries();
      size_t offset = table.GetFirstValueIndex();

      // Use a larger loop counter type to avoid overflow issues.
      for (size_t i = 0; i < num_entries; ++i) {
        // The target of the case.
        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
        CheckCovers(target, graph, code_info, loop_headers, &covered);
      }
    }
    dex_pc += instruction.SizeInCodeUnits();
    code_ptr += instruction.SizeInCodeUnits();
  }

  for (size_t i = 0; i < covered.size(); ++i) {
    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
  }
}

void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
                                   MemoryRegion method_info_region,
                                   const DexFile::CodeItem& code_item) {
  stack_map_stream_.FillInCodeInfo(stack_map_region);
  stack_map_stream_.FillInMethodInfo(method_info_region);
  if (kIsDebugBuild) {
    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
  }
}

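// Records a stack map entry mapping the current native PC to `dex_pc`, including the
// dex register locations from the instruction's environment. For suspend checks in
// irreducible loop headers of OSR-compiled methods, a duplicate entry marks the
// possible OSR entry point.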
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path) {
  if (instruction != nullptr) {
    // The code generated for some type conversions
    // may call the runtime, thus normally requiring a subsequent
    // call to this method. However, the method verifier does not
    // produce PC information for certain instructions, which are
    // considered "atomic" (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions. As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t outer_environment_size = 0;
  uint32_t inlining_depth = 0;
  if (instruction != nullptr) {
    for (HEnvironment* environment = instruction->GetEnvironment();
         environment != nullptr;
         environment = environment->GetParent()) {
      outer_dex_pc = environment->GetDexPc();
      outer_environment_size = environment->Size();
      if (environment != instruction->GetEnvironment()) {
        inlining_depth++;
      }
    }
  }

  // Collect PC infos for the mapping table.
  uint32_t native_pc = GetAssembler()->CodePosition();

  if (instruction == nullptr) {
    // For stack overflow checks and native-debug-info entries without dex register
    // mapping (i.e. start of basic block or start of slow path).
    stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
    stack_map_stream_.EndStackMapEntry();
    return;
  }
  LocationSummary* locations = instruction->GetLocations();

  uint32_t register_mask = locations->GetRegisterMask();
  DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the spilled caller-saves from the
    // mask, since they will be overwritten by the callee.
    uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
    register_mask &= ~spills;
  } else {
    // The register mask must be a subset of callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  }
  stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       outer_environment_size,
                                       inlining_depth);

  HEnvironment* const environment = instruction->GetEnvironment();
  EmitEnvironment(environment, slow_path);
  // Record invoke info; the common case for the trampoline is super and static invokes.
  // Only record these to reduce oat file size.
  if (kEnableDexLayoutOptimizations) {
    if (environment != nullptr &&
        instruction->IsInvoke() &&
        instruction->IsInvokeStaticOrDirect()) {
      HInvoke* const invoke = instruction->AsInvoke();
      stack_map_stream_.AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
    }
  }
  stack_map_stream_.EndStackMapEntry();

  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
  if (instruction->IsSuspendCheck() &&
      (info != nullptr) &&
      graph_->IsCompilingOsr() &&
      (inlining_depth == 0)) {
    DCHECK_EQ(info->GetSuspendCheck(), instruction);
    // We duplicate the stack map as a marker that this stack map can be an OSR entry.
    // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
    DCHECK(info->IsIrreducible());
    stack_map_stream_.BeginStackMapEntry(
        dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
    EmitEnvironment(instruction->GetEnvironment(), slow_path);
    stack_map_stream_.EndStackMapEntry();
    if (kIsDebugBuild) {
      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
        HInstruction* in_environment = environment->GetInstructionAt(i);
        if (in_environment != nullptr) {
          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
          Location location = environment->GetLocationAt(i);
          DCHECK(location.IsStackSlot() ||
                 location.IsDoubleStackSlot() ||
                 location.IsConstant() ||
                 location.IsInvalid());
          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
          }
        }
      }
    }
  } else if (kIsDebugBuild) {
    // Ensure stack maps are unique, by checking that the native pc in the stack map
    // last emitted is different from the native pc of the stack map just emitted.
    size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
    if (number_of_stack_maps > 1) {
      DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
                stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
    }
  }
}

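// Returns whether the last emitted stack map is at the current native PC.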
bool CodeGenerator::HasStackMapAtCurrentPc() {
  uint32_t pc = GetAssembler()->CodeSize();
  size_t count = stack_map_stream_.GetNumberOfStackMaps();
  if (count == 0) {
    return false;
  }
  CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
  return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
}

void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                               uint32_t dex_pc,
                                               SlowPathCode* slow_path) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfo(instruction, dex_pc, slow_path);
  }
}

void CodeGenerator::RecordCatchBlockInfo() {
  ArenaAllocator* arena = graph_->GetArena();

  for (HBasicBlock* block : *block_order_) {
    if (!block->IsCatchBlock()) {
      continue;
    }

    uint32_t dex_pc = block->GetDexPc();
    uint32_t num_vregs = graph_->GetNumberOfVRegs();
    uint32_t inlining_depth = 0;  // Inlining of catch blocks is not supported at the moment.
    uint32_t native_pc = GetAddressOf(block);
    uint32_t register_mask = 0;   // Not used.

    // The stack mask is not used, so we leave it empty.
    ArenaBitVector* stack_mask =
        ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);

    stack_map_stream_.BeginStackMapEntry(dex_pc,
                                         native_pc,
                                         register_mask,
                                         stack_mask,
                                         num_vregs,
                                         inlining_depth);

    HInstruction* current_phi = block->GetFirstPhi();
    for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
        HInstruction* next_phi = current_phi->GetNext();
        DCHECK(next_phi == nullptr ||
               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
        current_phi = next_phi;
      }

      if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      } else {
        Location location = current_phi->GetLiveInterval()->ToLocation();
        switch (location.GetKind()) {
          case Location::kStackSlot: {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            break;
          }
          case Location::kDoubleStackSlot: {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
            ++vreg;
            DCHECK_LT(vreg, num_vregs);
            break;
          }
          default: {
            // All catch phis must be allocated to a stack slot.
            LOG(FATAL) << "Unexpected kind " << location.GetKind();
            UNREACHABLE();
          }
        }
      }
    }

    stack_map_stream_.EndStackMapEntry();
  }
}

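// Serializes an environment (and, recursively, its parents for inlined frames) into
// the stack map stream. Values held in caller-save registers are reported at their
// spill location when the surrounding slow path has saved them.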
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
  if (environment == nullptr) return;

  if (environment->GetParent() != nullptr) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path);
    stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           environment->Size(),
                                           &graph_->GetDexFile());
  }

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }

  if (environment->GetParent() != nullptr) {
    stack_map_stream_.EndInlineInfoEntry();
  }
}

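// An explicit null check can be subsumed by its first user when that instruction
// would fault on a null input anyway, turning it into an implicit null check.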
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();

  return (first_next_not_move != nullptr)
      && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  // If we come from a static path, don't record the pc, as we cannot throw an NPE.
  // NB: Having the checks here makes the code much less verbose in the arch-specific
  // code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If that instruction is a null check, it means that `instr` is the first user
  // and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be
    // reverted, otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as leaf (and the
  // HSuspendCheck to be eliminated from the entry block). The method still
  // gets a valid stack frame, because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers, and we also need to set up the stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

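// Illustrative sketch (not part of the original source): a backend's
// locations builder would typically delegate to the helper above, e.g. for
// a null check (assuming the header supplies an empty default for the
// `caller_saves` argument):
//
//   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
//   locations->SetInAt(0, Location::RequiresRegister());
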
void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear the stack bits of loop phis that contain objects
    // and are allocated in a register: loop phis allocated on the stack
    // already have the object in their stack slot.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      Primitive::Type type1,
                                      Location from2,
                                      Location to2,
                                      Primitive::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

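// Illustrative sketch (not part of the original source): slow paths use this
// helper to move two values into calling-convention registers at once, so
// that the move resolver resolves any swap between the two. E.g., a
// bounds-check slow path passing the index and the length (`index_loc` and
// `length_loc` are hypothetical names here):
//
//   InvokeRuntimeCallingConvention calling_convention;
//   codegen->EmitParallelMoves(
//       index_loc, Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
//       Primitive::kPrimInt,
//       length_loc, Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
//       Primitive::kPrimInt);
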
void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (kEmitCompilerReadBarrier &&
              !kUseBakerReadBarrier &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might still have
    // it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

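// Illustrative sketch (not part of the original source): each backend's
// InvokeRuntime() is expected to run this validation before emitting the
// actual entrypoint call, roughly along these lines for x86:
//
//   void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint,
//                                        HInstruction* instruction,
//                                        uint32_t dex_pc,
//                                        SlowPathCode* slow_path) {
//     ValidateInvokeRuntime(entrypoint, instruction, slow_path);
//     GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value());
//     if (EntrypointRequiresStackMap(entrypoint)) {
//       RecordPcInfo(instruction, dex_pc, slow_path);
//     }
//   }
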
void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
         (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

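// Illustrative sketch (not part of the original source): a non-fatal slow
// path brackets its runtime call with the two helpers above, e.g. in an
// x86-flavored EmitNativeCode():
//
//   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
//     __ Bind(GetEntryLabel());
//     SaveLiveRegisters(codegen, instruction_->GetLocations());
//     // ... move arguments into place and invoke the runtime entrypoint ...
//     RestoreLiveRegisters(codegen, instruction_->GetLocations());
//     __ jmp(GetExitLabel());
//   }
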
void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
  // Check to see if we have known failures that will cause us to have to bail out
  // to the runtime, and just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // The length must be >= 0.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

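// Illustrative sketch (not part of the original source): per-architecture
// intrinsics locations builders delegate to the shared helper above, e.g.:
//
//   void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
//     CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
//   }
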
void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 Handle<mirror::ObjectArray<mirror::Object>> roots,
                                 const uint8_t* roots_data) {
  DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  size_t index = 0;
  for (auto& entry : jit_string_roots_) {
    // Update `roots` with the string, and replace the address temporarily
    // stored in the table with the string's index.
    uint64_t address = entry.second;
    roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
    DCHECK(roots->Get(index) != nullptr);
    entry.second = index;
    // Ensure the string is strongly interned. This is a requirement on how the JIT
    // handles strings. b/32995596
    class_linker->GetInternTable()->InternStrong(
        reinterpret_cast<mirror::String*>(roots->Get(index)));
    ++index;
  }
  for (auto& entry : jit_class_roots_) {
    // Update `roots` with the class, and replace the address temporarily
    // stored in the table with the class's index.
    uint64_t address = entry.second;
    roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
    DCHECK(roots->Get(index) != nullptr);
    entry.second = index;
    ++index;
  }
  EmitJitRootPatches(code, roots_data);
}

QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
  ScopedObjectAccess soa(Thread::Current());
  if (array_klass == nullptr) {
    // This can only happen for non-primitive arrays, as primitive arrays can always
    // be resolved.
    return kQuickAllocArrayResolved32;
  }

  switch (array_klass->GetComponentSize()) {
    case 1: return kQuickAllocArrayResolved8;
    case 2: return kQuickAllocArrayResolved16;
    case 4: return kQuickAllocArrayResolved32;
    case 8: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  return kQuickAllocArrayResolved;
}

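// Illustrative example (not part of the original source): under the usual
// component-size assumptions, a resolved boolean[] or byte[] (size 1) maps
// to kQuickAllocArrayResolved8, char[]/short[] (size 2) to
// kQuickAllocArrayResolved16, int[]/float[] and reference arrays (size 4,
// since heap references are 32-bit) to kQuickAllocArrayResolved32, and
// long[]/double[] (size 8) to kQuickAllocArrayResolved64.
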
}  // namespace art