/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_

#include "arch/x86/instruction_set_features_x86.h"
#include "base/enums.h"
#include "base/macros.h"
#include "code_generator.h"
#include "dex/dex_file_types.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/x86/assembler_x86.h"

namespace art HIDDEN {
namespace x86 {

// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86WordSize = static_cast<size_t>(kX86PointerSize);

class CodeGeneratorX86;

static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr XmmRegister kParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);

static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

#define UNIMPLEMENTED_INTRINSIC_LIST_X86(V) \
  V(MathRoundDouble) \
  V(FloatIsInfinite) \
  V(DoubleIsInfinite) \
  V(IntegerHighestOneBit) \
  V(LongHighestOneBit) \
  V(LongDivideUnsigned) \
  V(CRC32Update) \
  V(CRC32UpdateBytes) \
  V(CRC32UpdateByteBuffer) \
  V(FP16ToFloat) \
  V(FP16ToHalf) \
  V(FP16Floor) \
  V(FP16Ceil) \
  V(FP16Rint) \
  V(FP16Greater) \
  V(FP16GreaterEquals) \
  V(FP16Less) \
  V(FP16LessEquals) \
  V(FP16Compare) \
  V(FP16Min) \
  V(FP16Max) \
  V(MathMultiplyHigh) \
  V(StringStringIndexOf) \
  V(StringStringIndexOfAfter) \
  V(StringBufferAppend) \
  V(StringBufferLength) \
  V(StringBufferToString) \
  V(StringBuilderAppendObject) \
  V(StringBuilderAppendString) \
  V(StringBuilderAppendCharSequence) \
  V(StringBuilderAppendCharArray) \
  V(StringBuilderAppendBoolean) \
  V(StringBuilderAppendChar) \
  V(StringBuilderAppendInt) \
  V(StringBuilderAppendLong) \
  V(StringBuilderAppendFloat) \
  V(StringBuilderAppendDouble) \
  V(StringBuilderLength) \
  V(StringBuilderToString) \
  /* 1.8 */ \
  V(UnsafeGetAndAddInt) \
  V(UnsafeGetAndAddLong) \
  V(UnsafeGetAndSetInt) \
  V(UnsafeGetAndSetLong) \
  V(UnsafeGetAndSetObject) \
  V(MethodHandleInvokeExact) \
  V(MethodHandleInvoke) \
  /* OpenJDK 11 */ \
  V(JdkUnsafeGetAndAddInt) \
  V(JdkUnsafeGetAndAddLong) \
  V(JdkUnsafeGetAndSetInt) \
  V(JdkUnsafeGetAndSetLong) \
  V(JdkUnsafeGetAndSetObject)
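
// A minimal sketch (not part of the original header) of how an X-macro list
// like UNIMPLEMENTED_INTRINSIC_LIST_X86 is typically consumed; the expander
// name MARK_UNIMPLEMENTED and the Intrinsics::k<Name> spelling are
// illustrative assumptions, not code from this file:
//
//   #define MARK_UNIMPLEMENTED(Name) \
//     case Intrinsics::k##Name:      \
//       return false;  // No x86 fast path; fall back to a runtime call.
//   UNIMPLEMENTED_INTRINSIC_LIST_X86(MARK_UNIMPLEMENTED)
//   #undef MARK_UNIMPLEMENTED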

class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kX86PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<Register, XmmRegister> {
 public:
  InvokeDexCallingConvention() : CallingConvention(
      kParameterCoreRegisters,
      kParameterCoreRegistersLength,
      kParameterFpuRegisters,
      kParameterFpuRegistersLength,
      kX86PointerSize) {}

  RegisterPair GetRegisterPairAt(size_t argument_index) {
    DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
    return kParameterCorePairRegisters[argument_index];
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
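
// Worked example (informal): with kParameterCorePairRegisters = { ECX_EDX, EDX_EBX },
// GetRegisterPairAt(0) yields ECX_EDX and GetRegisterPairAt(1) yields EDX_EBX,
// i.e. a long argument occupies two adjacent core-register slots. Which slot a
// given dex argument starts at is decided by the visitor below.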

class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorX86() {}
  virtual ~InvokeDexCallingConventionVisitorX86() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
};

class CriticalNativeCallingConventionVisitorX86 : public InvokeDexCallingConventionVisitor {
 public:
  explicit CriticalNativeCallingConventionVisitorX86(bool for_register_allocation)
      : for_register_allocation_(for_register_allocation) {}

  virtual ~CriticalNativeCallingConventionVisitorX86() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

  size_t GetStackOffset() const { return stack_offset_; }

 private:
  // Register allocator does not support adjusting frame size, so we cannot provide final locations
  // of stack arguments for register allocation. We ask the register allocator for any location and
  // move these arguments to the right place after adjusting the SP when generating the call.
  const bool for_register_allocation_;
  size_t stack_offset_ = 0u;

  DISALLOW_COPY_AND_ASSIGN(CriticalNativeCallingConventionVisitorX86);
};

class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionX86() {}

  Location GetObjectLocation() const override {
    return Location::RegisterLocation(ECX);
  }
  Location GetFieldIndexLocation() const override {
    return Location::RegisterLocation(EAX);
  }
  Location GetReturnLocation(DataType::Type type) const override {
    return DataType::Is64BitType(type)
        ? Location::RegisterPairLocation(EAX, EDX)
        : Location::RegisterLocation(EAX);
  }
  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
    return DataType::Is64BitType(type)
        ? (is_instance
            ? Location::RegisterPairLocation(EDX, EBX)
            : Location::RegisterPairLocation(ECX, EDX))
        : (is_instance
            ? Location::RegisterLocation(EDX)
            : Location::RegisterLocation(ECX));
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::FpuRegisterLocation(XMM0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
};
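
// Note (added for clarity): returning 64-bit values in the EAX/EDX pair above
// matches the usual ia32 convention, presumably with the low half in EAX and
// the high half in EDX.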

class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  X86Assembler* GetAssembler() const;

 private:
  void Exchange(Register reg, int mem);
  void Exchange32(XmmRegister reg, int mem);
  void Exchange128(XmmRegister reg, int mem);
  void ExchangeMemory(int mem1, int mem2, int number_of_words);
  void MoveMemoryToMemory(int dst, int src, int number_of_words);

  CodeGeneratorX86* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86);
};

class LocationsBuilderX86 : public HGraphVisitor {
 public:
  LocationsBuilderX86(HGraph* graph, CodeGeneratorX86* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleBitwiseOperation(HBinaryOperation* instruction);
  void HandleInvoke(HInvoke* invoke);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* instruction);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  bool CpuHasAvxFeatureFlag();
  bool CpuHasAvx2FeatureFlag();

  CodeGeneratorX86* const codegen_;
  InvokeDexCallingConventionVisitorX86 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};

class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  X86Assembler* GetAssembler() const { return assembler_; }

  // The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump
  // table version generates 7 instructions and num_entries literals. The compare/jump
  // sequence generates less code/data for a small num_entries.
  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5;
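
  // Worked check of the threshold above (informal): at num_entries == 5, the
  // compare/jump sequence costs about 1.5 * 5 = 7.5 instructions, while a jump
  // table would cost 7 instructions plus 5 table literals, so the table only
  // starts paying off for switches larger than this threshold.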

  // Generate a GC root reference load:
  //
  //   root <- *address
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               const Address& address,
                               Label* fixup_label,
                               ReadBarrierOption read_barrier_option);

  void HandleFieldSet(HInstruction* instruction,
                      uint32_t value_index,
                      DataType::Type type,
                      Address field_addr,
                      Register base,
                      bool is_volatile,
                      bool value_can_be_null,
                      WriteBarrierKind write_barrier_kind);

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, Register temp);
  void HandleBitwiseOperation(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivByPowerOfTwo(HDiv* instruction);
  void RemByPowerOfTwo(HRem* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateRemFP(HRem* rem);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* instruction);
  void GenerateShlLong(const Location& loc, Register shifter);
  void GenerateShrLong(const Location& loc, Register shifter);
  void GenerateUShrLong(const Location& loc, Register shifter);
  void GenerateShlLong(const Location& loc, int shift);
  void GenerateShrLong(const Location& loc, int shift);
  void GenerateUShrLong(const Location& loc, int shift);
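
  // Background note (an assumption, not stated in this header): ia32 has no
  // single instruction for a 64-bit shift, so the long-shift helpers above
  // presumably pair 32-bit shifts across the register pair (e.g. shld/shl
  // for a left shift) and special-case counts of 32 or more, where the low
  // word moves entirely into the high word.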

  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null,
                      WriteBarrierKind write_barrier_kind);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         ReadBarrierOption read_barrier_option);

  // Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
  // `is_wide` specifies whether it is long/double or not.
  void PushOntoFPStack(Location source, uint32_t temp_offset,
                       uint32_t stack_adjustment, bool is_fp, bool is_wide);

  template<class LabelType>
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             LabelType* true_target,
                             LabelType* false_target);
  template<class LabelType>
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    LabelType* true_target,
                                    LabelType* false_target);
  template<class LabelType>
  void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label);
  template<class LabelType>
  void GenerateLongComparesAndJumps(HCondition* cond,
                                    LabelType* true_label,
                                    LabelType* false_label);

  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(Register value_reg,
                                   int32_t lower_bound,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block,
                                   HBasicBlock* default_block);

  void GenerateFPCompare(Location lhs, Location rhs, HInstruction* insn, bool is_double);
  bool CpuHasAvxFeatureFlag();
  bool CpuHasAvx2FeatureFlag();

  void GenerateMethodEntryExitHook(HInstruction* instruction);

  X86Assembler* const assembler_;
  CodeGeneratorX86* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};

class JumpTableRIPFixup;

class CodeGeneratorX86 : public CodeGenerator {
 public:
  CodeGeneratorX86(HGraph* graph,
                   const CompilerOptions& compiler_options,
                   OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorX86() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;
  void Bind(HBasicBlock* block) override;
  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  void GenerateInvokeRuntime(int32_t entry_point_offset);

  size_t GetWordSize() const override {
    return kX86WordSize;
  }

  size_t GetSlowPathFPWidth() const override {
    return GetGraph()->HasSIMD()
        ? GetSIMDRegisterWidth()
        : 2 * kX86WordSize;  // 8 bytes == 2 words for each spill
  }

  size_t GetCalleePreservedFPWidth() const override {
    return 2 * kX86WordSize;
  }

  size_t GetSIMDRegisterWidth() const override {
    return 4 * kX86WordSize;
  }
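
  // Worked sizes (informal, with kX86WordSize == 4 bytes): the SIMD register
  // width above is 4 * 4 = 16 bytes, a full XMM register; the non-SIMD slow
  // path FP width is 2 * 4 = 8 bytes, one double.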

  HGraphVisitor* GetLocationBuilder() override {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() override {
    return &instruction_visitor_;
  }

  X86Assembler* GetAssembler() override {
    return &assembler_;
  }

  const X86Assembler& GetAssembler() const override {
    return assembler_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    return GetLabelOf(block)->Position();
  }

  void SetupBlockedRegisters() const override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  ParallelMoveResolverX86* GetMoveResolver() override {
    return &move_resolver_;
  }

  InstructionSet GetInstructionSet() const override {
    return InstructionSet::kX86;
  }

  const X86InstructionSetFeatures& GetInstructionSetFeatures() const;

  // Helper method to move a 32-bit value between two locations.
  void Move32(Location destination, Location source);
  // Helper method to move a 64-bit value between two locations.
  void Move64(Location destination, Location source);
  // Helper method to load a value from an address to a register.
  void LoadFromMemoryNoBarrier(DataType::Type dst_type,
                               Location dst,
                               Address src,
                               HInstruction* instr = nullptr,
                               XmmRegister temp = kNoXmmRegister,
                               bool is_atomic_load = false);
  // Helper method to move a primitive value from a location to an address.
  void MoveToMemory(DataType::Type src_type,
                    Location src,
                    Register dst_base,
                    Register dst_index = Register::kNoRegister,
                    ScaleFactor dst_scale = TIMES_1,
                    int32_t dst_disp = 0);

  // Check if the desired_string_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      ArtMethod* method) override;

  void LoadMethod(MethodLoadKind load_kind, Location temp, HInvoke* invoke);
  // Generate a call to a static or direct method.
  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
  // Generate a call to a virtual method.
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;

  void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                     uint32_t intrinsic_data);
  void RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
                                 uint32_t boot_image_offset);
  void RecordBootImageMethodPatch(HInvoke* invoke);
  void RecordMethodBssEntryPatch(HInvoke* invoke);
  void RecordBootImageTypePatch(HLoadClass* load_class);
  Label* NewTypeBssEntryPatch(HLoadClass* load_class);
  void RecordBootImageStringPatch(HLoadString* load_string);
  Label* NewStringBssEntryPatch(HLoadString* load_string);
  void RecordBootImageJniEntrypointPatch(HInvokeStaticOrDirect* invoke);

  void LoadBootImageAddress(Register reg,
                            uint32_t boot_image_reference,
                            HInvokeStaticOrDirect* invoke);
  void LoadIntrinsicDeclaringClass(Register reg, HInvokeStaticOrDirect* invoke);

  Label* NewJitRootStringPatch(const DexFile& dex_file,
                               dex::StringIndex string_index,
                               Handle<mirror::String> handle);
  Label* NewJitRootClassPatch(const DexFile& dex_file,
                              dex::TypeIndex type_index,
                              Handle<mirror::Class> handle);

  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  // Emit linker patches.
  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;

  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const PatchInfo<Label>& info,
                       uint64_t index_in_table) const;
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  // Emit a write barrier.
  void MarkGCCard(
      Register temp, Register card, Register object, Register value, bool emit_null_check);
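
  // Rough shape of the emitted barrier (a sketch of the usual card-marking
  // scheme; the exact instruction sequence lives in the .cc file):
  //
  //   if (emit_null_check && value == null) skip
  //   card <- thread-local card table base
  //   temp <- object >> kCardShift
  //   byte [card + temp] <- dirty value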

  void GenerateMemoryBarrier(MemBarrierKind kind);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_, block);
  }

  void Initialize() override {
    block_labels_ = CommonInitializeLabels<Label>();
  }

  bool NeedsTwoRegisters(DataType::Type type) const override {
    return type == DataType::Type::kInt64;
  }

  bool ShouldSplitLongMoves() const override { return true; }

  Label* GetFrameEntryLabel() { return &frame_entry_label_; }

  void AddMethodAddressOffset(HX86ComputeBaseMethodAddress* method_base, int32_t offset) {
    method_address_offset_.Put(method_base->GetId(), offset);
  }

  int32_t GetMethodAddressOffset(HX86ComputeBaseMethodAddress* method_base) const {
    return method_address_offset_.Get(method_base->GetId());
  }

  int32_t ConstantAreaStart() const {
    return constant_area_start_;
  }

  Address LiteralDoubleAddress(double v, HX86ComputeBaseMethodAddress* method_base, Register reg);
  Address LiteralFloatAddress(float v, HX86ComputeBaseMethodAddress* method_base, Register reg);
  Address LiteralInt32Address(int32_t v, HX86ComputeBaseMethodAddress* method_base, Register reg);
  Address LiteralInt64Address(int64_t v, HX86ComputeBaseMethodAddress* method_base, Register reg);

  // Load a 32-bit value into a register in the most efficient manner.
  void Load32BitValue(Register dest, int32_t value);

  // Compare a register with a 32-bit value in the most efficient manner.
  void Compare32BitValue(Register dest, int32_t value);
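
  // "Most efficient manner" presumably refers to the classic ia32 idioms,
  // e.g. `xorl dest, dest` rather than `movl $0, dest` when loading zero,
  // and `testl reg, reg` rather than `cmpl $0, reg` when comparing with zero.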

  // Compare int values. Supports only register locations for `lhs`.
  void GenerateIntCompare(Location lhs, Location rhs);
  void GenerateIntCompare(Register lhs, Location rhs);

  // Construct address for array access.
  static Address ArrayAddress(Register obj,
                              Location index,
                              ScaleFactor scale,
                              uint32_t data_offset);

  Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);

  void Finalize(CodeAllocator* allocator) override;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t offset,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             bool needs_null_check);
  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at address `src`, held by
  // object `obj`, into `ref`, and mark it if needed. The base of
  // address `src` must be `obj`.
  //
  // If `always_update_field` is true, the value of the reference is
  // atomically updated in the holder (`obj`). This operation
  // requires a temporary register, which must be provided as a
  // non-null pointer (`temp`).
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 Register obj,
                                                 const Address& src,
                                                 bool needs_null_check,
                                                 bool always_update_field = false,
                                                 Register* temp = nullptr);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  // Ensure that prior stores complete to memory before subsequent loads.
  // The locked add implementation will avoid serializing device memory, but will
  // touch (but not change) the top of the stack.
  // The 'non_temporal' parameter should be used to ensure ordering of non-temporal stores.
  void MemoryFence(bool non_temporal = false) {
    if (!non_temporal) {
      assembler_.lock()->addl(Address(ESP, 0), Immediate(0));
    } else {
      assembler_.mfence();
    }
  }
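
  // Why the locked add works (added note): on x86, a locked read-modify-write
  // acts as a full barrier, so `lock addl $0, (%esp)` provides the required
  // StoreLoad ordering and is typically cheaper than `mfence`; `mfence` is
  // still used when non-temporal stores must be ordered as well.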

  void IncreaseFrame(size_t adjustment) override;
  void DecreaseFrame(size_t adjustment) override;

  void GenerateNop() override;
  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass);
  void MaybeIncrementHotness(bool is_frame_entry);

  // When we don't know the proper offset for the value, we use kPlaceholder32BitOffset.
  // The correct value will be inserted when processing Assembler fixups.
  static constexpr int32_t kPlaceholder32BitOffset = 256;

 private:
  struct X86PcRelativePatchInfo : PatchInfo<Label> {
    X86PcRelativePatchInfo(HX86ComputeBaseMethodAddress* address,
                           const DexFile* target_dex_file,
                           uint32_t target_index)
        : PatchInfo(target_dex_file, target_index),
          method_address(address) {}
    HX86ComputeBaseMethodAddress* method_address;
  };

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<X86PcRelativePatchInfo>& infos,
                                   ArenaVector<linker::LinkerPatch>* linker_patches);

  Register GetInvokeExtraParameter(HInvoke* invoke, Register temp);
  Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);

  // Labels for each block that will be compiled.
  Label* block_labels_;  // Indexed by block id.
  Label frame_entry_label_;
  LocationsBuilderX86 location_builder_;
  InstructionCodeGeneratorX86 instruction_visitor_;
  ParallelMoveResolverX86 move_resolver_;
  X86Assembler assembler_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<X86PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<X86PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative public type patch info for kBssEntryPublic.
  ArenaDeque<X86PcRelativePatchInfo> public_type_bss_entry_patches_;
  // PC-relative package type patch info for kBssEntryPackage.
  ArenaDeque<X86PcRelativePatchInfo> package_type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<X86PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative+kCallCriticalNative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_jni_entrypoint_patches_;
  // PC-relative patch info for IntrinsicObjects for the boot image,
  // and for method/type/string patches for kBootImageRelRo otherwise.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_other_patches_;

  // Patches for string root accesses in JIT compiled code.
  ArenaDeque<PatchInfo<Label>> jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  ArenaDeque<PatchInfo<Label>> jit_class_patches_;

  // Offset to the start of the constant area in the assembled code.
  // Used for fixups to the constant area.
  int32_t constant_area_start_;

  // Fixups for jump tables that need to be patched after the constant table is generated.
  ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;

  // Maps a HX86ComputeBaseMethodAddress instruction id to its offset in the
  // compiled code.
  ArenaSafeMap<uint32_t, int32_t> method_address_offset_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
};

}  // namespace x86
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_