/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_

#include "code_generator.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/mips64/assembler_mips64.h"

namespace art {
namespace mips64 {

// InvokeDexCallingConvention registers

static constexpr GpuRegister kParameterCoreRegisters[] =
    { A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

static constexpr FpuRegister kParameterFpuRegisters[] =
    { F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
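
// Note: the managed (Dex) calling convention passes the current ArtMethod*
// in A0, so explicit managed arguments start at A1 (see GetMethodLocation()
// below); runtime entry points take all of their arguments explicitly, so
// the runtime calling convention below starts at A0.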

// InvokeRuntimeCallingConvention registers

static constexpr GpuRegister kRuntimeParameterCoreRegisters[] =
    { A0, A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

static constexpr FpuRegister kRuntimeParameterFpuRegisters[] =
    { F12, F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);


static constexpr GpuRegister kCoreCalleeSaves[] =
    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
static constexpr FpuRegister kFpuCalleeSaves[] =
    { F24, F25, F26, F27, F28, F29, F30, F31 };


class CodeGeneratorMIPS64;

VectorRegister VectorRegisterFrom(Location location);

class InvokeDexCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kMips64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorMIPS64() {}
  virtual ~InvokeDexCallingConventionVisitorMIPS64() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS64);
};

class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kMips64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionMIPS64() {}

  Location GetObjectLocation() const override {
    return Location::RegisterLocation(A1);
  }
  Location GetFieldIndexLocation() const override {
    return Location::RegisterLocation(A0);
  }
  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::RegisterLocation(V0);
  }
  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const override {
    return is_instance
        ? Location::RegisterLocation(A2)
        : Location::RegisterLocation(A1);
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::FpuRegisterLocation(F0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
};

class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  void Exchange(int index1, int index2, bool double_slot);
  void ExchangeQuadSlots(int index1, int index2);

  Mips64Assembler* GetAssembler() const;

 private:
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS64);
};

class SlowPathCodeMIPS64 : public SlowPathCode {
 public:
  explicit SlowPathCodeMIPS64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  Mips64Label* GetEntryLabel() { return &entry_label_; }
  Mips64Label* GetExitLabel() { return &exit_label_; }

 private:
  Mips64Label entry_label_;
  Mips64Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS64);
};

class LocationsBuilderMIPS64 : public HGraphVisitor {
 public:
  LocationsBuilderMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  Location RegisterOrZeroConstant(HInstruction* instruction);
  Location FpuRegisterOrConstantForStore(HInstruction* instruction);

  InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_;

  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};

class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Mips64Assembler* GetAssembler() const { return assembler_; }

  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
  // instructions for N cases.
  // Table-based packed switch generates approx. 11 32-bit instructions
  // and N 32-bit data words for N cases.
  // At N = 6 they come out as 18 and 17 32-bit words respectively.
  // We switch to the table-based method starting with 7 cases.
  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
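  // For example, using the approximations above: at N = 7 the compare-based
  // form needs about 3 + 2.5 * 7 = 20.5 words versus 11 + 7 = 18 words for
  // the table-based form, so the table wins from 7 cases onwards.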

  void GenerateMemoryBarrier(MemBarrierKind kind);

 private:
  void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, GpuRegister temp);
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  void GenerateMinMaxInt(LocationSummary* locations, bool is_min);
  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
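  //
  // (The single-register form clobbers its base register: it is intended for
  // cases where the holder reference is no longer needed after the load,
  // e.g. when walking a chain of references; otherwise the two-register form
  // below preserves `obj`.)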
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               GpuRegister obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option,
                               Mips64Label* label_low = nullptr);

  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Mips64Label* true_target,
                             Mips64Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void GenerateIntLongCompare(IfCondition cond, bool is64bit, LocationSummary* locations);
  // When the function returns `false` it means that the condition holds if `dst` is non-zero
  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
  // `dst` are exchanged.
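  // For instance, MIPS has no single "set if less than or equal" instruction,
  // so a condition like `a <= b` may be materialized as `slt dst, b, a`
  // (the negated condition), in which case the function returns `true` to
  // tell the caller that the meaning of `dst` is inverted.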
  bool MaterializeIntLongCompare(IfCondition cond,
                                 bool is64bit,
                                 LocationSummary* input_locations,
                                 GpuRegister dst);
  void GenerateIntLongCompareAndBranch(IfCondition cond,
                                       bool is64bit,
                                       LocationSummary* locations,
                                       Mips64Label* label);
  void GenerateFpCompare(IfCondition cond,
                         bool gt_bias,
                         DataType::Type type,
                         LocationSummary* locations);
  // When the function returns `false` it means that the condition holds if `dst` is non-zero
  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
  // `dst` are exchanged.
  bool MaterializeFpCompare(IfCondition cond,
                            bool gt_bias,
                            DataType::Type type,
                            LocationSummary* input_locations,
                            FpuRegister dst);
  void GenerateFpCompareAndBranch(IfCondition cond,
                                  bool gt_bias,
                                  DataType::Type type,
                                  LocationSummary* locations,
                                  Mips64Label* label);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(GpuRegister value_reg,
                                   int32_t lower_bound,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block,
                                   HBasicBlock* default_block);
  void GenTableBasedPackedSwitch(GpuRegister value_reg,
                                 int32_t lower_bound,
                                 uint32_t num_entries,
                                 HBasicBlock* switch_block,
                                 HBasicBlock* default_block);
  int32_t VecAddress(LocationSummary* locations,
                     size_t size,
                     /* out */ GpuRegister* adjusted_base);
  void GenConditionalMove(HSelect* select);

  Mips64Assembler* const assembler_;
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS64);
};

class CodeGeneratorMIPS64 : public CodeGenerator {
 public:
  CodeGeneratorMIPS64(HGraph* graph,
                      const CompilerOptions& compiler_options,
                      OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorMIPS64() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;

  void Bind(HBasicBlock* block) override;

  size_t GetWordSize() const override { return kMips64DoublewordSize; }

  size_t GetFloatingPointSpillSlotSize() const override {
    return GetGraph()->HasSIMD()
        ? 2 * kMips64DoublewordSize   // 16 bytes for each spill.
        : 1 * kMips64DoublewordSize;  //  8 bytes for each spill.
  }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    return assembler_.GetLabelLocation(GetLabelOf(block));
  }

  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
  Mips64Assembler* GetAssembler() override { return &assembler_; }
  const Mips64Assembler& GetAssembler() const override { return assembler_; }

  // Emit linker patches.
  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             GpuRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             GpuRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at the address
  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
  // `ref`, and mark it if needed.
  //
  // If `always_update_field` is true, the value of the reference is
  // atomically updated in the holder (`obj`).
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 GpuRegister obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 ScaleFactor scale_factor,
                                                 Location temp,
                                                 bool needs_null_check,
                                                 bool always_update_field = false);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);

  // Register allocation.

  void SetupBlockedRegisters() const override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; }

  const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;

  Mips64Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Mips64Label>(block_labels_, block);
  }

  void Initialize() override {
    block_labels_ = CommonInitializeLabels<Mips64Label>();
  }

  // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
  // at aligned locations.
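  // (kMips64DoublewordSize is 8 bytes, so slow-path spill slots are
  // doubleword-aligned.)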
  uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; }

  void Finalize(CodeAllocator* allocator) override;

  // Code generation helpers.
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;

  void MoveConstant(Location destination, int32_t value) override;

  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  void SwapLocations(Location loc1, Location loc2, DataType::Type type);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  void GenerateInvokeRuntime(int32_t entry_point_offset);

  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }

  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      ArtMethod* method) override;

  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;

  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
                              DataType::Type type ATTRIBUTE_UNUSED) override {
    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
  }

  void GenerateNop() override;
  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
  //
  // The 16-bit halves of the 32-bit PC-relative offset are patched separately, necessitating
  // two patches/infos. There can be more than two patches/infos if the instruction supplying
  // the high half is shared with e.g. a slow path, while the low half is supplied by separate
  // instructions, e.g.:
  //     auipc r1, high       // patch
  //     lwu   r2, low(r1)    // patch
  //     beqzc r2, slow_path
  //   back:
  //     ...
  //   slow_path:
  //     ...
  //     sw    r2, low(r1)    // patch
  //     bc    back
  struct PcRelativePatchInfo : PatchInfo<Mips64Label> {
    PcRelativePatchInfo(const DexFile* dex_file,
                        uint32_t off_or_idx,
                        const PcRelativePatchInfo* info_high)
        : PatchInfo<Mips64Label>(dex_file, off_or_idx),
          patch_info_high(info_high) { }

    // Pointer to the info for the high half patch or nullptr if this is the high half patch info.
    const PcRelativePatchInfo* patch_info_high;

   private:
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = delete;
    DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
  };
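
  // Informal usage sketch (the exact emission sequences live in the .cc file):
  // the high-half info is created first and the low-half info refers to it,
  // e.g.
  //   PcRelativePatchInfo* info_high = NewBootImageMethodPatch(target_method);
  //   PcRelativePatchInfo* info_low = NewBootImageMethodPatch(target_method, info_high);
  //   EmitPcRelativeAddressPlaceholderHigh(info_high, reg, info_low);
  // followed by the instruction consuming the low half, emitted with a
  // placeholder immediate that the linker later patches.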

  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
                                                  const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file,
                                             dex::TypeIndex type_index,
                                             const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file,
                                            dex::TypeIndex type_index,
                                            const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                              dex::StringIndex string_index,
                                              const PcRelativePatchInfo* info_high = nullptr);
  Literal* DeduplicateBootImageAddressLiteral(uint64_t address);

  void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
                                            GpuRegister out,
                                            PcRelativePatchInfo* info_low = nullptr);

  void LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_reference);
  void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);

  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const Literal* literal,
                       uint64_t index_in_table) const;
  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                       dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                      dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);

 private:
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          Literal*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        Literal*,
                                        TypeReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  Literal* DeduplicateUint64Literal(uint64_t value);

  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
                                          uint32_t offset_or_index,
                                          const PcRelativePatchInfo* info_high,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                   ArenaVector<linker::LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  Mips64Label* block_labels_;  // Indexed by block id.
  Mips64Label frame_entry_label_;
  LocationsBuilderMIPS64 location_builder_;
  InstructionCodeGeneratorMIPS64 instruction_visitor_;
  ParallelMoveResolverMIPS64 move_resolver_;
  Mips64Assembler assembler_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for non-patchable method address or method code
  // address.
  Uint64ToLiteralMap uint64_literals_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative patch info for IntrinsicObjects.
  ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;

  // Patches for string root accesses in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};

}  // namespace mips64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_