/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_

#include "code_generator.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/type_reference.h"

namespace art {
namespace mips64 {

// InvokeDexCallingConvention registers

static constexpr GpuRegister kParameterCoreRegisters[] =
    { A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

static constexpr FpuRegister kParameterFpuRegisters[] =
    { F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
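
// Note (a sketch of the convention, based on the visitor's implementation in
// code_generator_mips64.cc): A0 is deliberately absent from the core list
// above because it carries the callee's ArtMethod* (see GetMethodLocation()).
// GetNextLocation() advances the core and FPU indices together, so argument i
// is passed in kParameterCoreRegisters[i] or kParameterFpuRegisters[i]; e.g.
// a (jint, jfloat, jlong) signature would land in A1, F14, A3.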

// InvokeRuntimeCallingConvention registers

static constexpr GpuRegister kRuntimeParameterCoreRegisters[] =
    { A0, A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

static constexpr FpuRegister kRuntimeParameterFpuRegisters[] =
    { F12, F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);


static constexpr GpuRegister kCoreCalleeSaves[] =
    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
static constexpr FpuRegister kFpuCalleeSaves[] =
    { F24, F25, F26, F27, F28, F29, F30, F31 };


class CodeGeneratorMIPS64;

class InvokeDexCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kMips64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorMIPS64() {}
  virtual ~InvokeDexCallingConventionVisitorMIPS64() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS64);
};

class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kMips64PointerSize) {}

  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionMIPS64() {}

  Location GetObjectLocation() const OVERRIDE {
    return Location::RegisterLocation(A1);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return Location::RegisterLocation(A0);
  }
  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::RegisterLocation(V0);
  }
  Location GetSetValueLocation(Primitive::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const OVERRIDE {
    return is_instance
        ? Location::RegisterLocation(A2)
        : Location::RegisterLocation(A1);
  }
  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::FpuRegisterLocation(F0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
};

class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;
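
  // Exchange the contents of stack slots `index1` and `index2` via a scratch
  // register; `double_slot` selects 64-bit (doubleword) slots. Descriptive
  // sketch of its role as the stack-to-stack case of EmitSwap(); see the
  // definition in code_generator_mips64.cc.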
  void Exchange(int index1, int index2, bool double_slot);

  Mips64Assembler* GetAssembler() const;

 private:
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS64);
};

class SlowPathCodeMIPS64 : public SlowPathCode {
 public:
  explicit SlowPathCodeMIPS64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  Mips64Label* GetEntryLabel() { return &entry_label_; }
  Mips64Label* GetExitLabel() { return &exit_label_; }

 private:
  Mips64Label entry_label_;
  Mips64Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS64);
};

class LocationsBuilderMIPS64 : public HGraphVisitor {
 public:
  LocationsBuilderMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  Location RegisterOrZeroConstant(HInstruction* instruction);
  Location FpuRegisterOrConstantForStore(HInstruction* instruction);

  InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_;

  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};

class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Mips64Assembler* GetAssembler() const { return assembler_; }

  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
  // instructions for N cases.
  // Table-based packed switch generates approx. 11 32-bit instructions
  // and N 32-bit data words for N cases.
  // At N = 6 they come out as 18 and 17 32-bit words respectively.
  // We switch to the table-based method starting with 7 cases.
  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
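  // For example, at N = 7 the compare-and-jump form costs about
  // 3 + 2.5 * 7 = 20.5 words while the table-based form costs about
  // 11 + 7 = 18 words, so the jump table starts paying off.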

 private:
  void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
  void GenerateMemoryBarrier(MemBarrierKind kind);
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               GpuRegister obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option);

  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Mips64Label* true_target,
                             Mips64Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void GenerateIntLongCompare(IfCondition cond, bool is64bit, LocationSummary* locations);
  void GenerateIntLongCompareAndBranch(IfCondition cond,
                                       bool is64bit,
                                       LocationSummary* locations,
                                       Mips64Label* label);
  void GenerateFpCompare(IfCondition cond,
                         bool gt_bias,
                         Primitive::Type type,
                         LocationSummary* locations);
  void GenerateFpCompareAndBranch(IfCondition cond,
                                  bool gt_bias,
                                  Primitive::Type type,
                                  LocationSummary* locations,
                                  Mips64Label* label);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(GpuRegister value_reg,
                                   int32_t lower_bound,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block,
                                   HBasicBlock* default_block);
  void GenTableBasedPackedSwitch(GpuRegister value_reg,
                                 int32_t lower_bound,
                                 uint32_t num_entries,
                                 HBasicBlock* switch_block,
                                 HBasicBlock* default_block);

  Mips64Assembler* const assembler_;
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS64);
};

class CodeGeneratorMIPS64 : public CodeGenerator {
 public:
  CodeGeneratorMIPS64(HGraph* graph,
                      const Mips64InstructionSetFeatures& isa_features,
                      const CompilerOptions& compiler_options,
                      OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorMIPS64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;

  void Bind(HBasicBlock* block) OVERRIDE;

  size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64DoublewordSize; }

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    return assembler_.GetLabelLocation(GetLabelOf(block));
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
  Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
  const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }

  // Emit linker patches.
  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             GpuRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             GpuRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at the address
  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
  // `ref`, and mark it if needed.
  //
  // If `always_update_field` is true, the value of the reference is
  // atomically updated in the holder (`obj`).
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 GpuRegister obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 ScaleFactor scale_factor,
                                                 Location temp,
                                                 bool needs_null_check,
                                                 bool always_update_field = false);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
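
  // Mark the GC card for `object` when reference `value` is stored into it.
  // When `value_can_be_null` is true, the generated code is expected to also
  // test `value` against null and skip the card mark for null stores.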
  void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);

  // Register allocation.

  void SetupBlockedRegisters() const OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }

  const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  Mips64Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Mips64Label>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<Mips64Label>();
  }

  // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
  // at aligned locations.
  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }

  void Finalize(CodeAllocator* allocator) OVERRIDE;

  // Code generation helpers.
  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;

  void MoveConstant(Location destination, int32_t value) OVERRIDE;

  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  void SwapLocations(Location loc1, Location loc2, Primitive::Type type);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) OVERRIDE;
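  // A typical call site (sketch, e.g. from a null-check slow path) would be:
  //   codegen->InvokeRuntime(kQuickThrowNullPointer, instruction, instruction->GetDexPc(), this);
  // Unlike InvokeRuntimeWithoutRecordingPcInfo below, this also records the
  // PC-related information for the call in a stack map where required.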

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  void GenerateInvokeRuntime(int32_t entry_point_offset);

  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }

  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      HInvokeStaticOrDirect* invoke) OVERRIDE;

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
  void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;

  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
                              Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
  }

  void GenerateNop() OVERRIDE;
  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;

  // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays,
  // boot image strings and method calls. The only difference is the interpretation of
  // the offset_or_index.
  struct PcRelativePatchInfo {
    PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
        : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;

    const DexFile& target_dex_file;
    // Either the dex cache array element offset or the string/type/method index.
    uint32_t offset_or_index;
    // Label for the auipc instruction.
    Mips64Label pc_rel_label;
  };
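
  // Sketch of how the patch is used: a PC-relative address is typically
  // materialized in two instructions. EmitPcRelativeAddressPlaceholderHigh()
  // below binds `pc_rel_label` and emits an `auipc` whose immediate is a
  // placeholder for the high half of the offset; the caller supplies the low
  // half via e.g. a following `daddiu` or by folding it into a load. Both
  // immediates are filled in when the patch is resolved.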

  PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
                                                dex::StringIndex string_index);
  PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
                                                       uint32_t element_offset);
  PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
                                              uint32_t method_index);
  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
                                             dex::StringIndex string_index);
  Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
  Literal* DeduplicateBootImageAddressLiteral(uint64_t address);

  void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);

  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const Literal* literal,
                       uint64_t index_in_table) const;
  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                       dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                      dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);

 private:
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
  using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          Literal*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        Literal*,
                                        TypeReferenceValueComparator>;
  using BootStringToLiteralMap = ArenaSafeMap<StringReference,
                                              Literal*,
                                              StringReferenceValueComparator>;
  using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
                                            Literal*,
                                            TypeReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  Literal* DeduplicateUint64Literal(uint64_t value);
  Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);

  PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                          uint32_t offset_or_index,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                   ArenaVector<LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  Mips64Label* block_labels_;  // Indexed by block id.
  Mips64Label frame_entry_label_;
  LocationsBuilderMIPS64 location_builder_;
  InstructionCodeGeneratorMIPS64 instruction_visitor_;
  ParallelMoveResolverMIPS64 move_resolver_;
  Mips64Assembler assembler_;
  const Mips64InstructionSetFeatures& isa_features_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for non-patchable method address or method code
  // address.
  Uint64ToLiteralMap uint64_literals_;
  // PC-relative patch info.
  ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
  // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
  BootStringToLiteralMap boot_image_string_patches_;
  // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
  ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
  // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
  BootTypeToLiteralMap boot_image_type_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // Patches for string root accesses in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};

}  // namespace mips64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_