/art/compiler/utils/arm/ |
D | jni_macro_assembler_arm_vixl.h |
   70  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
   74  ManagedRegister scratch) override;
   81  ManagedRegister scratch) override;
  106  ManagedRegister scratch) override;
  110  ManagedRegister scratch) override;
  112  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
  114  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
  119  ManagedRegister scratch,
  125  ManagedRegister scratch,
  131  ManagedRegister scratch,
  [all …]
|
D | jni_macro_assembler_arm_vixl.cc |
  268  vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());  in StoreSpanning() local
  271  temps.Exclude(scratch);  in StoreSpanning()
  272  asm_.LoadFromOffset(kLoadWord, scratch, sp, in_off.Int32Value());  in StoreSpanning()
  273  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value() + 4);  in StoreSpanning()
  279  vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());  in CopyRef() local
  281  temps.Exclude(scratch);  in CopyRef()
  282  asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());  in CopyRef()
  283  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());  in CopyRef()
  315  vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());  in StoreImmediateToFrame() local
  317  temps.Exclude(scratch);  in StoreImmediateToFrame()
  [all …]
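Every excerpt above follows the same three-step shape: take the caller-provided scratch register, temps.Exclude() it from VIXL's internal temporary pool (so VIXL's own macro expansions cannot reuse it while it holds live data), then stage the value through it, since ARM has no memory-to-memory move. Below is a minimal, self-contained C++ model of that staging step; FakeAssembler and all types are illustrative stand-ins, not the real ART/VIXL classes.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

class FakeAssembler {
 public:
  explicit FakeAssembler(size_t frame_words) : stack_(frame_words, 0) {}

  // Models asm_.LoadFromOffset(kLoadWord, scratch, sp, offset).
  void LoadFromOffset(uint32_t* scratch, size_t offset) { *scratch = stack_[offset / 4]; }
  // Models asm_.StoreToOffset(kStoreWord, scratch, sp, offset).
  void StoreToOffset(uint32_t scratch, size_t offset) { stack_[offset / 4] = scratch; }

  // The CopyRef() shape from the excerpt: the word is staged in a scratch
  // register between the load and the store.
  void CopyRef(size_t dest_off, size_t src_off) {
    uint32_t scratch = 0;          // the register the caller reserved for us
    LoadFromOffset(&scratch, src_off);
    StoreToOffset(scratch, dest_off);
  }

  uint32_t WordAt(size_t offset) const { return stack_[offset / 4]; }

 private:
  std::vector<uint32_t> stack_;    // simulated stack words at sp + offset
};

int main() {
  FakeAssembler asm_(4);
  asm_.StoreToOffset(0xCAFEBABEu, /*offset=*/0);
  asm_.CopyRef(/*dest_off=*/8, /*src_off=*/0);
  std::cout << std::hex << asm_.WordAt(8) << '\n';  // prints cafebabe
}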
|
/art/compiler/utils/arm64/ |
D | jni_macro_assembler_arm64.cc |
  160  Arm64ManagedRegister scratch = m_scratch.AsArm64();  in StoreImmediateToFrame() local
  161  CHECK(scratch.IsXRegister()) << scratch;  in StoreImmediateToFrame()
  162  LoadImmediate(scratch.AsXRegister(), imm);  in StoreImmediateToFrame()
  163  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,  in StoreImmediateToFrame()
  170  Arm64ManagedRegister scratch = m_scratch.AsArm64();  in StoreStackOffsetToThread() local
  171  CHECK(scratch.IsXRegister()) << scratch;  in StoreStackOffsetToThread()
  172  AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());  in StoreStackOffsetToThread()
  173  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());  in StoreStackOffsetToThread()
  188  Arm64ManagedRegister scratch = m_scratch.AsArm64();  in StoreSpanning() local
  190  LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());  in StoreSpanning()
  [all …]
|
D | jni_macro_assembler_arm64.h |
   72  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
   75  ManagedRegister scratch) override;
   80  ManagedRegister scratch) override;
   97  ManagedRegister scratch) override;
   98  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
  100  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
  101  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
  105  ManagedRegister scratch,
  110  ManagedRegister scratch,
  115  ManagedRegister scratch,
  [all …]
|
D | assembler_arm64.cc |
   96  Arm64ManagedRegister scratch = m_scratch.AsArm64();  in JumpTo() local
   98  CHECK(scratch.IsXRegister()) << scratch;  in JumpTo()
  101  temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));  in JumpTo()
  102  ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));  in JumpTo()
  103  ___ Br(reg_x(scratch.AsXRegister()));  in JumpTo()
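JumpTo() is the same staging idea applied to control flow: the branch target lives in memory at base + offset, so it is first loaded into a scratch register and then branched through. A sketch of that shape using an ordinary function-pointer table (names are illustrative, not ART code):

#include <cstdio>

using Entry = void (*)();
void Hello() { std::puts("hello from table entry 1"); }

// Models: Ldr(scratch, MEM_OP(base, offs)); Br(scratch);
void JumpTo(Entry* base, int index) {
  Entry scratch = base[index];   // load the target out of memory first...
  scratch();                     // ...then branch through the register
}

int main() {
  Entry table[] = {nullptr, Hello};
  JumpTo(table, 1);
}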
|
/art/compiler/utils/x86/ |
D | jni_macro_assembler_x86.h |
   63  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
   67  ManagedRegister scratch) override;
   72  ManagedRegister scratch) override;
   93  ManagedRegister scratch) override;
   95  void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
   98  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
  100  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
  102  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
  105  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
  108  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
  [all …]
|
D | jni_macro_assembler_x86.cc |
  169  X86ManagedRegister scratch = mscratch.AsX86();  in StoreStackOffsetToThread() local
  170  CHECK(scratch.IsCpuRegister());  in StoreStackOffsetToThread()
  171  __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));  in StoreStackOffsetToThread()
  172  __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());  in StoreStackOffsetToThread()
  323  X86ManagedRegister scratch = mscratch.AsX86();  in CopyRef() local
  324  CHECK(scratch.IsCpuRegister());  in CopyRef()
  325  __ movl(scratch.AsCpuRegister(), Address(ESP, src));  in CopyRef()
  326  __ movl(Address(ESP, dest), scratch.AsCpuRegister());  in CopyRef()
  332  X86ManagedRegister scratch = mscratch.AsX86();  in CopyRawPtrFromThread() local
  333  CHECK(scratch.IsCpuRegister());  in CopyRawPtrFromThread()
  [all …]
|
/art/compiler/utils/x86_64/ |
D | jni_macro_assembler_x86_64.h |
   64  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) override;
   68  ManagedRegister scratch) override;
   75  ManagedRegister scratch) override;
   98  ManagedRegister scratch) override;
  100  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
  103  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) override;
  105  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) override;
  110  ManagedRegister scratch,
  116  ManagedRegister scratch,
  122  ManagedRegister scratch,
  [all …]
|
D | jni_macro_assembler_x86_64.cc |
  208  X86_64ManagedRegister scratch = mscratch.AsX86_64();  in StoreStackOffsetToThread() local
  209  CHECK(scratch.IsCpuRegister());  in StoreStackOffsetToThread()
  210  __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));  in StoreStackOffsetToThread()
  211  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());  in StoreStackOffsetToThread()
  373  X86_64ManagedRegister scratch = mscratch.AsX86_64();  in CopyRef() local
  374  CHECK(scratch.IsCpuRegister());  in CopyRef()
  375  __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));  in CopyRef()
  376  __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());  in CopyRef()
  382  X86_64ManagedRegister scratch = mscratch.AsX86_64();  in CopyRawPtrFromThread() local
  383  CHECK(scratch.IsCpuRegister());  in CopyRawPtrFromThread()
  [all …]
|
/art/compiler/utils/ |
D | jni_macro_assembler.h |
   86  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
   90  ManagedRegister scratch) = 0;
   97  ManagedRegister scratch) = 0;
  122  ManagedRegister scratch) = 0;
  126  ManagedRegister scratch) = 0;
  128  virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
  130  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
  135  ManagedRegister scratch,
  141  ManagedRegister scratch,
  147  ManagedRegister scratch,
  [all …]
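This header is the architecture-neutral interface that the per-ISA files above implement. Note that every overload threads an explicit ManagedRegister scratch parameter: scratch selection is the caller's job, so the assembler never has to reserve a register behind the caller's back. A hedged sketch of that contract with simplified stand-in types (PrintingAssembler is invented for illustration, not ART code):

#include <cstdint>
#include <cstdio>

struct FrameOffset { int32_t value; };
struct ManagedRegister { int id; };

class JNIMacroAssembler {
 public:
  virtual ~JNIMacroAssembler() = default;
  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                     ManagedRegister scratch) = 0;
  virtual void CopyRef(FrameOffset dest, FrameOffset src,
                       ManagedRegister scratch) = 0;
};

// A toy backend that just prints the instructions it would emit.
class PrintingAssembler : public JNIMacroAssembler {
 public:
  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                             ManagedRegister scratch) override {
    std::printf("mov r%d, #%u ; str r%d, [sp, #%d]\n",
                scratch.id, (unsigned)imm, scratch.id, dest.value);
  }
  void CopyRef(FrameOffset dest, FrameOffset src,
               ManagedRegister scratch) override {
    std::printf("ldr r%d, [sp, #%d] ; str r%d, [sp, #%d]\n",
                scratch.id, src.value, scratch.id, dest.value);
  }
};

int main() {
  PrintingAssembler pa;
  ManagedRegister ip{12};                // caller-chosen scratch register
  pa.StoreImmediateToFrame({8}, 42, ip);
  pa.CopyRef({16}, {8}, ip);
}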
|
D | swap_space_test.cc |
  37  ScratchFile scratch;  in SwapTest() local
  38  int fd = scratch.GetFd();  in SwapTest()
  39  unlink(scratch.GetFilename().c_str());  in SwapTest()
  72  scratch.Close();  in SwapTest()
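SwapTest() shows a standard POSIX idiom: open the scratch file, keep the fd, and unlink the path immediately, so the data stays reachable through the fd but the space is reclaimed automatically when the fd is closed, even if the test crashes. A self-contained sketch of the same idiom (the /tmp template is illustrative):

#include <cstdlib>
#include <unistd.h>

int main() {
  char name[] = "/tmp/swap.XXXXXX";     // template as mkstemp requires
  int fd = mkstemp(name);               // create and open the scratch file
  if (fd < 0) return 1;
  unlink(name);                         // path gone; inode lives while fd is open
  const char data[] = "scratch data";
  if (write(fd, data, sizeof(data)) < 0) return 1;  // still fully usable via fd
  close(fd);                            // last reference dropped: space reclaimed
  return 0;
}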
|
/art/compiler/optimizing/ |
D | parallel_move_resolver.cc |
  264  int scratch = -1;  in AllocateScratchRegister() local
  267  scratch = reg;  in AllocateScratchRegister()
  272  if (scratch == -1) {  in AllocateScratchRegister()
  274  scratch = if_scratch;  in AllocateScratchRegister()
  279  return scratch;  in AllocateScratchRegister()
  381  for (Location scratch : scratches_) {  in AddScratchLocation() local
  382  CHECK(!loc.Equals(scratch));  in AddScratchLocation()
  458  Location scratch = AllocateScratchLocationFor(kind);  in PerformMove() local
  462  move->SetDestination(scratch);  in PerformMove()
  465  UpdateMoveSource(source, scratch);  in PerformMove()
  [all …]
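This is the scratch use the optimizing compiler cares about: when a parallel move forms a cycle, PerformMove() parks one value in a freshly allocated scratch location, rewires the moves that read from that source, and then completes the cycle. A worked model of breaking a two-register cycle (register indices and the array-of-registers machine are illustrative):

#include <array>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::array<int, 4> regs{10, 20, 0, 0};
  // Pending parallel moves as {dest, src} pairs forming the cycle r0 <-> r1.
  std::vector<std::pair<int, int>> moves{{0, 1}, {1, 0}};

  const int scratch = 3;                    // AllocateScratchRegister() result
  regs[scratch] = regs[moves[0].second];    // park r1's value in the scratch
  moves[0].second = scratch;                // UpdateMoveSource(source, scratch)

  regs[moves[1].first] = regs[moves[1].second];  // r1 = r0 (now safe)
  regs[moves[0].first] = regs[moves[0].second];  // r0 = scratch

  std::printf("r0=%d r1=%d\n", regs[0], regs[1]);  // prints r0=20 r1=10
}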
|
D | code_generator_vector_arm_vixl.cc |
  903  /*out*/ vixl32::Register* scratch) {  in VecAddress() argument
  919  *scratch = temps_scope->Acquire();  in VecAddress()
  920  __ Add(*scratch, base, Operand(RegisterFrom(index), ShiftType::LSL, shift));  in VecAddress()
  922  return MemOperand(*scratch, offset);  in VecAddress()
  929  /*out*/ vixl32::Register* scratch) {  in VecAddressUnaligned() argument
  943  __ Add(*scratch, base, offset);  in VecAddressUnaligned()
  945  *scratch = temps_scope->Acquire();  in VecAddressUnaligned()
  946  __ Add(*scratch, base, offset);  in VecAddressUnaligned()
  947  __ Add(*scratch, *scratch, Operand(RegisterFrom(index), ShiftType::LSL, shift));  in VecAddressUnaligned()
  949  return AlignedMemOperand(*scratch, kNoAlignment);  in VecAddressUnaligned()
  [all …]
|
D | parallel_move_test.cc |
  116  Location scratch = GetScratchLocation(kind);  in AllocateScratchLocationFor() local
  117  if (scratch.Equals(Location::NoLocation())) {  in AllocateScratchLocationFor()
  121  scratch = (kind == Location::kRegister) ? Location::RegisterLocation(scratch_index_)  in AllocateScratchLocationFor()
  125  return scratch;  in AllocateScratchLocationFor()
|
D | code_generator_vector_arm64.cc |
  1403  /*out*/ Register* scratch) {  in VecAddress() argument
  1425  *scratch = temps_scope->AcquireSameSizeAs(base);  in VecAddress()
  1426  __ Add(*scratch, base, Operand(WRegisterFrom(index), LSL, shift));  in VecAddress()
  1427  return HeapOperand(*scratch, offset);  in VecAddress()
  1440  Register scratch;  in VisitVecLoad() local
  1459  VecAddress(instruction, &temps, 1, /*is_string_char_at*/ true, &scratch));  in VisitVecLoad()
  1462  if (scratch.IsValid()) {  in VisitVecLoad()
  1463  temps.Release(scratch);  // if used, no longer needed  in VisitVecLoad()
  1467  __ Ldr(reg, VecAddress(instruction, &temps, size, /*is_string_char_at*/ true, &scratch));  in VisitVecLoad()
  1481  __ Ldr(reg, VecAddress(instruction, &temps, size, instruction->IsStringCharAt(), &scratch));  in VisitVecLoad()
  [all …]
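VecAddress() shows the conditional flavor of scratch use: a constant index folds straight into the addressing-mode displacement, while a register index forces an Acquire() so that base + (index << shift) can be materialized, followed by a Release() once the operand has been consumed. A simplified model of that control flow (all types are stand-ins for the VIXL originals):

#include <cstdint>
#include <iostream>
#include <optional>

// Stand-in for VIXL's HeapOperand/MemOperand.
struct MemOperand { uint64_t base; int64_t offset; };

// Stand-in for UseScratchRegisterScope with a single temp register.
struct TempPool {
  uint64_t reg = 0;
  bool in_use = false;
  uint64_t* Acquire() { in_use = true; return &reg; }
  void Release() { in_use = false; }
};

MemOperand VecAddress(uint64_t base, std::optional<int64_t> const_index,
                      uint64_t reg_index, int shift, int64_t offset,
                      TempPool& temps) {
  if (const_index.has_value()) {
    // Constant index folds into the displacement; no scratch needed.
    return {base, offset + (const_index.value() << shift)};
  }
  uint64_t* scratch = temps.Acquire();       // temps_scope->Acquire()
  *scratch = base + (reg_index << shift);    // Add(scratch, base, idx << shift)
  return {*scratch, offset};                 // HeapOperand(*scratch, offset)
}

int main() {
  TempPool temps;
  MemOperand a = VecAddress(0x1000, 3, 0, /*shift=*/2, /*offset=*/16, temps);
  MemOperand b = VecAddress(0x1000, std::nullopt, 5, 2, 16, temps);
  if (temps.in_use) temps.Release();  // mirrors 'if (scratch.IsValid()) temps.Release(scratch)'
  std::cout << std::hex << "a=" << a.base << "+" << a.offset
            << " b=" << b.base << "+" << b.offset << '\n';
}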
|
D | code_generator_arm_vixl.h |
  415  /*out*/ vixl32::Register* scratch);
  420  /*out*/ vixl32::Register* scratch);
|
D | code_generator_arm64.cc |
  1030  Location scratch = GetScratchLocation(kind);  in AllocateScratchLocationFor() local
  1031  if (!scratch.Equals(Location::NoLocation())) {  in AllocateScratchLocationFor()
  1032  return scratch;  in AllocateScratchLocationFor()
  1036  scratch = LocationFrom(vixl_temps_.AcquireX());  in AllocateScratchLocationFor()
  1039  scratch = LocationFrom(codegen_->GetGraph()->HasSIMD()  in AllocateScratchLocationFor()
  1043  AddScratchLocation(scratch);  in AllocateScratchLocationFor()
  1044  return scratch;  in AllocateScratchLocationFor()
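AllocateScratchLocationFor() draws from a finite pool (VIXL's temp registers), records each grant with AddScratchLocation(), and relies on a later release; the regression tests further down (626-checker-arm64-scratch-register, 635-checker-arm64-volatile-load-cc, 646-checker-arraycopy-large-cst-pos) exist because depleting that pool is a real failure mode. A minimal pool sketch under those assumptions (register numbers are illustrative, not ART code):

#include <cassert>
#include <iostream>
#include <utility>
#include <vector>

class ScratchPool {
 public:
  explicit ScratchPool(std::vector<int> regs) : free_(std::move(regs)) {}

  int Acquire() {                 // models vixl_temps_.AcquireX()
    assert(!free_.empty() && "scratch register pool depleted");
    int reg = free_.back();
    free_.pop_back();
    used_.push_back(reg);         // models AddScratchLocation(scratch)
    return reg;
  }
  void ReleaseAll() {             // end of one parallel-move resolution
    for (int reg : used_) free_.push_back(reg);
    used_.clear();
  }

 private:
  std::vector<int> free_, used_;
};

int main() {
  ScratchPool pool({16, 17});     // e.g. the two intra-procedure-call temps
  int a = pool.Acquire();
  int b = pool.Acquire();
  std::cout << "got x" << a << " and x" << b << '\n';
  pool.ReleaseAll();              // forgetting this is how pools get depleted
}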
|
D | code_generator_arm64.h | 349 /*out*/ vixl::aarch64::Register* scratch);
|
/art/dex2oat/ |
D | dex2oat_image_test.cc |
  119  ScratchFile scratch;  in CompileImageAndGetSizes() local
  120  std::string scratch_dir = scratch.GetFilename();  in CompileImageAndGetSizes()
  124  CHECK(!scratch_dir.empty()) << "No directory " << scratch.GetFilename();  in CompileImageAndGetSizes()
  126  if (!CompileBootImage(extra_args, scratch.GetFilename(), &error_msg)) {  in CompileImageAndGetSizes()
  127  LOG(ERROR) << "Failed to compile image " << scratch.GetFilename() << error_msg;  in CompileImageAndGetSizes()
  129  std::string art_file = scratch.GetFilename() + ".art";  in CompileImageAndGetSizes()
  130  std::string oat_file = scratch.GetFilename() + ".oat";  in CompileImageAndGetSizes()
  131  std::string vdex_file = scratch.GetFilename() + ".vdex";  in CompileImageAndGetSizes()
  141  scratch.Close();  in CompileImageAndGetSizes()
|
/art/compiler/utils/mips64/ |
D | assembler_mips64.cc |
  3737  Mips64ManagedRegister scratch = mscratch.AsMips64();  in StoreImmediateToFrame() local
  3738  CHECK(scratch.IsGpuRegister()) << scratch;  in StoreImmediateToFrame()
  3739  LoadConst32(scratch.AsGpuRegister(), imm);  in StoreImmediateToFrame()
  3740  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());  in StoreImmediateToFrame()
  3746  Mips64ManagedRegister scratch = mscratch.AsMips64();  in StoreStackOffsetToThread() local
  3747  CHECK(scratch.IsGpuRegister()) << scratch;  in StoreStackOffsetToThread()
  3748  Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());  in StoreStackOffsetToThread()
  3749  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());  in StoreStackOffsetToThread()
  3759  Mips64ManagedRegister scratch = mscratch.AsMips64();  in StoreSpanning() local
  3761  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());  in StoreSpanning()
  [all …]
|
/art/test/626-checker-arm64-scratch-register/ |
D | info.txt | 1 Regression test checking that the ARM64 scratch register pool is not
|
/art/test/635-checker-arm64-volatile-load-cc/ |
D | info.txt | 1 Regression test checking that the VIXL ARM64 scratch register pool is
|
/art/test/646-checker-arraycopy-large-cst-pos/ |
D | info.txt | 1 Regression test for an issue with a depleted VIXL scratch register
|
/art/test/572-checker-array-get-regression/ |
D | info.txt | 3 used to require too many scratch (temporary) registers.
|
/art/compiler/utils/mips/ |
D | assembler_mips.cc |
  4918  MipsManagedRegister scratch = mscratch.AsMips();  in StoreImmediateToFrame() local
  4919  CHECK(scratch.IsCoreRegister()) << scratch;  in StoreImmediateToFrame()
  4920  LoadConst32(scratch.AsCoreRegister(), imm);  in StoreImmediateToFrame()
  4921  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());  in StoreImmediateToFrame()
  4927  MipsManagedRegister scratch = mscratch.AsMips();  in StoreStackOffsetToThread() local
  4928  CHECK(scratch.IsCoreRegister()) << scratch;  in StoreStackOffsetToThread()
  4929  Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());  in StoreStackOffsetToThread()
  4930  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),  in StoreStackOffsetToThread()
  4941  MipsManagedRegister scratch = mscratch.AsMips();  in StoreSpanning() local
  4943  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());  in StoreSpanning()
  [all …]
|