
Searched refs:imm (Results 1 – 25 of 34) sorted by relevance

/art/test/442-checker-constant-folding/src/
Main.java
1326 long imm = 33L; in ReturnInt33() local
1327 return (int) imm; in ReturnInt33()
1343 float imm = 1.0e34f; in ReturnIntMax() local
1344 return (int) imm; in ReturnIntMax()
1360 double imm = Double.NaN; in ReturnInt0() local
1361 return (int) imm; in ReturnInt0()
1377 int imm = 33; in ReturnLong33() local
1378 return (long) imm; in ReturnLong33()
1394 float imm = 34.0f; in ReturnLong34() local
1395 return (long) imm; in ReturnLong34()
[all …]
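These Checker tests pin down Java's narrowing-conversion rules, which the constant folder must reproduce exactly: NaN converts to 0, an out-of-range float saturates to Integer.MAX_VALUE or MIN_VALUE, and in-range values truncate toward zero. A minimal C++ sketch of that folding rule (FoldFloatToInt is a hypothetical helper for illustration, not ART's):

```cpp
#include <cmath>
#include <cstdint>
#include <limits>

// Java-style (int) cast of a float constant, matching the tests above.
int32_t FoldFloatToInt(float value) {
  if (std::isnan(value)) {
    return 0;  // (int) Double.NaN == 0, hence ReturnInt0().
  }
  if (value >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
    return std::numeric_limits<int32_t>::max();  // 1.0e34f saturates: ReturnIntMax().
  }
  if (value <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
    return std::numeric_limits<int32_t>::min();
  }
  return static_cast<int32_t>(value);  // In range: truncate toward zero.
}
```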
/art/compiler/optimizing/
scheduler_arm64.cc
92 int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant()); in VisitDiv() local
93 if (imm == 0) { in VisitDiv()
96 } else if (imm == 1 || imm == -1) { in VisitDiv()
99 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in VisitDiv()
103 DCHECK(imm <= -2 || imm >= 2); in VisitDiv()
160 int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant()); in VisitRem() local
161 if (imm == 0) { in VisitRem()
164 } else if (imm == 1 || imm == -1) { in VisitRem()
167 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in VisitRem()
171 DCHECK(imm <= -2 || imm >= 2); in VisitRem()
scheduler_arm.cc
484 void SchedulingLatencyVisitorARM::HandleDivRemConstantIntegralLatencies(int32_t imm) { in HandleDivRemConstantIntegralLatencies() argument
485 if (imm == 0) { in HandleDivRemConstantIntegralLatencies()
488 } else if (imm == 1 || imm == -1) { in HandleDivRemConstantIntegralLatencies()
490 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in HandleDivRemConstantIntegralLatencies()
505 int32_t imm = Int32ConstantFrom(rhs->AsConstant()); in VisitDiv() local
506 HandleDivRemConstantIntegralLatencies(imm); in VisitDiv()
568 int32_t imm = Int32ConstantFrom(rhs->AsConstant()); in VisitRem() local
569 HandleDivRemConstantIntegralLatencies(imm); in VisitRem()
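Both scheduling visitors (ARM64 above, ARM32 here) estimate latency by classifying the constant divisor the same way the code generators expand it: 0 and ±1 cost almost nothing, a power-of-two divisor becomes a short shift sequence, and anything else triggers the magic-number multiply. A condensed sketch; the latency values are placeholders, not ART's per-CPU tuning data:

```cpp
#include <cstdint>
#include <cstdlib>
#include <limits>

// Placeholder costs for illustration only.
constexpr uint32_t kSimpleOpLatency = 2;
constexpr uint32_t kShiftSequenceLatency = 5;
constexpr uint32_t kMagicMultiplyLatency = 12;

// Like ART's AbsOrMin(): |x|, except INT64_MIN is returned unchanged
// because -INT64_MIN overflows; its unsigned bit pattern (2^63) is
// still a power of two, so the classification below stays correct.
int64_t AbsOrMin(int64_t x) {
  return (x == std::numeric_limits<int64_t>::min()) ? x : std::llabs(x);
}

bool IsPowerOfTwo(uint64_t x) { return x != 0 && (x & (x - 1)) == 0; }

uint32_t DivRemByConstantLatency(int64_t imm) {
  if (imm == 0) {
    return kSimpleOpLatency;       // No real divide is emitted for a zero divisor.
  } else if (imm == 1 || imm == -1) {
    return kSimpleOpLatency;       // A move or a negate of the dividend.
  } else if (IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm)))) {
    return kShiftSequenceLatency;  // Bias-and-shift expansion.
  } else {
    return kMagicMultiplyLatency;  // Multiply-by-magic expansion (|imm| >= 2).
  }
}
```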
code_generator_x86_64.cc
3220 Immediate imm(second.GetConstant()->AsIntConstant()->GetValue()); in VisitSub() local
3221 __ subl(first.AsRegister<CpuRegister>(), imm); in VisitSub()
3323 Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); in VisitMul() local
3324 __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm); in VisitMul()
3481 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemOneOrMinusOne() local
3483 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3491 if (imm == -1) { in DivRemOneOrMinusOne()
3503 if (imm == -1) { in DivRemOneOrMinusOne()
3522 int64_t imm = Int64FromConstant(second.GetConstant()); in DivByPowerOfTwo() local
3523 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in DivByPowerOfTwo()
[all …]
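DivRemOneOrMinusOne() is the degenerate fast path: no idiv is emitted at all, only a move (divisor 1) or a negate (divisor -1), and a remainder of zero. A sketch of what the emitted code computes; the wrap on negating INT64_MIN matches Java, where Long.MIN_VALUE / -1 == Long.MIN_VALUE:

```cpp
#include <cstdint>

struct DivRemResult {
  int64_t quotient;
  int64_t remainder;
};

// Mirrors the generator's DCHECK(imm == 1 || imm == -1).
DivRemResult DivRemOneOrMinusOne(int64_t dividend, int64_t imm) {
  int64_t quotient;
  if (imm == 1) {
    quotient = dividend;  // Plain move.
  } else {
    // negq: two's-complement negate via unsigned math, so that
    // INT64_MIN wraps back to INT64_MIN instead of overflowing.
    quotient = static_cast<int64_t>(-static_cast<uint64_t>(dividend));
  }
  return {quotient, 0};  // Remainder is always zero for |divisor| == 1.
}
```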
scheduler_arm.h
115 void HandleDivRemConstantIntegralLatencies(int32_t imm);
code_generator_x86.cc
3240 Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); in VisitMul() local
3241 __ imull(out.AsRegister<Register>(), first.AsRegister<Register>(), imm); in VisitMul()
3482 int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); in DivRemOneOrMinusOne() local
3484 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3490 if (imm == -1) { in DivRemOneOrMinusOne()
3502 int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); in DivByPowerOfTwo() local
3503 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in DivByPowerOfTwo()
3504 uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm)); in DivByPowerOfTwo()
3511 int shift = CTZ(imm); in DivByPowerOfTwo()
3514 if (imm < 0) { in DivByPowerOfTwo()
[all …]
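DivByPowerOfTwo() uses the classic bias-and-shift expansion: an arithmetic shift alone would round toward negative infinity, so |d| - 1 is first added to negative dividends, and the quotient is negated when the divisor is negative. A C++ rendering of the 32-bit sequence, assuming arithmetic right shift on signed values (true on every ART target); __builtin_ctz stands in for ART's CTZ:

```cpp
#include <cstdint>

int32_t DivByPowerOfTwoSketch(int32_t dividend, int32_t imm) {
  // abs_imm as the generator computes it via AbsOrMin(); the unsigned
  // representation keeps INT32_MIN (2^31) in range.
  uint32_t abs_imm = (imm == INT32_MIN)
      ? 0x80000000u
      : static_cast<uint32_t>(imm < 0 ? -imm : imm);
  int shift = __builtin_ctz(abs_imm);  // CTZ(imm) at line 3511.
  // Bias negative dividends by |d| - 1 so the shift rounds toward zero.
  int32_t bias = (dividend >> 31) & static_cast<int32_t>(abs_imm - 1);
  int32_t quotient = (dividend + bias) >> shift;  // sar
  if (imm < 0) {
    // neg, wrapping via unsigned math as in the +/-1 path.
    quotient = static_cast<int32_t>(-static_cast<uint32_t>(quotient));
  }
  return quotient;
}
```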
code_generator_mips64.cc
1847 int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant()); in HandleBinaryOp() local
1849 can_use_imm = IsUint<16>(imm); in HandleBinaryOp()
1851 can_use_imm = IsInt<16>(imm); in HandleBinaryOp()
1854 can_use_imm = IsInt<16>(-imm); in HandleBinaryOp()
3090 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemOneOrMinusOne() local
3091 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3096 if (imm == -1) { in DivRemOneOrMinusOne()
3119 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemByPowerOfTwo() local
3120 uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm)); in DivRemByPowerOfTwo()
3134 if (imm < 0) { in DivRemByPowerOfTwo()
[all …]
code_generator_mips.cc
2014 int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant()); in HandleBinaryOp() local
2016 can_use_imm = IsUint<16>(imm); in HandleBinaryOp()
2018 can_use_imm = IsInt<16>(imm); in HandleBinaryOp()
2021 can_use_imm = IsInt<16>(-imm); in HandleBinaryOp()
3562 int32_t imm = second.GetConstant()->AsIntConstant()->GetValue(); in DivRemOneOrMinusOne() local
3563 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3568 if (imm == -1) { in DivRemOneOrMinusOne()
3586 int32_t imm = second.GetConstant()->AsIntConstant()->GetValue(); in DivRemByPowerOfTwo() local
3587 uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm)); in DivRemByPowerOfTwo()
3600 if (imm < 0) { in DivRemByPowerOfTwo()
[all …]
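In both MIPS generators (64-bit above, 32-bit here), can_use_imm asks whether the constant fits the 16-bit immediate field of an I-type instruction: logical ops (andi/ori/xori) zero-extend it, hence IsUint<16>; addition sign-extends, hence IsInt<16>; and subtraction is rewritten as addition of the negated constant, hence IsInt<16>(-imm). A sketch of those fit checks (CanUseImm and its flags are illustrative names, not ART's):

```cpp
#include <cstdint>

template <int kBits>
bool IsUint(int64_t value) {
  return value >= 0 && value < (INT64_C(1) << kBits);
}

template <int kBits>
bool IsInt(int64_t value) {
  return value >= -(INT64_C(1) << (kBits - 1)) &&
         value < (INT64_C(1) << (kBits - 1));
}

// Illustrative wrapper; the real visitors switch on the HInstruction kind.
bool CanUseImm(int64_t imm, bool is_logical_op, bool is_sub) {
  if (is_logical_op) {
    return IsUint<16>(imm);  // andi/ori/xori zero-extend their immediate.
  }
  if (is_sub) {
    // a - imm is emitted as (d)addiu a, -imm. Note: -imm would overflow
    // for INT64_MIN; a full version must special-case it.
    return IsInt<16>(-imm);
  }
  return IsInt<16>(imm);     // (d)addiu sign-extends its immediate.
}
```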
code_generator_arm64.cc
3341 int64_t imm = Int64FromConstant(second.GetConstant()); in FOR_EACH_CONDITION_INSTRUCTION() local
3342 DCHECK(imm == 1 || imm == -1); in FOR_EACH_CONDITION_INSTRUCTION()
3347 if (imm == 1) { in FOR_EACH_CONDITION_INSTRUCTION()
3364 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemByPowerOfTwo() local
3365 uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm)); in DivRemByPowerOfTwo()
3375 if (imm > 0) { in DivRemByPowerOfTwo()
3399 int64_t imm = Int64FromConstant(second.GetConstant()); in GenerateDivRemWithAnyConstant() local
3406 CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift); in GenerateDivRemWithAnyConstant()
3420 if (imm > 0 && magic < 0) { in GenerateDivRemWithAnyConstant()
3422 } else if (imm < 0 && magic > 0) { in GenerateDivRemWithAnyConstant()
[all …]
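For divisors that are neither ±1 nor powers of two, GenerateDivRemWithAnyConstant() multiplies by a precomputed reciprocal ("magic number", per Hacker's Delight) via CalculateMagicAndShiftForDivRem() and shifts; the corrections guarded by `imm > 0 && magic < 0` and `imm < 0 && magic > 0` add or subtract the dividend when the magic constant's sign disagrees with the divisor's. What the emitted sequence computes, sketched for the 32-bit case (the final sign-bit add is the standard closing step of this expansion, not visible in the excerpt):

```cpp
#include <cstdint>

int32_t MagicDivide(int32_t dividend, int32_t imm, int32_t magic, int shift) {
  // The multiply-high step (smulh on ARM64): upper 32 bits of the
  // 64-bit product.
  int64_t product =
      static_cast<int64_t>(dividend) * static_cast<int64_t>(magic);
  int32_t high = static_cast<int32_t>(product >> 32);
  // The sign corrections matched by lines 3420-3422 above.
  if (imm > 0 && magic < 0) high += dividend;
  if (imm < 0 && magic > 0) high -= dividend;
  int32_t quotient = high >> shift;  // Arithmetic shift.
  // Round toward zero: add 1 when the intermediate result is negative.
  quotient += static_cast<int32_t>(static_cast<uint32_t>(quotient) >> 31);
  return quotient;
}
```

For example, with divisor 7 (magic 0x92492493, shift 2), MagicDivide(21, 7, ...) yields 3 and MagicDivide(-21, 7, ...) yields -3, with no divide instruction issued.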
/art/compiler/utils/x86/
assembler_x86.cc
81 void X86Assembler::pushl(const Immediate& imm) { in pushl() argument
83 if (imm.is_int8()) { in pushl()
85 EmitUint8(imm.value() & 0xFF); in pushl()
88 EmitImmediate(imm); in pushl()
106 void X86Assembler::movl(Register dst, const Immediate& imm) { in movl() argument
109 EmitImmediate(imm); in movl()
134 void X86Assembler::movl(const Address& dst, const Immediate& imm) { in movl() argument
138 EmitImmediate(imm); in movl()
249 void X86Assembler::movb(const Address& dst, const Immediate& imm) { in movb() argument
253 CHECK(imm.is_int8()); in movb()
[all …]
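pushl() is a typical size-based encoding choice: x86 has a two-byte `push imm8` (opcode 0x6A, sign-extended by the CPU) and a five-byte `push imm32` (opcode 0x68). The same selection sketched against a plain byte vector rather than ART's emitter:

```cpp
#include <cstdint>
#include <vector>

void EmitPushl(std::vector<uint8_t>& code, int32_t imm) {
  if (imm >= -128 && imm <= 127) {  // Immediate::is_int8()
    code.push_back(0x6A);           // push imm8 (sign-extended by the CPU)
    code.push_back(static_cast<uint8_t>(imm));
  } else {
    code.push_back(0x68);           // push imm32
    uint32_t v = static_cast<uint32_t>(imm);
    for (int i = 0; i < 4; ++i) {   // Little-endian immediate bytes.
      code.push_back(static_cast<uint8_t>(v >> (8 * i)));
    }
  }
}
```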
assembler_x86.h
321 void pushl(const Immediate& imm);
331 void movl(const Address& dst, const Immediate& imm);
346 void rorl(Register reg, const Immediate& imm);
348 void roll(Register reg, const Immediate& imm);
357 void movb(const Address& dst, const Immediate& imm);
365 void movw(const Address& dst, const Immediate& imm);
472 void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
473 void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
530 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
531 void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
[all …]
jni_macro_assembler_x86.h
62 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
jni_macro_assembler_x86.cc
162 void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) { in StoreImmediateToFrame() argument
163 __ movl(Address(ESP, dest), Immediate(imm)); in StoreImmediateToFrame()
/art/compiler/utils/x86_64/
assembler_x86_64.cc
78 void X86_64Assembler::pushq(const Immediate& imm) { in pushq() argument
80 CHECK(imm.is_int32()); // pushq only supports 32b immediate. in pushq()
81 if (imm.is_int8()) { in pushq()
83 EmitUint8(imm.value() & 0xFF); in pushq()
86 EmitImmediate(imm); in pushq()
106 void X86_64Assembler::movq(CpuRegister dst, const Immediate& imm) { in movq() argument
108 if (imm.is_int32()) { in movq()
113 EmitInt32(static_cast<int32_t>(imm.value())); in movq()
117 EmitInt64(imm.value()); in movq()
122 void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) { in movl() argument
[all …]
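movq() makes the same kind of choice in 64-bit mode: when the constant fits in a sign-extended int32 it uses the seven-byte REX.W C7 /0 form, otherwise the ten-byte movabs form (REX.W B8+rd) carrying a full imm64. A sketch for register numbers 0-7; higher registers would additionally need a REX.B bit, omitted here for brevity:

```cpp
#include <cstdint>
#include <vector>

void EmitMovqImm(std::vector<uint8_t>& code, int reg /* 0-7 */, int64_t imm) {
  auto emit_le = [&code](uint64_t v, int bytes) {
    for (int i = 0; i < bytes; ++i) {
      code.push_back(static_cast<uint8_t>(v >> (8 * i)));
    }
  };
  if (imm == static_cast<int32_t>(imm)) {  // Immediate::is_int32()
    code.push_back(0x48);                  // REX.W
    code.push_back(0xC7);                  // mov r/m64, imm32 (sign-extended)
    code.push_back(static_cast<uint8_t>(0xC0 | reg));  // ModRM: /0, direct reg
    emit_le(static_cast<uint32_t>(imm), 4);
  } else {
    code.push_back(0x48);                  // REX.W
    code.push_back(static_cast<uint8_t>(0xB8 | reg));  // movabs reg, imm64
    emit_le(static_cast<uint64_t>(imm), 8);
  }
}
```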
assembler_x86_64.h
350 void pushq(const Immediate& imm);
366 void movq(const Address& dst, const Immediate& imm);
368 void movl(const Address& dst, const Immediate& imm);
380 void movb(const Address& dst, const Immediate& imm);
388 void movw(const Address& dst, const Immediate& imm);
501 void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
502 void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
558 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
559 void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
560 void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
[all …]
jni_macro_assembler_x86_64.h
63 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
jni_macro_assembler_x86_64.cc
200 uint32_t imm, in StoreImmediateToFrame() argument
202 __ movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq? in StoreImmediateToFrame()
/art/compiler/utils/
assembler_test.h
159 for (int64_t imm : imms) { variable
160 ImmType new_imm = CreateImmediate(imm);
179 sreg << imm * multiplier + bias;
213 for (int64_t imm : imms) { in RepeatTemplatedRegistersImmBits() local
214 ImmType new_imm = CreateImmediate(imm); in RepeatTemplatedRegistersImmBits()
239 sreg << imm + bias; in RepeatTemplatedRegistersImmBits()
272 for (int64_t imm : imms) { in RepeatTemplatedImmBitsRegisters() local
273 ImmType new_imm = CreateImmediate(imm); in RepeatTemplatedImmBitsRegisters()
292 sreg << imm; in RepeatTemplatedImmBitsRegisters()
320 for (int64_t imm : imms) { in RepeatTemplatedRegisterImmBits() local
[all …]
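These RepeatTemplated*Imm* drivers cross every register with every candidate immediate, call the assembler entry point under test, and accumulate the expected disassembly text (note the `imm * multiplier + bias` at line 179) for diffing against an external assembler. Their shape, heavily reduced; all names and the output format here are illustrative, not ART's:

```cpp
#include <cstdint>
#include <functional>
#include <sstream>
#include <string>
#include <vector>

template <typename Reg, typename ImmType>
std::string RepeatRegImm(
    const std::function<void(Reg, ImmType)>& assemble,
    const std::vector<Reg>& registers,
    const std::vector<int64_t>& imms,
    const std::string& mnemonic) {
  std::ostringstream expected;
  for (const Reg& reg : registers) {
    for (int64_t imm : imms) {
      ImmType new_imm = static_cast<ImmType>(imm);  // CreateImmediate(imm)
      assemble(reg, new_imm);  // Drive the assembler under test.
      // Build the line an external assembler should produce for this case.
      expected << mnemonic << " r" << reg << ", #" << imm << "\n";
    }
  }
  return expected.str();  // Later diffed against the reference output.
}
```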
jni_macro_assembler.h
79 virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
/art/compiler/utils/arm/
assembler_arm_vixl.h
140 void Vmov(vixl32::DRegister rd, double imm) { in Vmov() argument
141 if (vixl::VFP::IsImmFP64(imm)) { in Vmov()
142 MacroAssembler::Vmov(rd, imm); in Vmov()
144 MacroAssembler::Vldr(rd, imm); in Vmov()
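Vmov() emits the immediate form only when the double is representable as a VFP 8-bit immediate, i.e. ±(16..31)/16 × 2^n with n in [-3, 4]; everything else is loaded from the literal pool via Vldr. A sketch of the bit-pattern test that vixl::VFP::IsImmFP64() performs, based on my reading of the ARM VFPExpandImm encoding rather than VIXL's code:

```cpp
#include <cstdint>
#include <cstring>

// A double fits the VFP immediate form when its low 48 bits are zero,
// exponent bits 61:54 are all equal, and bit 62 is their complement;
// fraction bits 53:48 are unconstrained.
bool IsImmFP64Sketch(double imm) {
  uint64_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));
  if ((bits & 0x0000FFFFFFFFFFFFu) != 0) return false;   // low 48 bits zero
  uint64_t b_bits = (bits >> 54) & 0xFFu;                // bits 61:54
  if (b_bits != 0x00u && b_bits != 0xFFu) return false;  // all equal
  bool bit62 = ((bits >> 62) & 1) != 0;
  return bit62 == (b_bits == 0x00u);                     // bit 62 == NOT(b)
}
```

For instance, 1.0, 2.0, and 1.5 pass this test and take the single-instruction vmov path, while 0.0 and 3.1415 fail it and fall back to the literal-pool load.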
jni_macro_assembler_arm_vixl.h
67 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
jni_macro_assembler_arm_vixl.cc
270 uint32_t imm, in StoreImmediateToFrame() argument
276 asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm); in StoreImmediateToFrame()
/art/compiler/linker/arm/
relative_patcher_thumb2.cc
93 uint32_t imm = (diff16 >> 11) & 0x1u; in PatchPcRelativeReference() local
96 insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8; in PatchPcRelativeReference()
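The patcher rewrites the immediate of a Thumb2 MOVW/MOVT pair, whose 16-bit value is scattered across the 32-bit encoding as imm4:i:imm3:imm8. The same packing as line 96, as a standalone function (the excerpt's `imm` local is the single `i` bit):

```cpp
#include <cstdint>

uint32_t PatchMovwMovtImm16(uint32_t insn, uint32_t value16) {
  uint32_t imm4 = (value16 >> 12) & 0xFu;  // -> insn bits 19:16
  uint32_t i    = (value16 >> 11) & 0x1u;  // -> insn bit 26
  uint32_t imm3 = (value16 >> 8) & 0x7u;   // -> insn bits 14:12
  uint32_t imm8 = value16 & 0xFFu;         // -> insn bits 7:0
  // The 0xfbf08f00 mask keeps the opcode and Rd, clearing the old immediate.
  return (insn & 0xfbf08f00u) | (i << 26) | (imm4 << 16) | (imm3 << 12) | imm8;
}
```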
/art/compiler/utils/arm64/
jni_macro_assembler_arm64.h
69 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
/art/runtime/interpreter/mterp/mips/
header.S
673 #define LOAD_IMM(dest, imm) li dest, imm argument
