Lines Matching full:imm
24 LogicalImmediate LogicalImmediate::Create(uint64_t imm, int width) in Create() argument
26 if ((imm == 0ULL) || (imm == ~0ULL) || in Create()
27 … ((width != RegXSize) && (((imm >> width) != 0) || (imm == (~0ULL >> (RegXSize - width)))))) { in Create()
37 if ((imm & mask) != ((imm >> size) & mask)) { in Create()
47 imm &= mask; in Create()
49 if (IsShiftedMask_64(imm)) { in Create()
50 i = CountTrailingZeros64(imm); in Create()
52 cto = CountTrailingOnes64(imm >> i); in Create()
54 imm |= ~mask; in Create()
55 if (!IsShiftedMask_64(~imm)) { in Create()
59 uint32_t clo = CountLeadingOnes64(imm); in Create()
61 cto = clo + CountTrailingOnes64(imm) - (static_cast<uint32_t>(RegXSize) - size); in Create()
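
The Create() matches above follow the standard AArch64 logical-immediate validation: reject all-zeros/all-ones, shrink to the smallest repeating element size, then require the element to be a (rotated) contiguous run of ones. A minimal sketch of the shifted-mask test that underpins it, with helper names chosen here for illustration rather than taken from the assembler:

    #include <cstdint>

    // Sketch only: a "mask" is 0...01...1, a "shifted mask" is 0...01...10...0.
    static bool IsMask64(uint64_t v)
    {
        return v != 0 && ((v + 1) & v) == 0;
    }

    static bool IsShiftedMask64(uint64_t v)
    {
        return v != 0 && IsMask64((v - 1) | v);   // fill the trailing zeros, then test
    }
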
105 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldp() local
107 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in Ldp()
109 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in Ldp()
111 uint32_t instructionCode = Sf(sf) | op | LoadAndStorePairImm(imm) | Rt2(rt2.GetId()) | in Ldp()
139 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stp() local
141 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in Stp()
143 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in Stp()
145 uint32_t instructionCode = Sf(sf) | op | LoadAndStorePairImm(imm) | Rt2(rt2.GetId()) | in Stp()
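
In both Ldp() and Stp() the byte offset is shifted right before it reaches LoadAndStorePairImm(), because the LDP/STP imm7 field counts register-sized units (8 bytes for X registers, 4 for W). A hedged sketch of that scaling; the 7-bit field width is an architectural assumption, not a constant read out of this file:

    #include <cstdint>

    // Sketch: scale a byte offset into the signed imm7 field of LDP/STP.
    static uint32_t ScalePairOffset(int64_t byteOffset, bool is64BitReg)
    {
        const int shift = is64BitReg ? 3 : 2;            // divide by 8 or by 4
        int64_t scaled = byteOffset >> shift;            // drop the guaranteed-zero low bits
        return static_cast<uint32_t>(scaled) & 0x7F;     // keep the 7-bit two's-complement field
    }
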
172 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldp() local
176 imm >>= 2; in Ldp()
180 imm >>= 3; in Ldp()
184 imm >>= 4; in Ldp()
191 uint32_t instructionCode = opc | op | LoadAndStorePairImm(imm) | Rt2(vt2.GetId()) | in Ldp()
218 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stp() local
222 imm >>= 2; in Stp()
226 imm >>= 3; in Stp()
230 imm >>= 4; in Stp()
237 uint32_t instructionCode = opc | op | LoadAndStorePairImm(imm) | Rt2(vt2.GetId()) | in Stp()
278 uint64_t imm = GetImmOfLdr(operand, scale, regX); in Ldr() local
281 …uint32_t instructionCode = ((regX && (scale == Scale::Q)) << 30) | op | LoadAndStoreImm(imm, isSig… in Ldr()
321 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Str() local
327 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in Str()
329 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in Str()
344 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, isSigned) | in Str()
358 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldur() local
360 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, true) | in Ldur()
370 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stur() local
372 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, true) | in Stur()
377 void AssemblerAarch64::Mov(const Register &rd, const Immediate &imm) in Mov() argument
381 uint64_t immValue = static_cast<uint64_t>(imm.Value()); in Mov()
487 static uint64_t UpdateImm(uint64_t imm, unsigned idx, bool clear) in UpdateImm() argument
491 imm &= ~(HWORD_MASK << idx); in UpdateImm()
494 imm |= HWORD_MASK << idx; in UpdateImm()
496 return imm; in UpdateImm()
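
UpdateImm() clears or fills one 16-bit halfword of the working value at bit index idx; the mov-materialization helpers below use it to mark halfwords that have already been handled. A self-contained restatement with a worked example (HWORD_MASK is assumed to be 0xFFFF):

    #include <cstdint>

    constexpr uint64_t HWORD_MASK = 0xFFFFULL;   // assumption: one 16-bit halfword

    static uint64_t UpdateImmSketch(uint64_t imm, unsigned idx, bool clear)
    {
        if (clear) {
            imm &= ~(HWORD_MASK << idx);         // idx=16 clears bits [31:16]
        } else {
            imm |= HWORD_MASK << idx;            // idx=16 sets bits [31:16] to 0xFFFF
        }
        return imm;
    }
    // UpdateImmSketch(0x1234'5678'9ABC'DEF0, 16, true)  == 0x1234'5678'0000'DEF0
    // UpdateImmSketch(0x1234'5678'9ABC'DEF0, 16, false) == 0x1234'5678'FFFF'DEF0
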
499 bool AssemblerAarch64::TrySequenceOfOnes(const Register &rd, uint64_t imm) in TrySequenceOfOnes() argument
506 int64_t himm = (imm >> shift) & HWORD_MASK; in TrySequenceOfOnes()
534 uint64_t orrImm = imm; in TrySequenceOfOnes()
538 uint64_t himm = (imm >> shift) & HWORD_MASK; in TrySequenceOfOnes()
559 Movk(rd, (imm >> firstMovkShift) & HWORD_MASK, firstMovkShift); in TrySequenceOfOnes()
561 Movk(rd, (imm >> secondMovkShift) & HWORD_MASK, secondMovkShift); in TrySequenceOfOnes()
566 bool AssemblerAarch64::TryReplicateHWords(const Register &rd, uint64_t imm) in TryReplicateHWords() argument
571 uint64_t halfWord = (imm >> idx) & HWORD_MASK; in TryReplicateHWords()
592 imm16 = (imm >> shift) & HWORD_MASK; in TryReplicateHWords()
599 // 3: repeat 3 times; the Imm encoding has already been done. in TryReplicateHWords()
605 imm16 = (imm >> shift) & HWORD_MASK; in TryReplicateHWords()
616 void AssemblerAarch64::EmitMovInstruct(const Register &rd, uint64_t imm, in EmitMovInstruct() argument
622 imm = ~imm; in EmitMovInstruct()
626 if (imm != 0) { in EmitMovInstruct()
627 int lz = static_cast<int>(CountLeadingZeros64(imm)); in EmitMovInstruct()
628 int tz = static_cast<int>(CountTrailingZeros64(imm)); in EmitMovInstruct()
633 uint64_t imm16 = (imm >> firstshift) & HWORD_MASK; in EmitMovInstruct()
636 imm = ~imm; in EmitMovInstruct()
645 imm16 = (imm >> firstshift) & HWORD_MASK; in EmitMovInstruct()
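
TrySequenceOfOnes(), TryReplicateHWords() and EmitMovInstruct() cover the usual 64-bit constant materialization strategy: try an ORR of a logical immediate or a replicated halfword first, otherwise start from MOVZ (mostly-zero values) or MOVN (mostly-one values) and patch the remaining halfwords with MOVK. The sketch below only illustrates the MOVZ-vs-MOVN trade-off; it ignores the ORR and replication shortcuts and the leading/trailing-zero bookkeeping the real code does:

    #include <cstdint>
    #include <cstdio>

    // Count how many move instructions are needed if every halfword that
    // differs from `fill` costs one MOVZ/MOVN/MOVK.
    static int CountMovPieces(uint64_t imm, uint16_t fill)
    {
        int pieces = 0;
        for (int shift = 0; shift < 64; shift += 16) {
            if (((imm >> shift) & 0xFFFF) != fill) {
                ++pieces;
            }
        }
        return pieces == 0 ? 1 : pieces;   // an all-fill value still needs one instruction
    }

    int main()
    {
        uint64_t imm = 0xFFFFFFFF0000ABCDULL;
        // movz path: 3 pieces (0xABCD, 0xFFFF, 0xFFFF); movn path: 2 pieces (0xABCD, 0x0000)
        printf("movz: %d, movn: %d\n", CountMovPieces(imm, 0x0000), CountMovPieces(imm, 0xFFFF));
        return 0;
    }
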
654 void AssemblerAarch64::Movz(const Register &rd, uint64_t imm, int shift) in Movz() argument
656 MovWide(MoveOpCode::MOVZ, rd, imm, shift); in Movz()
659 void AssemblerAarch64::Movk(const Register &rd, uint64_t imm, int shift) in Movk() argument
661 MovWide(MoveOpCode::MOVK, rd, imm, shift); in Movk()
664 void AssemblerAarch64::Movn(const Register &rd, uint64_t imm, int shift) in Movn() argument
666 MovWide(MoveOpCode::MOVN, rd, imm, shift); in Movn()
669 void AssemblerAarch64::MovWide(uint32_t op, const Register &rd, uint64_t imm, int shift) in MovWide() argument
671 uint32_t imm_field = (imm << MOV_WIDE_Imm16_LOWBITS) & MOV_WIDE_Imm16_MASK; in MovWide()
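
MovWide() is the shared encoder behind Movz/Movk/Movn; line 671 builds only the imm16 field, and the shift is carried separately as the 2-bit hw field (shift / 16). A hedged sketch of the full field packing, using the architectural bit positions rather than this file's MOV_WIDE_* constants:

    #include <cstdint>

    // Sketch: pack sf | opc | hw | imm16 | Rd for a move-wide instruction.
    // `opc` is assumed to already contain the fixed opcode bits (MOVZ/MOVK/MOVN).
    static uint32_t PackMovWide(uint32_t opc, bool is64Bit, uint16_t imm16, int shift, uint32_t rd)
    {
        uint32_t sf = is64Bit ? 1u : 0u;
        uint32_t hw = (static_cast<uint32_t>(shift) / 16) & 0x3;   // which halfword slot
        return (sf << 31) | opc | (hw << 21) | (static_cast<uint32_t>(imm16) << 5) | (rd & 0x1F);
    }
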
678 void AssemblerAarch64::Orr(const Register &rd, const Register &rn, const LogicalImmediate &imm) in Orr() argument
680 BitWiseOpImm(ORR_Imm, rd, rn, imm.Value()); in Orr()
683 void AssemblerAarch64::And(const Register &rd, const Register &rn, const LogicalImmediate &imm) in And() argument
685 BitWiseOpImm(AND_Imm, rd, rn, imm.Value()); in And()
688 void AssemblerAarch64::Ands(const Register &rd, const Register &rn, const LogicalImmediate &imm) in Ands() argument
690 BitWiseOpImm(ANDS_Imm, rd, rn, imm.Value()); in Ands()
711 …emblerAarch64::BitWiseOpImm(BitwiseOpCode op, const Register &rd, const Register &rn, uint64_t imm) in BitWiseOpImm() argument
713 uint32_t code = Sf(!rd.IsW()) | op | imm | Rn(rn.GetId()) | Rd(rd.GetId()); in BitWiseOpImm()
776 int64_t imm = static_cast<int64_t>(operand.ImmediateValue()); in Add() local
777 if (imm < 0) { in Add()
778 AddSubImm(SUB_Imm, rd, rn, false, -1 * imm); in Add()
780 AddSubImm(ADD_Imm, rd, rn, false, imm); in Add()
807 int64_t imm = static_cast<int64_t>(operand.ImmediateValue()); in Sub() local
808 if (imm < 0) { in Sub()
809 AddSubImm(ADD_Imm, rd, rn, false, -1 * imm); in Sub()
811 AddSubImm(SUB_Imm, rd, rn, false, imm); in Sub()
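
Add() and Sub() flip to the opposite opcode when the operand's immediate is negative, because the ADD/SUB (immediate) encodings only carry an unsigned value: an add of -n is emitted as a sub of n, and vice versa. A small sketch of that normalization (the enum here is illustrative, not the assembler's own opcode type):

    #include <cstdint>

    enum class AddSubOp { Add, Sub };

    // Sketch: Add(rd, rn, #-8) is effectively AddSubImm(SUB, rd, rn, 8).
    static AddSubOp NormalizeAddImm(int64_t imm, uint64_t &absImm)
    {
        absImm = static_cast<uint64_t>(imm < 0 ? -imm : imm);
        return imm < 0 ? AddSubOp::Sub : AddSubOp::Add;
    }
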
835 bool AssemblerAarch64::IsAddSubImm(uint64_t imm) in IsAddSubImm() argument
838 if (imm <= IMM12_MASK) { in IsAddSubImm()
842 if (((imm & IMM12_MASK) == 0) && ((imm & ~IMM12_MASK) <= IMM12_MASK)) { in IsAddSubImm()
848 …64::AddSubImm(AddSubOpCode op, const Register &rd, const Register &rn, bool setFlags, uint64_t imm) in AddSubImm() argument
850 ASSERT(IsAddSubImm(imm)); in AddSubImm()
853 uint64_t imm12 = imm & (~IMM12_MASK); in AddSubImm()
857 imm12 = imm; in AddSubImm()
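
IsAddSubImm() gates AddSubImm(), which asserts on it at line 850. Architecturally, ADD/SUB (immediate) accepts exactly two shapes: a plain 12-bit value, or a 12-bit value shifted left by 12. A hedged restatement of that predicate with the masks written out (IMM12_MASK assumed to be 0xFFF):

    #include <cstdint>

    constexpr uint64_t IMM12_MASK = 0xFFFULL;   // assumption: the low 12 bits

    // Sketch: true if imm is encodable as imm12, or as imm12 LSL #12.
    static bool IsAddSubImmSketch(uint64_t imm)
    {
        if (imm <= IMM12_MASK) {
            return true;                                                  // plain form
        }
        return ((imm & IMM12_MASK) == 0) && ((imm >> 12) <= IMM12_MASK);  // shifted form
    }
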
909 void AssemblerAarch64::B(int32_t imm) in B() argument
911 uint32_t code = BranchOpCode::Branch | ((imm << BRANCH_Imm26_LOWBITS) & BRANCH_Imm26_MASK); in B()
929 void AssemblerAarch64::Bl(int32_t imm) in Bl() argument
931 uint32_t code = CallOpCode::BL | ((imm << BRANCH_Imm26_LOWBITS) & BRANCH_Imm26_MASK); in Bl()
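
B() and Bl() mask the offset into a 26-bit field; the value passed in is expected to already be an instruction count, since B/BL encode the branch target as a signed word offset (byte distance / 4). A sketch of going from a byte distance to that field, with the mask written out rather than taken from BRANCH_Imm26_MASK:

    #include <cstdint>

    // Sketch: build the imm26 field of B/BL from a byte offset.
    static uint32_t BranchImm26(int64_t byteOffset)
    {
        int64_t instrOffset = byteOffset >> 2;                    // offsets are in instructions
        return static_cast<uint32_t>(instrOffset) & 0x03FFFFFF;   // signed 26-bit field
    }
    // BranchImm26(12) == 3          (forward three instructions)
    // BranchImm26(-8) == 0x3FFFFFE  (backward two, via two's-complement wrap)
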
950 void AssemblerAarch64::B(Condition cond, int32_t imm) in B() argument
952 uint32_t code = BranchOpCode::BranchCond | BranchImm19(imm) | cond; in B()
972 void AssemblerAarch64::Cbz(const Register &rt, int32_t imm) in Cbz() argument
974 uint32_t code = Sf(!rt.IsW()) | BranchOpCode::CBZ | BranchImm19(imm) | rt.GetId(); in Cbz()
978 void AssemblerAarch64::Cbnz(const Register &rt, int32_t imm) in Cbnz() argument
980 uint32_t code = Sf(!rt.IsW()) | BranchOpCode::CBNZ | BranchImm19(imm) | rt.GetId(); in Cbnz()
992 void AssemblerAarch64::Tbz(const Register &rt, int32_t bitPos, int32_t imm) in Tbz() argument
996 uint32_t imm14 = (imm << BRANCH_Imm14_LOWBITS) & BRANCH_Imm14_MASK; in Tbz()
1009 void AssemblerAarch64::Tbnz(const Register &rt, int32_t bitPos, int32_t imm) in Tbnz() argument
1013 uint32_t imm14 = (imm << BRANCH_Imm14_LOWBITS) & BRANCH_Imm14_MASK; in Tbnz()
1023 void AssemblerAarch64::Tst(const Register &rn, const LogicalImmediate &imm) in Tst() argument
1025 Ands(Register(Zero, rn.GetType()), rn, imm); in Tst()
1137 void AssemblerAarch64::Brk(const Immediate &imm) in Brk() argument
1140 (static_cast<uint32_t>(imm.Value()) << BRK_Imm16_LOWBITS) & BRK_Imm16_MASK; in Brk()
1148 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in GetImmOfLdr() local
1151 imm >>= 1; in GetImmOfLdr()
1154 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in GetImmOfLdr()
1156 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in GetImmOfLdr()
1160 return imm; in GetImmOfLdr()
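
GetImmOfLdr() applies the same idea once more for single loads: the unsigned-offset LDR forms encode the offset in multiples of the access size, so the byte offset is shifted right by 1, 2 or 3 according to the operand's scale. A couple of worked values under that assumption:

    // Assuming the usual unsigned-offset scaling:
    //   64-bit access at [base, #40]: imm = 40 >> 3 = 5
    //   32-bit access at [base, #40]: imm = 40 >> 2 = 10
    //   16-bit access at [base, #40]: imm = 40 >> 1 = 20
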