Lines Matching full:imm
28 LogicalImmediate LogicalImmediate::Create(uint64_t imm, int width) in Create() argument
30 if ((imm == 0ULL) || (imm == ~0ULL) || in Create()
31 … ((width != RegXSize) && (((imm >> width) != 0) || (imm == (~0ULL >> (RegXSize - width)))))) { in Create()
41 if ((imm & mask) != ((imm >> size) & mask)) { in Create()
51 imm &= mask; in Create()
53 if (IsShiftedMask_64(imm)) { in Create()
54 i = CountTrailingZeros64(imm); in Create()
56 cto = CountTrailingOnes64(imm >> i); in Create()
58 imm |= ~mask; in Create()
59 if (!IsShiftedMask_64(~imm)) { in Create()
63 uint32_t clo = CountLeadingOnes64(imm); in Create()
65 cto = clo + CountTrailingOnes64(imm) - (static_cast<uint32_t>(RegXSize) - size); in Create()
107 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldp() local
109 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in Ldp()
111 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in Ldp()
113 uint32_t instructionCode = Sf(sf) | op | LoadAndStorePairImm(imm) | Rt2(rt2.GetId()) | in Ldp()
139 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stp() local
141 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in Stp()
143 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in Stp()
145 uint32_t instructionCode = Sf(sf) | op | LoadAndStorePairImm(imm) | Rt2(rt2.GetId()) | in Stp()
170 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldp() local
174 imm >>= 2; in Ldp()
178 imm >>= 3; in Ldp()
182 imm >>= 4; in Ldp()
188 uint32_t instructionCode = opc | op | LoadAndStorePairImm(imm) | Rt2(vt2.GetId()) | in Ldp()
213 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stp() local
217 imm >>= 2; in Stp()
221 imm >>= 3; in Stp()
225 imm >>= 4; in Stp()
231 uint32_t instructionCode = opc | op | LoadAndStorePairImm(imm) | Rt2(vt2.GetId()) | in Stp()
270 uint64_t imm = GetImmOfLdr(operand, scale, regX); in Ldr() local
273 …uint32_t instructionCode = ((regX && (scale == Scale::Q)) << 30) | op | LoadAndStoreImm(imm, isSig… in Ldr()
313 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Str() local
319 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in Str()
321 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in Str()
335 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, isSigned) in Str()
348 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldur() local
350 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, true) in Ldur()
360 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stur() local
362 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, true) in Stur()
367 void AssemblerAarch64::Mov(const Register &rd, const Immediate &imm) in Mov() argument
371 uint64_t immValue = static_cast<uint64_t>(imm.Value()); in Mov()
477 static uint64_t UpdateImm(uint64_t imm, unsigned idx, bool clear) in UpdateImm() argument
481 imm &= ~(HWORD_MASK << idx); in UpdateImm()
484 imm |= HWORD_MASK << idx; in UpdateImm()
486 return imm; in UpdateImm()
489 bool AssemblerAarch64::TrySequenceOfOnes(const Register &rd, uint64_t imm) in TrySequenceOfOnes() argument
496 int64_t himm = (imm >> shift) & HWORD_MASK; in TrySequenceOfOnes()
524 uint64_t orrImm = imm; in TrySequenceOfOnes()
528 uint64_t himm = (imm >> shift) & HWORD_MASK; in TrySequenceOfOnes()
549 Movk(rd, (imm >> firstMovkShift) & HWORD_MASK, firstMovkShift); in TrySequenceOfOnes()
551 Movk(rd, (imm >> secondMovkShift) & HWORD_MASK, secondMovkShift); in TrySequenceOfOnes()
556 bool AssemblerAarch64::TryReplicateHWords(const Register &rd, uint64_t imm) in TryReplicateHWords() argument
561 uint64_t halfWord = (imm >> idx) & HWORD_MASK; in TryReplicateHWords()
582 imm16 = (imm >> shift) & HWORD_MASK; in TryReplicateHWords()
589 // 3 : 3 means repeat 3 times, Imm encode has been done. in TryReplicateHWords()
595 imm16 = (imm >> shift) & HWORD_MASK; in TryReplicateHWords()
606 void AssemblerAarch64::EmitMovInstruct(const Register &rd, uint64_t imm, in EmitMovInstruct() argument
612 imm = ~imm; in EmitMovInstruct()
616 if (imm != 0) { in EmitMovInstruct()
617 int lz = static_cast<int>(CountLeadingZeros64(imm)); in EmitMovInstruct()
618 int tz = static_cast<int>(CountTrailingZeros64(imm)); in EmitMovInstruct()
623 uint64_t imm16 = (imm >> firstshift) & HWORD_MASK; in EmitMovInstruct()
626 imm = ~imm; in EmitMovInstruct()
635 imm16 = (imm >> firstshift) & HWORD_MASK; in EmitMovInstruct()
644 void AssemblerAarch64::Movz(const Register &rd, uint64_t imm, int shift) in Movz() argument
646 MovWide(MoveOpCode::MOVZ, rd, imm, shift); in Movz()
649 void AssemblerAarch64::Movk(const Register &rd, uint64_t imm, int shift) in Movk() argument
651 MovWide(MoveOpCode::MOVK, rd, imm, shift); in Movk()
654 void AssemblerAarch64::Movn(const Register &rd, uint64_t imm, int shift) in Movn() argument
656 MovWide(MoveOpCode::MOVN, rd, imm, shift); in Movn()
659 void AssemblerAarch64::MovWide(uint32_t op, const Register &rd, uint64_t imm, int shift) in MovWide() argument
661 uint32_t imm_field = (imm << MOV_WIDE_Imm16_LOWBITS) & MOV_WIDE_Imm16_MASK; in MovWide()
668 void AssemblerAarch64::Orr(const Register &rd, const Register &rn, const LogicalImmediate &imm) in Orr() argument
670 BitWiseOpImm(ORR_Imm, rd, rn, imm.Value()); in Orr()
673 void AssemblerAarch64::And(const Register &rd, const Register &rn, const LogicalImmediate &imm) in And() argument
675 BitWiseOpImm(AND_Imm, rd, rn, imm.Value()); in And()
678 void AssemblerAarch64::Ands(const Register &rd, const Register &rn, const LogicalImmediate &imm) in Ands() argument
680 BitWiseOpImm(ANDS_Imm, rd, rn, imm.Value()); in Ands()
701 …emblerAarch64::BitWiseOpImm(BitwiseOpCode op, const Register &rd, const Register &rn, uint64_t imm) in BitWiseOpImm() argument
703 uint32_t code = Sf(!rd.IsW()) | op | imm | Rn(rn.GetId()) | Rd(rd.GetId()); in BitWiseOpImm()
768 // and is the preferred disassembly when imms + 1 == imm in Lsl()
778 int64_t imm = static_cast<int64_t>(operand.ImmediateValue()); in Add() local
779 if (imm < 0) { in Add()
780 AddSubImm(SUB_Imm, rd, rn, false, -1 * imm); in Add()
782 AddSubImm(ADD_Imm, rd, rn, false, imm); in Add()
809 int64_t imm = static_cast<int64_t>(operand.ImmediateValue()); in Sub() local
810 if (imm < 0) { in Sub()
811 AddSubImm(ADD_Imm, rd, rn, false, -1 * imm); in Sub()
813 AddSubImm(SUB_Imm, rd, rn, false, imm); in Sub()
837 bool AssemblerAarch64::IsAddSubImm(uint64_t imm) in IsAddSubImm() argument
840 if (imm <= IMM12_MASK) { in IsAddSubImm()
844 if (((imm & IMM12_MASK) == 0) && ((imm & ~IMM12_MASK) <= IMM12_MASK)) { in IsAddSubImm()
850 …64::AddSubImm(AddSubOpCode op, const Register &rd, const Register &rn, bool setFlags, uint64_t imm) in AddSubImm() argument
852 ASSERT(IsAddSubImm(imm)); in AddSubImm()
855 uint64_t imm12 = imm & (~IMM12_MASK); in AddSubImm()
859 imm12 = imm; in AddSubImm()
911 void AssemblerAarch64::B(int32_t imm) in B() argument
913 uint32_t code = BranchOpCode::Branch | ((imm << BRANCH_Imm26_LOWBITS) & BRANCH_Imm26_MASK); in B()
931 void AssemblerAarch64::Bl(int32_t imm) in Bl() argument
933 uint32_t code = CallOpCode::BL | ((imm << BRANCH_Imm26_LOWBITS) & BRANCH_Imm26_MASK); in Bl()
952 void AssemblerAarch64::B(Condition cond, int32_t imm) in B() argument
954 uint32_t code = BranchOpCode::BranchCond | BranchImm19(imm) | cond; in B()
974 void AssemblerAarch64::Cbz(const Register &rt, int32_t imm) in Cbz() argument
976 uint32_t code = Sf(!rt.IsW()) | BranchOpCode::CBZ | BranchImm19(imm) | rt.GetId(); in Cbz()
980 void AssemblerAarch64::Cbnz(const Register &rt, int32_t imm) in Cbnz() argument
982 uint32_t code = Sf(!rt.IsW()) | BranchOpCode::CBNZ | BranchImm19(imm) | rt.GetId(); in Cbnz()
994 void AssemblerAarch64::Tbz(const Register &rt, int32_t bitPos, int32_t imm) in Tbz() argument
998 uint32_t imm14 = (imm << BRANCH_Imm14_LOWBITS) & BRANCH_Imm14_MASK; in Tbz()
1011 void AssemblerAarch64::Tbnz(const Register &rt, int32_t bitPos, int32_t imm) in Tbnz() argument
1015 uint32_t imm14 = (imm <<BRANCH_Imm14_LOWBITS) & BRANCH_Imm14_MASK; in Tbnz()
1020 void AssemblerAarch64::Tst(const Register &rn, const LogicalImmediate &imm) in Tst() argument
1022 Ands(Register(Zero, rn.GetType()), rn, imm); in Tst()
1134 void AssemblerAarch64::Brk(const Immediate &imm) in Brk() argument
1137 (static_cast<uint32_t>(imm.Value()) << BRK_Imm16_LOWBITS) & BRK_Imm16_MASK; in Brk()
1145 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in GetImmOfLdr() local
1148 imm >>= 1; in GetImmOfLdr()
1151 imm >>= 3; // 3: 64 RegSize, imm/8 to remove trailing zeros in GetImmOfLdr()
1153 imm >>= 2; // 2: 32 RegSize, imm/4 to remove trailing zeros in GetImmOfLdr()
1157 return imm; in GetImmOfLdr()