Lines Matching full:imm
28 LogicalImmediate LogicalImmediate::Create(uint64_t imm, int width) in Create() argument
30 if ((imm == 0ULL) || (imm == ~0ULL) || in Create()
31 … ((width != RegXSize) && (((imm >> width) != 0) || (imm == (~0ULL >> (RegXSize - width)))))) { in Create()
41 if ((imm & mask) != ((imm >> size) & mask)) { in Create()
51 imm &= mask; in Create()
53 if (IsShiftedMask_64(imm)) { in Create()
54 i = CountTrailingZeros64(imm); in Create()
56 cto = CountTrailingOnes64(imm >> i); in Create()
58 imm |= ~mask; in Create()
59 if (!IsShiftedMask_64(~imm)) { in Create()
63 uint32_t clo = CountLeadingOnes64(imm); in Create()
65 cto = clo + CountTrailingOnes64(imm) - (static_cast<uint32_t>(RegXSize) - size); in Create()
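
Note on the Create() hits above (source lines 28-65): the function refuses 0, ~0 and anything that does not fit the requested width, then searches for the smallest repeating element size (the mask comparison at line 41) and checks that each element is a single rotated run of ones, which is what the IsShiftedMask_64 / CountTrailingOnes64 / CountLeadingOnes64 calls establish. Below is a minimal, self-contained sketch of the "shifted mask" predicate those checks rely on; the helper name and implementation are an LLVM-style illustration, not the project's code.

    #include <cassert>
    #include <cstdint>

    // Sketch only: a "shifted mask" is a single contiguous run of ones,
    // e.g. 0x00F0. This mirrors the property IsShiftedMask_64 tests.
    static bool IsShiftedMask64(uint64_t v)
    {
        const uint64_t lowestBit = v & (~v + 1);          // isolate the lowest set bit
        return v != 0 && ((v + lowestBit) & v) == 0;      // adding it must clear the whole run
    }

    int main()
    {
        assert(IsShiftedMask64(0x00000000000000F0ULL));   // run of four ones: encodable
        assert(!IsShiftedMask64(0x0000000000000101ULL));  // two separate ones: rejected
        assert(!IsShiftedMask64(0x0ULL));                 // all-zeros is rejected up front (line 30)
        return 0;
    }

Replicated patterns such as 0xF0F0F0F0F0F0F0F0 only pass after the element-size reduction performed by the comparison at line 41 (imm against imm >> size under a mask).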
109 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldp() local
111 imm >>= 3; // 3: 64-bit RegSize, imm / 8 to remove the trailing zero bits in Ldp()
113 imm >>= 2; // 2: 32-bit RegSize, imm / 4 to remove the trailing zero bits in Ldp()
115 uint32_t instructionCode = Sf(sf) | op | LoadAndStorePairImm(imm) | Rt2(rt2.GetId()) | in Ldp()
143 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stp() local
145 imm >>= 3; // 3: 64-bit RegSize, imm / 8 to remove the trailing zero bits in Stp()
147 imm >>= 2; // 2: 32-bit RegSize, imm / 4 to remove the trailing zero bits in Stp()
149 uint32_t instructionCode = Sf(sf) | op | LoadAndStorePairImm(imm) | Rt2(rt2.GetId()) | in Stp()
176 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldp() local
180 imm >>= 2; in Ldp()
184 imm >>= 3; in Ldp()
188 imm >>= 4; in Ldp()
195 uint32_t instructionCode = opc | op | LoadAndStorePairImm(imm) | Rt2(vt2.GetId()) | in Ldp()
222 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stp() local
226 imm >>= 2; in Stp()
230 imm >>= 3; in Stp()
234 imm >>= 4; in Stp()
241 uint32_t instructionCode = opc | op | LoadAndStorePairImm(imm) | Rt2(vt2.GetId()) | in Stp()
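
The Ldp()/Stp() hits (source lines 109-149 for general registers, 176-241 for SIMD registers) all pre-shift the byte offset before LoadAndStorePairImm: by 2, 3 or 4 depending on whether the pair is 32-, 64- or 128-bit wide, because the instruction's 7-bit field counts multiples of the access size. A hedged sketch of that scaling; the helper name and the assumed field position (bits [21:15]) are illustrative, not taken from the project:

    #include <cassert>
    #include <cstdint>

    // Sketch: scale a byte offset into the signed 7-bit LDP/STP field,
    // assuming the architectural layout (imm7 at bits [21:15]).
    static uint32_t EncodePairImm7(int64_t byteOffset, unsigned accessSizeLog2)
    {
        const int64_t size = int64_t{1} << accessSizeLog2;
        assert(byteOffset % size == 0);                    // must be size-aligned
        const int64_t scaled = byteOffset / size;          // what the imm >>= 2/3/4 hits do
        assert(scaled >= -64 && scaled <= 63);             // imm7 is signed
        return (static_cast<uint32_t>(scaled) & 0x7F) << 15;
    }

    int main()
    {
        // ldp x0, x1, [sp, #16]: 64-bit registers, so the field holds 16 / 8 = 2.
        assert(EncodePairImm7(16, 3) == (2u << 15));
        // ldp q0, q1, [sp, #32]: 128-bit registers, so the field holds 32 / 16 = 2.
        assert(EncodePairImm7(32, 4) == (2u << 15));
        return 0;
    }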
282 uint64_t imm = GetImmOfLdr(operand, scale, regX); in Ldr() local
285 …uint32_t instructionCode = ((regX && (scale == Scale::Q)) << 30) | op | LoadAndStoreImm(imm, isSig… in Ldr()
325 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Str() local
331 imm >>= 3; // 3: 64-bit RegSize, imm / 8 to remove the trailing zero bits in Str()
333 imm >>= 2; // 2: 32-bit RegSize, imm / 4 to remove the trailing zero bits in Str()
348 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, isSigned) | in Str()
362 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Ldur() local
364 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, true) | in Ldur()
374 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in Stur() local
376 uint32_t instructionCode = (regX << 30) | op | LoadAndStoreImm(imm, true) | in Stur()
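
The Ldr()/Str() hits (source lines 282-348) divide the offset by the access size before LoadAndStoreImm, targeting the scaled unsigned-offset form, while Ldur()/Stur() (lines 362-376) pass the raw byte offset with the signed flag set, targeting the unscaled signed 9-bit form. A sketch of that choice; PickLoadForm and its limits are assumptions based on the architectural ranges, not the project's API:

    #include <cassert>
    #include <cstdint>
    #include <optional>

    enum class LoadForm { ScaledImm12, UnscaledImm9 };

    // Sketch: decide which addressing form fits a given byte offset, assuming
    // a 12-bit scaled field for LDR/STR and a signed 9-bit byte offset for LDUR/STUR.
    static std::optional<LoadForm> PickLoadForm(int64_t byteOffset, unsigned accessSizeLog2)
    {
        const int64_t size = int64_t{1} << accessSizeLog2;
        if (byteOffset >= 0 && byteOffset % size == 0 && (byteOffset / size) <= 0xFFF) {
            return LoadForm::ScaledImm12;   // matches the imm >>= 2 / imm >>= 3 hits in Ldr()/Str()
        }
        if (byteOffset >= -256 && byteOffset <= 255) {
            return LoadForm::UnscaledImm9;  // matches LoadAndStoreImm(imm, true) in Ldur()/Stur()
        }
        return std::nullopt;                // offset must be materialised in a register first
    }

    int main()
    {
        assert(PickLoadForm(4096, 3) == LoadForm::ScaledImm12);  // ldr x0, [x1, #4096]
        assert(PickLoadForm(-8, 3) == LoadForm::UnscaledImm9);   // ldur x0, [x1, #-8]
        return 0;
    }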
381 void AssemblerAarch64::Mov(const Register &rd, const Immediate &imm) in Mov() argument
385 uint64_t immValue = static_cast<uint64_t>(imm.Value()); in Mov()
491 static uint64_t UpdateImm(uint64_t imm, unsigned idx, bool clear) in UpdateImm() argument
495 imm &= ~(HWORD_MASK << idx); in UpdateImm()
498 imm |= HWORD_MASK << idx; in UpdateImm()
500 return imm; in UpdateImm()
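
UpdateImm() (source lines 491-500) clears or sets one 16-bit half-word while Mov() plans its MOVZ/MOVN/MOVK sequence. A standalone restatement, assuming HWORD_MASK == 0xFFFF, just to make the effect concrete:

    #include <cassert>
    #include <cstdint>

    // Sketch of the UpdateImm idea; idx is the bit position of a half-word
    // (0, 16, 32 or 48) and HWORD_MASK is assumed to be 0xFFFF.
    static uint64_t UpdateImm(uint64_t imm, unsigned idx, bool clear)
    {
        constexpr uint64_t HWORD_MASK = 0xFFFFULL;
        if (clear) {
            imm &= ~(HWORD_MASK << idx);   // zero that half-word (it will come from MOVZ/MOVK)
        } else {
            imm |= HWORD_MASK << idx;      // force that half-word to all-ones (MOVN path)
        }
        return imm;
    }

    int main()
    {
        assert(UpdateImm(0x123456789ABCDEF0ULL, 16, true)  == 0x123456780000DEF0ULL);
        assert(UpdateImm(0x123456789ABCDEF0ULL, 48, false) == 0xFFFF56789ABCDEF0ULL);
        return 0;
    }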
503 bool AssemblerAarch64::TrySequenceOfOnes(const Register &rd, uint64_t imm) in TrySequenceOfOnes() argument
510 int64_t himm = (imm >> shift) & HWORD_MASK; in TrySequenceOfOnes()
538 uint64_t orrImm = imm; in TrySequenceOfOnes()
542 uint64_t himm = (imm >> shift) & HWORD_MASK; in TrySequenceOfOnes()
563 Movk(rd, (imm >> firstMovkShift) & HWORD_MASK, firstMovkShift); in TrySequenceOfOnes()
565 Movk(rd, (imm >> secondMovkShift) & HWORD_MASK, secondMovkShift); in TrySequenceOfOnes()
570 bool AssemblerAarch64::TryReplicateHWords(const Register &rd, uint64_t imm) in TryReplicateHWords() argument
575 uint64_t halfWord = (imm >> idx) & HWORD_MASK; in TryReplicateHWords()
596 imm16 = (imm >> shift) & HWORD_MASK; in TryReplicateHWords()
603 // 3: the half-word must repeat 3 times; its Imm encoding has already been done. in TryReplicateHWords()
609 imm16 = (imm >> shift) & HWORD_MASK; in TryReplicateHWords()
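
TrySequenceOfOnes() (source lines 503-565) and TryReplicateHWords() (lines 570-609) are the two shortcuts tried before the generic move sequence: the first fits values whose half-words are mostly 0xFFFF (a MOVN plus at most two MOVKs, per the firstMovkShift/secondMovkShift hits), the second fits values where one half-word repeats (an ORR with a replicated logical immediate plus a MOVK or two). The classification below is an assumed illustration of those two tests, not the project's exact heuristics:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t samples[] = { 0xFFFFFFFF1234FFFFULL, 0xABCDABCDABCD1234ULL };
        for (uint64_t imm : samples) {
            int ones = 0;                                   // half-words equal to 0xFFFF
            int bestRepeat = 0;                             // occurrences of the most frequent half-word
            for (int i = 0; i < 64; i += 16) {
                const uint64_t hw = (imm >> i) & 0xFFFFULL;
                ones += (hw == 0xFFFF);
                int repeat = 0;
                for (int j = 0; j < 64; j += 16) {
                    repeat += (((imm >> j) & 0xFFFFULL) == hw);
                }
                if (repeat > bestRepeat) {
                    bestRepeat = repeat;
                }
            }
            std::printf("0x%016llx: %d ones half-words, best repeat %d -> %s\n",
                        static_cast<unsigned long long>(imm), ones, bestRepeat,
                        ones >= 2 ? "sequence-of-ones path" : "replicate-half-words path");
        }
        return 0;
    }

For the first sample a single MOVN already suffices: ~(0xEDCB << 16) is 0xFFFFFFFF1234FFFF, so no MOVK is needed at all.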
620 void AssemblerAarch64::EmitMovInstruct(const Register &rd, uint64_t imm, in EmitMovInstruct() argument
626 imm = ~imm; in EmitMovInstruct()
630 if (imm != 0) { in EmitMovInstruct()
631 int lz = static_cast<int>(CountLeadingZeros64(imm)); in EmitMovInstruct()
632 int tz = static_cast<int>(CountTrailingZeros64(imm)); in EmitMovInstruct()
637 uint64_t imm16 = (imm >> firstshift) & HWORD_MASK; in EmitMovInstruct()
640 imm = ~imm; in EmitMovInstruct()
649 imm16 = (imm >> firstshift) & HWORD_MASK; in EmitMovInstruct()
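
EmitMovInstruct() (source lines 620-649) is the generic fallback: when the value is mostly ones it is inverted so the sequence opens with MOVN, otherwise it opens with MOVZ, and the remaining half-words are patched with MOVK, with the leading/trailing zero counts locating them. The sketch below illustrates only the opening-instruction choice and the resulting MOVK count; it is not the project's code:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t imm = 0xFFFFFFFF0000ABCDULL;
        int zeroHWords = 0;
        int onesHWords = 0;
        for (int shift = 0; shift < 64; shift += 16) {
            const uint64_t hw = (imm >> shift) & 0xFFFFULL;
            zeroHWords += (hw == 0x0000);
            onesHWords += (hw == 0xFFFF);
        }
        const bool startWithMovn = onesHWords > zeroHWords;   // here 2 > 1, so MOVN
        const int covered = startWithMovn ? onesHWords : zeroHWords;
        std::printf("%s-based sequence, %d MOVK(s) after the opening move\n",
                    startWithMovn ? "MOVN" : "MOVZ", 4 - covered - 1);
        return 0;
    }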
658 void AssemblerAarch64::Movz(const Register &rd, uint64_t imm, int shift) in Movz() argument
660 MovWide(MoveOpCode::MOVZ, rd, imm, shift); in Movz()
663 void AssemblerAarch64::Movk(const Register &rd, uint64_t imm, int shift) in Movk() argument
665 MovWide(MoveOpCode::MOVK, rd, imm, shift); in Movk()
668 void AssemblerAarch64::Movn(const Register &rd, uint64_t imm, int shift) in Movn() argument
670 MovWide(MoveOpCode::MOVN, rd, imm, shift); in Movn()
673 void AssemblerAarch64::MovWide(uint32_t op, const Register &rd, uint64_t imm, int shift) in MovWide() argument
675 uint32_t imm_field = (imm << MOV_WIDE_Imm16_LOWBITS) & MOV_WIDE_Imm16_MASK; in MovWide()
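
Movz/Movk/Movn (source lines 658-670) all funnel into MovWide() (line 673), which places the 16-bit payload via MOV_WIDE_Imm16_LOWBITS/_MASK. A sketch of the architectural layout those constants presumably describe (imm16 at bits [20:5], the half-word selector hw = shift / 16 at bits [22:21]); the helper itself is illustrative:

    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeMovWidePayload(uint64_t imm16, int shift)
    {
        assert(imm16 <= 0xFFFF);
        assert(shift == 0 || shift == 16 || shift == 32 || shift == 48);
        const uint32_t hw = static_cast<uint32_t>(shift / 16);
        return (hw << 21) | (static_cast<uint32_t>(imm16) << 5);
    }

    int main()
    {
        // movz x0, #0x1234, lsl #32 -> hw = 2, imm16 = 0x1234
        assert(EncodeMovWidePayload(0x1234, 32) == ((2u << 21) | (0x1234u << 5)));
        return 0;
    }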
682 void AssemblerAarch64::Orr(const Register &rd, const Register &rn, const LogicalImmediate &imm) in Orr() argument
684 BitWiseOpImm(ORR_Imm, rd, rn, imm.Value()); in Orr()
687 void AssemblerAarch64::And(const Register &rd, const Register &rn, const LogicalImmediate &imm) in And() argument
689 BitWiseOpImm(AND_Imm, rd, rn, imm.Value()); in And()
692 void AssemblerAarch64::Ands(const Register &rd, const Register &rn, const LogicalImmediate &imm) in Ands() argument
694 BitWiseOpImm(ANDS_Imm, rd, rn, imm.Value()); in Ands()
715 …emblerAarch64::BitWiseOpImm(BitwiseOpCode op, const Register &rd, const Register &rn, uint64_t imm) in BitWiseOpImm() argument
717 uint32_t code = Sf(!rd.IsW()) | op | imm | Rn(rn.GetId()) | Rd(rd.GetId()); in BitWiseOpImm()
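
Orr/And/Ands (source lines 682-694) take a LogicalImmediate whose Value() is already the packed N:immr:imms encoding, which is why BitWiseOpImm() (line 717) simply ORs it into the instruction word. A sketch of that packing under the assumed architectural layout (N at bit 22, immr at bits [21:16], imms at bits [15:10]); the helper is illustrative:

    #include <cassert>
    #include <cstdint>

    static uint32_t PackLogicalImmFields(unsigned n, unsigned immr, unsigned imms)
    {
        assert(n <= 1 && immr <= 0x3F && imms <= 0x3F);
        return (n << 22) | (immr << 16) | (imms << 10);
    }

    int main()
    {
        // #0xFF as a 64-bit logical immediate: one 64-bit element (N=1),
        // a run of eight ones (imms=0b000111), no rotation (immr=0).
        assert(PackLogicalImmFields(1, 0, 0b000111) == ((1u << 22) | (7u << 10)));
        return 0;
    }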
780 int64_t imm = static_cast<int64_t>(operand.ImmediateValue()); in Add() local
781 if (imm < 0) { in Add()
782 AddSubImm(SUB_Imm, rd, rn, false, -1 * imm); in Add()
784 AddSubImm(ADD_Imm, rd, rn, false, imm); in Add()
811 int64_t imm = static_cast<int64_t>(operand.ImmediateValue()); in Sub() local
812 if (imm < 0) { in Sub()
813 AddSubImm(ADD_Imm, rd, rn, false, -1 * imm); in Sub()
815 AddSubImm(SUB_Imm, rd, rn, false, imm); in Sub()
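
The Add()/Sub() hits (source lines 780-815) fold a negative immediate into the opposite opcode, so AddSubImm() only ever sees a non-negative value. A minimal sketch of that sign flip; the OpCode enum and the printout are illustrative:

    #include <cstdint>
    #include <cstdio>

    enum class OpCode { Add, Sub };

    // Sketch: negate the immediate and swap the opcode, as the imm < 0
    // branches do (this assumes imm != INT64_MIN, just as -1 * imm does).
    static void EmitAddSub(OpCode op, int64_t imm)
    {
        if (imm < 0) {
            op = (op == OpCode::Add) ? OpCode::Sub : OpCode::Add;
            imm = -imm;
        }
        std::printf("%s #%lld\n", op == OpCode::Add ? "add" : "sub",
                    static_cast<long long>(imm));
    }

    int main()
    {
        EmitAddSub(OpCode::Add, -16);   // prints: sub #16
        EmitAddSub(OpCode::Sub, 32);    // prints: sub #32
        return 0;
    }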
839 bool AssemblerAarch64::IsAddSubImm(uint64_t imm) in IsAddSubImm() argument
842 if (imm <= IMM12_MASK) { in IsAddSubImm()
846 if (((imm & IMM12_MASK) == 0) && ((imm & ~IMM12_MASK) <= IMM12_MASK)) { in IsAddSubImm()
852 …64::AddSubImm(AddSubOpCode op, const Register &rd, const Register &rn, bool setFlags, uint64_t imm) in AddSubImm() argument
854 ASSERT(IsAddSubImm(imm)); in AddSubImm()
857 uint64_t imm12 = imm & (~IMM12_MASK); in AddSubImm()
861 imm12 = imm; in AddSubImm()
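
IsAddSubImm() (source lines 839-846) accepts exactly the two architectural forms: a plain 12-bit immediate, or a 12-bit immediate shifted left by 12 (LSL #12). A standalone restatement, assuming IMM12_MASK == 0xFFF:

    #include <cassert>
    #include <cstdint>

    static bool FitsAddSubImm(uint64_t imm)
    {
        constexpr uint64_t kImm12Mask = 0xFFFULL;
        if (imm <= kImm12Mask) {
            return true;                                                  // add xd, xn, #imm12
        }
        return ((imm & kImm12Mask) == 0) && ((imm >> 12) <= kImm12Mask);  // add xd, xn, #imm12, lsl #12
    }

    int main()
    {
        assert(FitsAddSubImm(0xFFF));      // largest unshifted value
        assert(FitsAddSubImm(0x123000));   // 0x123 << 12, takes the LSL #12 form
        assert(!FitsAddSubImm(0x1001));    // needs more than 12 significant bits
        return 0;
    }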
913 void AssemblerAarch64::B(int32_t imm) in B() argument
915 uint32_t code = BranchOpCode::Branch | ((imm << BRANCH_Imm26_LOWBITS) & BRANCH_Imm26_MASK); in B()
933 void AssemblerAarch64::Bl(int32_t imm) in Bl() argument
935 uint32_t code = CallOpCode::BL | ((imm << BRANCH_Imm26_LOWBITS) & BRANCH_Imm26_MASK); in Bl()
954 void AssemblerAarch64::B(Condition cond, int32_t imm) in B() argument
956 uint32_t code = BranchOpCode::BranchCond | BranchImm19(imm) | cond; in B()
976 void AssemblerAarch64::Cbz(const Register &rt, int32_t imm) in Cbz() argument
978 uint32_t code = Sf(!rt.IsW()) | BranchOpCode::CBZ | BranchImm19(imm) | rt.GetId(); in Cbz()
982 void AssemblerAarch64::Cbnz(const Register &rt, int32_t imm) in Cbnz() argument
984 uint32_t code = Sf(!rt.IsW()) | BranchOpCode::CBNZ | BranchImm19(imm) | rt.GetId(); in Cbnz()
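
The branch hits (source lines 913-984) shift an already word-granular offset into a 26-bit field for B/BL and, via BranchImm19, a 19-bit field for B.cond/CBZ/CBNZ. A sketch of both encodings starting from a byte distance, assuming the architectural field positions (imm26 at bits [25:0], imm19 at bits [23:5]); the helpers and range asserts are illustrative:

    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeImm26(int64_t byteDistance)
    {
        assert(byteDistance % 4 == 0);                     // branches are word-granular
        const int64_t words = byteDistance / 4;
        assert(words >= -(1 << 25) && words < (1 << 25));  // +/-128 MiB reach
        return static_cast<uint32_t>(words) & 0x03FFFFFF;
    }

    static uint32_t EncodeImm19(int64_t byteDistance)
    {
        assert(byteDistance % 4 == 0);
        const int64_t words = byteDistance / 4;
        assert(words >= -(1 << 18) && words < (1 << 18));  // +/-1 MiB reach
        return (static_cast<uint32_t>(words) & 0x0007FFFF) << 5;
    }

    int main()
    {
        assert(EncodeImm26(8) == 2);                       // two instructions forward
        assert(EncodeImm19(-4) == (0x7FFFFu << 5));        // one instruction back, sign bits kept
        return 0;
    }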
996 void AssemblerAarch64::Tbz(const Register &rt, int32_t bitPos, int32_t imm) in Tbz() argument
1000 uint32_t imm14 = (imm << BRANCH_Imm14_LOWBITS) & BRANCH_Imm14_MASK; in Tbz()
1013 void AssemblerAarch64::Tbnz(const Register &rt, int32_t bitPos, int32_t imm) in Tbnz() argument
1017 uint32_t imm14 = (imm << BRANCH_Imm14_LOWBITS) & BRANCH_Imm14_MASK; in Tbnz()
1022 void AssemblerAarch64::Tst(const Register &rn, const LogicalImmediate &imm) in Tst() argument
1024 Ands(Register(Zero, rn.GetType()), rn, imm); in Tst()
1136 void AssemblerAarch64::Brk(const Immediate &imm) in Brk() argument
1139 (static_cast<uint32_t>(imm.Value()) << BRK_Imm16_LOWBITS) & BRK_Imm16_MASK; in Brk()
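
Tbz/Tbnz (source lines 996-1017) combine a 14-bit word offset with the tested bit number, which the instruction splits across a b5 and a b40 field, while Tst (line 1022) is simply Ands against the zero register and Brk (line 1136) carries a plain 16-bit payload. A sketch of the TBZ/TBNZ fields under the usual architectural assumption (b5 at bit 31, b40 at bits [23:19], imm14 at bits [18:5]); the helper is illustrative:

    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeTbzFields(int bitPos, int64_t byteDistance)
    {
        assert(bitPos >= 0 && bitPos < 64);
        assert(byteDistance % 4 == 0);
        const int64_t words = byteDistance / 4;
        assert(words >= -(1 << 13) && words < (1 << 13));   // +/-32 KiB reach
        const uint32_t b5    = (static_cast<uint32_t>(bitPos) >> 5) << 31;
        const uint32_t b40   = (static_cast<uint32_t>(bitPos) & 0x1F) << 19;
        const uint32_t imm14 = (static_cast<uint32_t>(words) & 0x3FFF) << 5;
        return b5 | b40 | imm14;
    }

    int main()
    {
        // Test bit 33 and skip two instructions forward.
        assert(EncodeTbzFields(33, 8) == ((1u << 31) | (1u << 19) | (2u << 5)));
        return 0;
    }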
1147 uint64_t imm = static_cast<uint64_t>(operand.GetImmediate().Value()); in GetImmOfLdr() local
1150 imm >>= 1; in GetImmOfLdr()
1153 imm >>= 3; // 3: 64-bit RegSize, imm / 8 to remove the trailing zero bits in GetImmOfLdr()
1155 imm >>= 2; // 2: 32-bit RegSize, imm / 4 to remove the trailing zero bits in GetImmOfLdr()
1159 return imm; in GetImmOfLdr()
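
GetImmOfLdr() (source lines 1147-1159) is the shared helper behind the Ldr() scaling: shift the byte offset right by log2 of the access size (1 for half-words, 2 for 32-bit, 3 for 64-bit registers, and presumably 4 for the Scale::Q case used at line 285). A tiny sketch of that mapping; the Scale values here are assumptions modelled on the hits, not the project's enum:

    #include <cassert>
    #include <cstdint>

    enum class Scale { H = 1, S = 2, D = 3, Q = 4 };   // value == log2(access size in bytes)

    static uint64_t ScaleOffset(uint64_t byteOffset, Scale scale)
    {
        return byteOffset >> static_cast<int>(scale);  // same idea as imm >>= 1 / 2 / 3 in the hits
    }

    int main()
    {
        assert(ScaleOffset(8, Scale::D) == 1);   // 8-byte units, matches imm >>= 3
        assert(ScaleOffset(8, Scale::S) == 2);   // 4-byte units, matches imm >>= 2
        assert(ScaleOffset(2, Scale::H) == 1);   // 2-byte units, matches imm >>= 1
        return 0;
    }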