/external/vixl/test/aarch64/

test-api-movprfx-aarch64.cc  (all matches in TEST() bodies)
     89  __ add(z1.VnH(), p2.Merging(), z1.VnH(), z1.VnH());
     98  __ asr(z10.VnH(), p2.Merging(), z10.VnH(), z10.VnD());
    104  __ asrr(z22.VnH(), p1.Merging(), z22.VnH(), z22.VnH());
    116  __ cls(z10.VnH(), p2.Merging(), z10.VnH());
    148  __ movprfx(z25.VnH(), p6.Merging(), z28.VnH());
    149  __ lsr(z25.VnH(), p6.Merging(), z25.VnH(), z25.VnD());
    154  __ movprfx(z26.VnH(), p6.Zeroing(), z27.VnH());
    155  __ lsrr(z26.VnH(), p6.Merging(), z26.VnH(), z26.VnH());
    167  __ mla(z7.VnH(), p0.Merging(), z5.VnH(), z7.VnH());
    169  __ movprfx(z1.VnH(), p0.Merging(), z17.VnH());
    [all …]
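
These matches come from the checks that a movprfx prefix is only paired with an instruction that can legally consume it: same destination, same governing predicate and lane size, and the destination used by the consumer only as its destructive source. Below is a minimal sketch of emitting such a pair outside the test harness; the main() wrapper, the register choices (z3, z25, z28, p6) and the ExactAssemblyScope are assumptions, not taken from the test file, and VIXL's src/ directory is assumed to be on the include path.

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      MacroAssembler masm;
      // Enable SVE so the (debug-mode) feature checks accept the instructions below.
      masm.GetCPUFeatures()->Combine(CPUFeatures::kSVE);

      {
        // A movprfx and its consumer must be emitted back to back, so use an
        // exact-size scope and the raw (lowercase) assembler methods.
        ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
        // Legal pairing: same destination (z25), same predicate (p6/M), same H
        // lane size, and z25 appears in the add only as the destructive operand.
        masm.movprfx(z25.VnH(), p6.Merging(), z28.VnH());
        masm.add(z25.VnH(), p6.Merging(), z25.VnH(), z3.VnH());
      }

      masm.FinalizeCode();
      return 0;
    }
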
test-disasm-sve-aarch64.cc  (all matches in TEST() bodies)
     56  COMPARE_PREFIX(lsl(z0.VnH(), z1.VnH(), 15), "lsl z0.h, z1.h, #15");
     61  COMPARE_PREFIX(splice(z0.VnH(), p7, z0.VnH(), z1.VnH()),
    168  COMPARE_PREFIX(eor(z26.VnH(), z26.VnH(), 0x7ff8),
    177  COMPARE_PREFIX(orn(z11.VnH(), z11.VnH(), 0x2), "orr z11.h, z11.h, #0xfffd");
    180  COMPARE_PREFIX(mov(z0.VnH(), 0xf00f), "mov z0.h, #0xf00f");
    185  COMPARE_PREFIX(dupm(z0.VnH(), 0xfe), "dupm z0.h, #0xfe");
    186  COMPARE_PREFIX(dupm(z0.VnH(), 0xff), "dupm z0.h, #0xff");
    187  COMPARE_PREFIX(dupm(z0.VnH(), 0x1fe), "mov z0.h, #0x1fe");
    188  COMPARE_PREFIX(dupm(z0.VnH(), 0xfe00), "dupm z0.h, #0xfe00");
    189  COMPARE_PREFIX(dupm(z0.VnH(), 0xfe01), "mov z0.h, #0xfe01");
    [all …]
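
These COMPARE_PREFIX checks assert that each VnH() form disassembles to the expected ".h" text, including alias folding (an orn immediate printed as orr with the inverted immediate, dupm printed as mov when the bit pattern is representable as a move). A rough standalone way to look at such output, assuming VIXL's Decoder/PrintDisassembler classes and the src/ include path rather than the test's COMPARE_PREFIX harness:

    #include <cstdio>

    #include "aarch64/decoder-aarch64.h"
    #include "aarch64/disasm-aarch64.h"
    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      MacroAssembler masm;
      masm.GetCPUFeatures()->Combine(CPUFeatures::kSVE);

      {
        // Emit a single SVE shift; expected to print as "lsl z0.h, z1.h, #15".
        ExactAssemblyScope scope(&masm, kInstructionSize);
        masm.lsl(z0.VnH(), z1.VnH(), 15);
      }
      masm.FinalizeCode();

      // Feed the generated instruction to the disassembler and print it.
      Decoder decoder;
      PrintDisassembler print_disasm(stdout);
      decoder.AppendVisitor(&print_disasm);
      decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>());
      return 0;
    }
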
test-assembler-sve-aarch64.cc  (all matches in TEST_SVE() bodies)
    222  InsrHelper(&masm, z1.VnH(), z1_inputs);
    228  __ Insr(z2.VnH(), -42); // 0xffd6
    229  __ Insr(z2.VnH(), 0xfedc); // 0xfedc
    252  ASSERT_EQUAL_SVE_LANE(z1_inputs[i], z1.VnH(), lane);
    273  Initialise(&masm, p1.VnH(), p1_inputs);
    287  Initialise(&masm, p4.VnH(), p4_inputs);
    317  ASSERT_EQUAL_SVE_LANE(p1_inputs[i], p1.VnH(), lane);
    338  ASSERT_EQUAL_SVE(p6_expected, p6.VnH());
    597  __ Punpklo(p3.VnH(), p3.VnB());
    598  __ Index(z0.VnH(), 0x1110, 1);
    [all …]
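
These are execution tests: code is generated, run (typically under the VIXL simulator), and H-lane values are checked with ASSERT_EQUAL_SVE / ASSERT_EQUAL_SVE_LANE. The sketch below covers only the code-generation half, with the RUN()/simulator plumbing omitted; the main() wrapper and register choices are assumptions, while the Index/Insr/Punpklo calls mirror the matches above.

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      MacroAssembler masm;
      masm.GetCPUFeatures()->Combine(CPUFeatures::kSVE);

      // Build H-lane test values the way the tests above do.
      masm.Index(z0.VnH(), 0x1110, 1);   // z0.h = 0x1110, 0x1111, 0x1112, ...
      masm.Insr(z2.VnH(), -42);          // insert -42 (0xffd6 as a 16-bit lane)
      masm.Insr(z2.VnH(), 0xfedc);       // then insert 0xfedc
      masm.Punpklo(p3.VnH(), p3.VnB());  // widen the low byte-granule predicate lanes to H

      masm.FinalizeCode();
      return 0;
    }
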
test-api-aarch64.cc  (all matches in TEST() bodies)
    291  VIXL_CHECK(ZRegister(1, kHRegSize).Is(z1.VnH()));
    297  VIXL_CHECK(ZRegister(1, kFormatVnH).Is(z1.VnH()));
    306  VIXL_CHECK(PRegisterWithLaneSize(1, kHRegSize).Is(p1.VnH()));
    312  VIXL_CHECK(PRegisterWithLaneSize(1, kFormatVnH).Is(p1.VnH()));
    580  VIXL_CHECK(p14.VnH().GetLaneSizeInBits() == kHRegSize);
    584  VIXL_CHECK(p14.VnH().GetLaneSizeInBytes() == kHRegSizeInBytes);
    591  VIXL_CHECK(Helper::GetQualification(p5.VnH()) == kWithLaneSize);
    647  VIXL_CHECK(!z7.VnH().Is(h7));
    652  VIXL_CHECK(!z7.VnH().Is(v7.H()));
    657  VIXL_CHECK(!z7.VnH().Is(z7.H()));
    [all …]
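
These checks pin down the register API itself: a VnH() view is the same object as one constructed directly from a register code and an H lane size, and it is distinct from the scalar H register and the NEON view with the same code. A few of them restated outside the TEST() harness; only the main() wrapper and the include path are assumptions, the calls themselves come from the matches above.

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      // Equivalent ways of naming "z1.h" and "p1.h".
      VIXL_CHECK(ZRegister(1, kHRegSize).Is(z1.VnH()));
      VIXL_CHECK(PRegisterWithLaneSize(1, kHRegSize).Is(p1.VnH()));
      VIXL_CHECK(p14.VnH().GetLaneSizeInBytes() == kHRegSizeInBytes);

      // A Z register with H lanes is neither the scalar h7 nor the NEON view v7.H().
      VIXL_CHECK(!z7.VnH().Is(h7));
      VIXL_CHECK(!z7.VnH().Is(v7.H()));
      return 0;
    }
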
test-trace-aarch64.cc  (all matches in GenerateTestSequenceSVE())
   2753  __ str(p14.VnH(), SVEMemOperand(x0, 11, SVE_MUL_VL));
   2757  __ ldr(p10.VnH(), SVEMemOperand(x0, 11, SVE_MUL_VL));
   2762  __ str(z2.VnH(), SVEMemOperand(x0, 11, SVE_MUL_VL));
   2766  __ ldr(z22.VnH(), SVEMemOperand(x0, 11, SVE_MUL_VL));
   2771  __ st1h(z1.VnH(), p1, SVEMemOperand(x0, 3, SVE_MUL_VL));
   2775  __ ld1h(z21.VnH(), p2.Zeroing(), SVEMemOperand(x0, x2, LSL, 1));
   2780  __ st1b(z2.VnH(), p1, SVEMemOperand(x0, 3, SVE_MUL_VL));
   2786  __ ld1b(z20.VnH(), p1.Zeroing(), SVEMemOperand(x0, x2));
   2792  __ ld1sb(z21.VnH(), p1.Zeroing(), SVEMemOperand(x0, 3, SVE_MUL_VL));
   2801  __ st2h(z1.VnH(), z2.VnH(), p4, SVEMemOperand(x0, 4, SVE_MUL_VL));
    [all …]
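
The trace tests emit one of each SVE load/store form so the tracer's output can be compared against a reference. The addressing modes are the interesting part: "MUL VL" immediate offsets (scaled by the transfer size) and scalar indices scaled by the element size. A minimal sketch emitting a few of the forms listed above, with the trace/simulator plumbing omitted; the ExactAssemblyScope and main() wrapper are assumptions.

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      MacroAssembler masm;
      masm.GetCPUFeatures()->Combine(CPUFeatures::kSVE);

      {
        ExactAssemblyScope scope(&masm, 4 * kInstructionSize);
        // Whole-register transfers: immediate offsets in "MUL VL" units
        // (multiples of the transferred register's size).
        masm.str(z2.VnH(), SVEMemOperand(x0, 11, SVE_MUL_VL));
        masm.ldr(p10.VnH(), SVEMemOperand(x0, 11, SVE_MUL_VL));
        // Predicated contiguous accesses: a "MUL VL" immediate offset, and a
        // scalar index scaled by the H element size (LSL #1).
        masm.st1h(z1.VnH(), p1, SVEMemOperand(x0, 3, SVE_MUL_VL));
        masm.ld1h(z21.VnH(), p2.Zeroing(), SVEMemOperand(x0, x2, LSL, 1));
      }

      masm.FinalizeCode();
      return 0;
    }
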
/external/vixl/src/aarch64/

registers-aarch64.h
    613  ZRegister VnH() const { return ZRegister(GetCode(), kHRegSize); }   // ZRegister::VnH() definition
    646  PRegisterWithLaneSize VnH() const;                                  // PRegister::VnH() declaration
    743  inline PRegisterWithLaneSize PRegister::VnH() const {               // PRegister::VnH() out-of-line definition
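
So VnH() just re-qualifies a register with a 16-bit (H) lane size: on a ZRegister it returns another ZRegister carrying that lane size, and on a PRegister it returns a PRegisterWithLaneSize. A tiny illustration of what the accessors return; the main() wrapper and the choice of z3/p5 are assumptions, not part of the header.

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      ZRegister zh = z3.VnH();              // "z3.h": z3 viewed as 16-bit lanes
      PRegisterWithLaneSize ph = p5.VnH();  // "p5.h": p5 viewed as 16-bit lanes

      VIXL_CHECK(zh.GetCode() == 3);
      VIXL_CHECK(zh.GetLaneSizeInBits() == kHRegSize);
      VIXL_CHECK(ph.GetLaneSizeInBits() == kHRegSize);
      return 0;
    }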