/external/vixl/test/aarch64/ |
D | test-trace-aarch64.cc |
    614  __ abs(v16.V16B(), v11.V16B());               in GenerateTestSequenceNEON()
    622  __ add(v31.V16B(), v15.V16B(), v23.V16B());   in GenerateTestSequenceNEON()
    632  __ addhn2(v16.V16B(), v21.V8H(), v20.V8H());  in GenerateTestSequenceNEON()
    636  __ addp(v3.V16B(), v8.V16B(), v28.V16B());    in GenerateTestSequenceNEON()
    643  __ addv(b27, v23.V16B());                     in GenerateTestSequenceNEON()
    648  __ and_(v10.V16B(), v8.V16B(), v27.V16B());   in GenerateTestSequenceNEON()
    650  __ bic(v26.V16B(), v3.V16B(), v24.V16B());    in GenerateTestSequenceNEON()
    656  __ bif(v12.V16B(), v26.V16B(), v8.V16B());    in GenerateTestSequenceNEON()
    658  __ bit(v8.V16B(), v3.V16B(), v13.V16B());     in GenerateTestSequenceNEON()
    660  __ bsl(v9.V16B(), v31.V16B(), v23.V16B());    in GenerateTestSequenceNEON()
    [all …]
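These trace-test hits all use the raw (lowercase) assembler interface on the full 16-lane byte arrangement; the test emits a long sequence of such instructions and diffs the simulator's execution trace against reference logs. A minimal sketch of emitting a few of the listed ops, assuming a standard vixl build and an Assembler* supplied by the caller (EmitByteLaneOps is a hypothetical name; the harness around GenerateTestSequenceNEON() is omitted):

    #include "aarch64/assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitByteLaneOps(Assembler* assm) {
      // Each call encodes one instruction operating on all 16 byte lanes
      // of a 128-bit Q register.
      assm->abs(v16.V16B(), v11.V16B());              // per-lane absolute value
      assm->add(v31.V16B(), v15.V16B(), v23.V16B());  // per-lane addition
      assm->addp(v3.V16B(), v8.V16B(), v28.V16B());   // pairwise addition
      assm->addv(b27, v23.V16B());                    // reduction: sum lanes into b27
      assm->bsl(v9.V16B(), v31.V16B(), v23.V16B());   // bitwise select on mask in v9
    }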
|
D | test-cpu-features-aarch64.cc |
    731  TEST_NEON(abs_1, abs(v0.V16B(), v1.V16B()))
    741  TEST_NEON(addhn2_0, addhn2(v0.V16B(), v1.V8H(), v2.V8H()))
    746  TEST_NEON(addp_2, addp(v0.V16B(), v1.V16B(), v2.V16B()))
    753  TEST_NEON(addv_1, addv(b0, v1.V16B()))
    758  TEST_NEON(add_1, add(v0.V16B(), v1.V16B(), v2.V16B()))
    766  TEST_NEON(and_1, and_(v0.V16B(), v1.V16B(), v2.V16B()))
    772  TEST_NEON(bic_5, bic(v0.V16B(), v1.V16B(), v2.V16B()))
    774  TEST_NEON(bif_1, bif(v0.V16B(), v1.V16B(), v2.V16B()))
    776  TEST_NEON(bit_1, bit(v0.V16B(), v1.V16B(), v2.V16B()))
    778  TEST_NEON(bsl_1, bsl(v0.V16B(), v1.V16B(), v2.V16B()))
    [all …]
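Each TEST_NEON entry asserts that the given instruction requires the NEON feature. A hedged sketch of the idea behind such a check, assuming vixl's CPUFeaturesAuditor API; the real macro's expansion and bookkeeping in test-cpu-features-aarch64.cc are more involved:

    #include "aarch64/assembler-aarch64.h"
    #include "aarch64/cpu-features-auditor-aarch64.h"
    #include "aarch64/decoder-aarch64.h"

    using namespace vixl::aarch64;

    bool AddRequiresNeon() {
      Assembler assm;
      assm.add(v0.V16B(), v1.V16B(), v2.V16B());  // instruction under test
      assm.FinalizeCode();

      // Run the encoded instruction through an auditor that records which
      // CPU features the decoder says it needs.
      Decoder decoder;
      CPUFeaturesAuditor auditor(&decoder, CPUFeatures::All());
      decoder.Decode(assm.GetBuffer()->GetStartAddress<Instruction*>());
      return auditor.GetSeenFeatures().Has(CPUFeatures::kNEON);
    }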
|
D | test-disasm-aarch64.cc |
    3460  V(V16B(), "16b") \
    3471  V(V8H(), "8h", V16B(), "16b") \
    3481  V(V8H(), "8h", V16B(), "16b") \
    3487  V(V16B(), "16b") \
    3547  COMPARE_MACRO(Ld1(v1.V16B(), MemOperand(x16, 16, PostIndex)),  in TEST()
    3663  COMPARE_MACRO(St1(v1.V16B(), MemOperand(x16, 16, PostIndex)),  in TEST()
    3691  COMPARE_MACRO(St2(v1.V16B(), v2.V16B(), MemOperand(x16, 32, PostIndex)),  in TEST()
    3810  COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16)), "ld1 {v1.b}[1], [x16]");  in TEST()
    3826  COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16, 1, PostIndex)),  in TEST()
    3859  COMPARE_MACRO(Ld2(v1.V16B(), v2.V16B(), 1, MemOperand(x16)),  in TEST()
    [all …]
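COMPARE_MACRO assembles one MacroAssembler call and string-compares its disassembly against the expected text (visible inline in the hit at line 3810 above). A rough self-contained sketch of that round trip, with the harness plumbing simplified and the expected string taken from that hit:

    #include <cstring>
    #include "aarch64/disasm-aarch64.h"
    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    bool DisassemblesAsExpected() {
      MacroAssembler masm;
      masm.Ld1(v1.V16B(), 1, MemOperand(x16));  // load one byte into lane 1
      masm.FinalizeCode();

      // Decode the emitted instruction and capture its printed form.
      Decoder decoder;
      Disassembler disasm;
      decoder.AppendVisitor(&disasm);
      decoder.Decode(masm.GetBuffer()->GetStartAddress<Instruction*>());
      return strcmp(disasm.GetOutput(), "ld1 {v1.b}[1], [x16]") == 0;
    }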
|
D | test-assembler-aarch64.cc |
    3867  __ Ld1(v2.V16B(), MemOperand(x17));  in TEST()
    3869  __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x17));  in TEST()
    3915  __ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));  in TEST()
    3916  __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));  in TEST()
    4109  __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17));  in TEST()
    4111  __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x17));  in TEST()
    4153  __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));  in TEST()
    4154  __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));  in TEST()
    4374  __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));  in TEST()
    4421  __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x18, PostIndex));  in TEST()
    [all …]
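These assembler tests exercise the structure load addressing modes: a plain base register, register post-index, and immediate post-index, across Ld1 (contiguous), Ld2 (de-interleaving) and Ld2r (load-and-replicate). A sketch of the four forms, assuming a MacroAssembler* rather than the tests' `#define __ masm.` shorthand:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitLoadForms(MacroAssembler* masm) {
      // Plain base: 16 bytes from [x17]; x17 is unchanged.
      masm->Ld1(v2.V16B(), MemOperand(x17));
      // Register post-index: load, then x17 += x22.
      masm->Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));
      // Ld2 de-interleaves 32 bytes: even-indexed bytes to v2, odd to v3;
      // immediate post-index advances x18 by 32 afterwards.
      masm->Ld2(v2.V16B(), v3.V16B(), MemOperand(x18, 32, PostIndex));
      // Ld2r loads one two-byte element pair and replicates each byte
      // across all 16 lanes of its destination register.
      masm->Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));
    }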
|
D | test-simulator-aarch64.cc |
    1486  VRegister vn = v1.V16B();  in Test1OpNEON_Helper()
    1487  VRegister vntmp = v3.V16B();  in Test1OpNEON_Helper()
    1515  __ Movi(vd.V16B(), 0);  in Test1OpNEON_Helper()
    1704  VRegister vn_ext = (kDRegSize == vn_bits) ? vn.V8B() : vn.V16B();  in Test1OpAcrossNEON_Helper()
    1705  VRegister vntmp_ext = (kDRegSize == vn_bits) ? vntmp.V8B() : vntmp.V16B();  in Test1OpAcrossNEON_Helper()
    1932  VRegister vd = v0.V16B();  in Test2OpNEON_Helper()
    1933  VRegister vn = v1.V16B();  in Test2OpNEON_Helper()
    1934  VRegister vm = v2.V16B();  in Test2OpNEON_Helper()
    1935  VRegister vntmp = v3.V16B();  in Test2OpNEON_Helper()
    1936  VRegister vmtmp = v4.V16B();  in Test2OpNEON_Helper()
    [all …]
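The simulator helpers fix a register assignment up front (destination in v0, operands in v1/v2, staging temporaries in v3/v4) and clear the destination before each run so stale lanes cannot masquerade as results. A simplified sketch of that setup; in the real helpers, fresh input lanes are rotated into the operand registers with Ext, and the surrounding load loop is omitted here:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void StageTwoOpInputs(MacroAssembler* masm) {
      // Fixed register assignment, as in Test2OpNEON_Helper.
      VRegister vd = v0.V16B();
      VRegister vn = v1.V16B();
      VRegister vm = v2.V16B();
      VRegister vntmp = v3.V16B();
      VRegister vmtmp = v4.V16B();

      // Clear the destination before the instruction under test runs.
      masm->Movi(vd, 0);

      // Shift one fresh byte lane from each temporary into the operands.
      masm->Ext(vn, vn, vntmp, 1);
      masm->Ext(vm, vm, vmtmp, 1);
    }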
|
/external/vixl/examples/aarch64/ |
D | add2-vectors.cc |
    56  __ Ld1(v0.V16B(), MemOperand(x0));                 in GenerateAdd2Vectors()
    57  __ Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));  in GenerateAdd2Vectors()
    58  __ Add(v0.V16B(), v0.V16B(), v1.V16B());           in GenerateAdd2Vectors()
    59  __ St1(v0.V16B(), MemOperand(x0, 16, PostIndex));  in GenerateAdd2Vectors()
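These four lines are the whole kernel of the example: one loop iteration adds 16 bytes of the array at x1 into the array at x0 and advances both pointers. A self-contained sketch of that loop body (the loop control and argument setup from the example are omitted):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void EmitAdd2VectorsBody(MacroAssembler* masm) {
      masm->Ld1(v0.V16B(), MemOperand(x0));                 // load 16 bytes of a
      masm->Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));  // load 16 bytes of b; x1 += 16
      masm->Add(v0.V16B(), v0.V16B(), v1.V16B());           // 16 independent byte adds
      masm->St1(v0.V16B(), MemOperand(x0, 16, PostIndex));  // store back to a; x0 += 16
    }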
|
D | neon-matrix-multiply.cc |
    78  __ Movi(v0.V16B(), 0);  in GenerateNEONMatrixMultiply()
    79  __ Movi(v1.V16B(), 0);  in GenerateNEONMatrixMultiply()
    80  __ Movi(v2.V16B(), 0);  in GenerateNEONMatrixMultiply()
    81  __ Movi(v3.V16B(), 0);  in GenerateNEONMatrixMultiply()
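Movi with a byte arrangement broadcasts an 8-bit immediate into every lane, so Movi(vd.V16B(), 0) clears the full 128-bit register no matter what element type the later arithmetic uses. Here it zeroes four accumulators before the example's multiply-accumulate loop; the one-register-per-output-column layout in the comments below is assumed from the example's structure:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void ZeroAccumulators(MacroAssembler* masm) {
      // Broadcasting byte 0 to all 16 lanes zeroes the whole Q register.
      masm->Movi(v0.V16B(), 0);  // accumulator for output column 0
      masm->Movi(v1.V16B(), 0);  // column 1
      masm->Movi(v2.V16B(), 0);  // column 2
      masm->Movi(v3.V16B(), 0);  // column 3
    }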
|
/external/v8/src/compiler/arm64/ |
D | code-generator-arm64.cc |
    1895  i.InputSimd128Register(0).V16B());  in AssembleArchInstruction()
    1924  __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));  in AssembleArchInstruction()
    1928  __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),  in AssembleArchInstruction()
    1933  VRegister dst = i.OutputSimd128Register().V16B(),  in AssembleArchInstruction()
    1934  src1 = i.InputSimd128Register(0).V16B();  in AssembleArchInstruction()
    1943  __ Shl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),  in AssembleArchInstruction()
    1948  __ Sshr(i.OutputSimd128Register().V16B(),  in AssembleArchInstruction()
    1949  i.InputSimd128Register(0).V16B(), i.InputInt5(1));  in AssembleArchInstruction()
    1963  __ Sqxtn2(dst.V16B(), src1.V8H());  in AssembleArchInstruction()
    1975  VRegister dst = i.OutputSimd128Register().V16B();  in AssembleArchInstruction()
    [all …]
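These hits are V8 lowering its 8x16-lane WebAssembly SIMD operations: Dup implements a splat, Smov a signed lane extract, Shl/Sshr the shifts, and Sqxtn2 the high half of a saturating narrow. A hedged sketch of those shapes with plain registers standing in for V8's InstructionOperand accessors (register choices here are illustrative):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void LowerI8x16Samples(MacroAssembler* masm) {
      masm->Dup(v0.V16B(), w1);             // splat: broadcast low byte of w1
      masm->Smov(w2, v0.V16B(), 3);         // extract lane 3, sign-extended, into w2
      masm->Shl(v0.V16B(), v0.V16B(), 1);   // shift every byte lane left by 1
      masm->Sshr(v0.V16B(), v0.V16B(), 2);  // arithmetic shift right by 2
      masm->Sqxtn2(v0.V16B(), v1.V8H());    // saturate 8 halfwords into v0's upper bytes
    }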
|
/external/vixl/src/aarch64/ |
D | operands-aarch64.h |
    366  VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }  in V16B() function
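This is the accessor every hit above goes through, and it moves no data: it builds a new VRegister value with the same register code, Q-register (128-bit) size, and a lane count of 16, which the assembler then uses to pick instruction encodings. The sibling accessors (V8B(), V8H(), V4S(), and so on) differ only in size and lane count. For instance:

    #include "aarch64/operands-aarch64.h"

    using namespace vixl::aarch64;

    void FormatExample() {
      VRegister bytes = v7.V16B();  // same architectural register as v7
      // Only the view changes: full Q width, 16 lanes of 8 bits each.
      VIXL_ASSERT(bytes.Is16B());
      VIXL_ASSERT(bytes.GetLanes() == 16);
    }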
|
D | assembler-aarch64.cc |
    3748  orr(vd.V16B(), vn.V16B(), vn.V16B());  in mov()
    3801  not_(vd.V16B(), vn.V16B());            in mvn()
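These two hits show why V16B() appears inside the assembler itself: AArch64 has no dedicated vector register-to-register move, so the architecture defines MOV Vd, Vn as an alias of ORR Vd, Vn, Vn and vector MVN as an alias of NOT. For Q registers the alias is emitted on the 16-byte arrangement (the D-register path, not shown in this hit, uses V8B), since a full-width copy is bit-identical whatever lane format the caller used. A sketch of the same aliasing, as hypothetical free functions rather than vixl's members:

    #include "aarch64/assembler-aarch64.h"

    using namespace vixl::aarch64;

    // vd = vn: ORR with two identical sources copies all 128 bits.
    void EmitVectorMov(Assembler* assm, const VRegister& vd, const VRegister& vn) {
      assm->orr(vd.V16B(), vn.V16B(), vn.V16B());
    }

    // vd = ~vn: NOT on the byte arrangement inverts all 128 bits.
    void EmitVectorMvn(Assembler* assm, const VRegister& vd, const VRegister& vn) {
      assm->not_(vd.V16B(), vn.V16B());
    }

Disassemblers print the ORR form back as mov, so the alias is invisible in listings.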
|
D | macro-assembler-aarch64.cc |
    952  movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);  in Movi16bitHelper()
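Movi16bitHelper materializes a 16-bit vector immediate, and this hit is its cheapest path: when both bytes of the immediate are equal, a single MOVI broadcasts that byte to every lane, choosing the 8B arrangement for D registers and 16B for Q registers. A sketch of that fast path; the byte extraction mirrors the helper, while the remaining immediate cases are omitted:

    #include <cstdint>
    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void MoviFastPath(MacroAssembler* masm, const VRegister& vd, uint64_t imm) {
      int byte1 = imm & 0xff;
      int byte2 = (imm >> 8) & 0xff;
      if (byte1 == byte2) {
        // One MOVI fills the whole register with the repeated byte.
        masm->Movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
      }
      // Other 16-bit immediates need MOVI/MVNI with a shift, or more moves.
    }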
|
/external/v8/src/arm64/ |
D | assembler-arm64.cc |
    3586  orr(vd.V16B(), vn.V16B(), vn.V16B());  in mov()
    3625  not_(vd.V16B(), vn.V16B());            in mvn()
|
D | macro-assembler-arm64.cc |
    368  movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);  in Movi16bitHelper()
|
D | assembler-arm64.h |
    317  VRegister V16B() const {  in V16B() function
|