/external/vixl/test/aarch64/ |
D | test-trace-aarch64.cc |
      615  __ abs(v16.V16B(), v11.V16B());               in GenerateTestSequenceNEON()
      623  __ add(v31.V16B(), v15.V16B(), v23.V16B());   in GenerateTestSequenceNEON()
      633  __ addhn2(v16.V16B(), v21.V8H(), v20.V8H());  in GenerateTestSequenceNEON()
      637  __ addp(v3.V16B(), v8.V16B(), v28.V16B());    in GenerateTestSequenceNEON()
      644  __ addv(b27, v23.V16B());                     in GenerateTestSequenceNEON()
      649  __ and_(v10.V16B(), v8.V16B(), v27.V16B());   in GenerateTestSequenceNEON()
      651  __ bic(v26.V16B(), v3.V16B(), v24.V16B());    in GenerateTestSequenceNEON()
      657  __ bif(v12.V16B(), v26.V16B(), v8.V16B());    in GenerateTestSequenceNEON()
      659  __ bit(v8.V16B(), v3.V16B(), v13.V16B());     in GenerateTestSequenceNEON()
      661  __ bsl(v9.V16B(), v31.V16B(), v23.V16B());    in GenerateTestSequenceNEON()
      [all …]
|
D | test-disasm-aarch64.cc |
      3085  V(V16B(), "16b") \
      3096  V(V8H(), "8h", V16B(), "16b") \
      3106  V(V8H(), "8h", V16B(), "16b") \
      3112  V(V16B(), "16b") \
      3172  COMPARE_MACRO(Ld1(v1.V16B(), MemOperand(x16, 16, PostIndex)),                 in TEST()
      3288  COMPARE_MACRO(St1(v1.V16B(), MemOperand(x16, 16, PostIndex)),                 in TEST()
      3316  COMPARE_MACRO(St2(v1.V16B(), v2.V16B(), MemOperand(x16, 32, PostIndex)),      in TEST()
      3435  COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16)), "ld1 {v1.b}[1], [x16]");    in TEST()
      3451  COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16, 1, PostIndex)),               in TEST()
      3484  COMPARE_MACRO(Ld2(v1.V16B(), v2.V16B(), 1, MemOperand(x16)),                  in TEST()
      [all …]
|
D | test-assembler-aarch64.cc |
      3280  __ Ld1(v2.V16B(), MemOperand(x17));                              in TEST()
      3282  __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x17));                   in TEST()
      3328  __ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));              in TEST()
      3329  __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));    in TEST()
      3522  __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17));                   in TEST()
      3524  __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x17));                   in TEST()
      3566  __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));   in TEST()
      3567  __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));    in TEST()
      3787  __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));                  in TEST()
      3834  __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x18, PostIndex));  in TEST()
      [all …]
|
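The test-assembler-aarch64.cc matches above exercise the three addressing modes accepted by the NEON structure loads: plain base register, immediate post-index (the base advances by the transfer size after the access), and register post-index (the base advances by a general-purpose register). A minimal sketch of the three forms, assuming VIXL's "aarch64/macro-assembler-aarch64.h", using namespace vixl::aarch64, and a hypothetical generator function; the registers are taken from the listing:

      void GenerateLd2AddressingModes(MacroAssembler* masm) {
        // Base only: x17 is left unchanged by the load.
        masm->Ld2(v2.V16B(), v3.V16B(), MemOperand(x17));
        // Immediate post-index: x18 is advanced by 32, the number of bytes transferred.
        masm->Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));
        // Register post-index: x17 is advanced by the value held in x22.
        masm->Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));
      }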
D | test-simulator-aarch64.cc |
      1332  VRegister vn = v1.V16B();                                                    in Test1OpNEON_Helper()
      1333  VRegister vntmp = v3.V16B();                                                 in Test1OpNEON_Helper()
      1361  __ Movi(vd.V16B(), 0);                                                       in Test1OpNEON_Helper()
      1544  VRegister vn_ext = (kDRegSize == vn_bits) ? vn.V8B() : vn.V16B();            in Test1OpAcrossNEON_Helper()
      1545  VRegister vntmp_ext = (kDRegSize == vn_bits) ? vntmp.V8B() : vntmp.V16B();   in Test1OpAcrossNEON_Helper()
      1763  VRegister vd = v0.V16B();                                                    in Test2OpNEON_Helper()
      1764  VRegister vn = v1.V16B();                                                    in Test2OpNEON_Helper()
      1765  VRegister vm = v2.V16B();                                                    in Test2OpNEON_Helper()
      1766  VRegister vntmp = v3.V16B();                                                 in Test2OpNEON_Helper()
      1767  VRegister vmtmp = v4.V16B();                                                 in Test2OpNEON_Helper()
      [all …]
|
/external/vixl/examples/aarch64/ |
D | add2-vectors.cc |
      56  __ Ld1(v0.V16B(), MemOperand(x0));                 in GenerateAdd2Vectors()
      57  __ Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));  in GenerateAdd2Vectors()
      58  __ Add(v0.V16B(), v0.V16B(), v1.V16B());           in GenerateAdd2Vectors()
      59  __ St1(v0.V16B(), MemOperand(x0, 16, PostIndex));  in GenerateAdd2Vectors()
|
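The four add2-vectors.cc lines above are the core of that example: load 16 bytes from each input, add them lane-wise as unsigned bytes, and store the result, post-indexing the pointers so the next iteration sees the following chunk. A minimal sketch of such a generator, assuming VIXL's MacroAssembler; the loop scaffolding (labels, the size counter in w2, the argument convention) is illustrative rather than copied from the example:

      #include "aarch64/macro-assembler-aarch64.h"

      using namespace vixl::aarch64;

      // Generates code for: void add2_vectors(uint8_t* a /* x0 */,
      //                                       const uint8_t* b /* x1 */,
      //                                       uint32_t size /* w2, multiple of 16 */).
      // Adds the bytes at b into the bytes at a, 16 lanes per iteration.
      void GenerateAdd2Vectors(MacroAssembler* masm) {
        Label loop, done;
        masm->Bind(&loop);
        masm->Cbz(w2, &done);                                 // Nothing left to add.
        masm->Ld1(v0.V16B(), MemOperand(x0));                 // 16 bytes from a.
        masm->Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));  // 16 bytes from b; b += 16.
        masm->Add(v0.V16B(), v0.V16B(), v1.V16B());           // Lane-wise byte add.
        masm->St1(v0.V16B(), MemOperand(x0, 16, PostIndex));  // Store back; a += 16.
        masm->Sub(w2, w2, 16);                                // 16 fewer bytes remaining.
        masm->B(&loop);
        masm->Bind(&done);
        masm->Ret();
      }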
D | neon-matrix-multiply.cc |
      78  __ Movi(v0.V16B(), 0);   in GenerateNEONMatrixMultiply()
      79  __ Movi(v1.V16B(), 0);   in GenerateNEONMatrixMultiply()
      80  __ Movi(v2.V16B(), 0);   in GenerateNEONMatrixMultiply()
      81  __ Movi(v3.V16B(), 0);   in GenerateNEONMatrixMultiply()
|
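In neon-matrix-multiply.cc the four Movi instructions above clear v0–v3, the registers that accumulate the result columns, before the multiply-accumulate loop; writing zero through the .16B arrangement clears the whole 128-bit register, so it can later be accumulated into through a different view. A minimal sketch of that pattern, assuming a MacroAssembler* named masm as in the earlier sketch (the Fmla step and register choices are illustrative, not taken from the example):

      masm->Movi(v0.V16B(), 0);  // Clears the entire Q register, whatever view is used later.
      masm->Movi(v1.V16B(), 0);
      // Later: accumulate float products into the same register viewed as four 32-bit lanes.
      masm->Fmla(v0.V4S(), v4.V4S(), v8.V4S());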
/external/vixl/src/aarch64/ |
D | operands-aarch64.h |
      362  VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }   in V16B() function
|
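V16B() is one of a family of VRegister accessors that pair the register code with a size and lane count; the accessor chosen at the call site determines the arrangement that is encoded and printed (the disassembly tests above map V16B() to "16b", V8H() to "8h", and so on). A small usage sketch, assuming a MacroAssembler* named masm:

      masm->Add(v0.V16B(), v1.V16B(), v2.V16B());  // add v0.16b, v1.16b, v2.16b
      masm->Add(v0.V8H(), v1.V8H(), v2.V8H());     // add v0.8h, v1.8h, v2.8h
      masm->Add(v0.V4S(), v1.V4S(), v2.V4S());     // add v0.4s, v1.4s, v2.4s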
D | assembler-aarch64.cc |
      2811  orr(vd.V16B(), vn.V16B(), vn.V16B());   in mov()
      2861  not_(vd.V16B(), vn.V16B());             in mvn()
|
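These two lines mirror the architectural aliases: MOV (vector) is an alias of ORR Vd.16B, Vn.16B, Vn.16B, and MVN is an alias of NOT, so mov() and mvn() on Q registers simply re-emit as orr() and not_(). A minimal sketch at the MacroAssembler level, assuming Mov/Mvn are the macro counterparts of the mov()/mvn() shown above and masm as before:

      masm->Mov(v0.V16B(), v1.V16B());  // Encoded as orr v0.16b, v1.16b, v1.16b.
      masm->Mvn(v2.V16B(), v3.V16B());  // Encoded as not v2.16b, v3.16b.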
D | macro-assembler-aarch64.cc |
      952  movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);   in Movi16bitHelper()
|
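The quoted line from Movi16bitHelper handles a 16-bit immediate whose two bytes are equal: the value can then be materialised with a single byte-replicating MOVI, and the only remaining decision is whether to write it through the .8B or .16B arrangement, matching the width of the destination register. A minimal sketch of that case, assuming a MacroAssembler* named masm (the helper name is hypothetical):

      // Replicate one byte across every lane of vd. Is64Bits() distinguishes a
      // D-sized destination (.8B) from a Q-sized one (.16B).
      void MoviRepeatedByte(MacroAssembler* masm, const VRegister& vd, uint8_t byte) {
        masm->Movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte);
      }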