/external/vixl/test/aarch64/

test-trace-aarch64.cc (all hits in GenerateTestSequenceNEON()):
  616: __ abs(v27.V2S(), v25.V2S());
  624: __ add(v15.V2S(), v14.V2S(), v19.V2S());
  629: __ addhn(v10.V2S(), v14.V2D(), v15.V2D());
  638: __ addp(v22.V2S(), v30.V2S(), v26.V2S());
  651: __ bic(v7.V2S(), 0xe4, 16);
  663: __ cls(v21.V2S(), v0.V2S());
  669: __ clz(v27.V2S(), v17.V2S());
  680: __ cmeq(v2.V2S(), v3.V2S(), v9.V2S());
  681: __ cmeq(v16.V2S(), v25.V2S(), 0);
  696: __ cmge(v25.V2S(), v22.V2S(), v3.V2S());
  [all …]

test-cpu-features-aarch64.cc:
  760: TEST_NEON(abs_4, abs(v0.V2S(), v1.V2S()))
  766: TEST_NEON(addhn_2, addhn(v0.V2S(), v1.V2D(), v2.V2D()))
  775: TEST_NEON(addp_5, addp(v0.V2S(), v1.V2S(), v2.V2S()))
  787: TEST_NEON(add_4, add(v0.V2S(), v1.V2S(), v2.V2S()))
  795: TEST_NEON(bic_2, bic(v0.V2S(), 0xd8, 0))
  809: TEST_NEON(cls_4, cls(v0.V2S(), v1.V2S()))
  815: TEST_NEON(clz_4, clz(v0.V2S(), v1.V2S()))
  821: TEST_NEON(cmeq_4, cmeq(v0.V2S(), v1.V2S(), v2.V2S()))
  829: TEST_NEON(cmeq_12, cmeq(v0.V2S(), v1.V2S(), 0))
  837: TEST_NEON(cmge_4, cmge(v0.V2S(), v1.V2S(), v2.V2S()))
  [all …]

test-disasm-neon-aarch64.cc:
  309: V(V2S(), "2s") \
  315: V(V2S(), "2s", V4H(), "4h") \
  316: V(V1D(), "1d", V2S(), "2s") \
  324: V(V2D(), "2d", V2S(), "2s")
  336: V(V2S(), "2s") \
  342: V(V2S(), "2s") \
  in TEST():
  399: COMPARE_MACRO(Ld1(v16.V2S(),
  400:               v17.V2S(),
  401:               v18.V2S(),
  409: COMPARE_MACRO(Ld1(v19.V2S(),
  [all …]

test-assembler-neon-aarch64.cc (all hits in TEST()):
  318: __ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(), MemOperand(x17));
  320: __ Ld1(v30.V2S(), v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x17));
  371: __ Ld1(v16.V2S(),
  372:        v17.V2S(),
  373:        v18.V2S(),
  374:        v19.V2S(),
  376: __ Ld1(v30.V2S(),
  377:        v31.V2S(),
  378:        v0.V2S(),
  379:        v1.V2S(),
  [all …]

test-api-aarch64.cc (all hits in TEST()):
  258: VIXL_CHECK(d0.V2S().IsValidVRegister());
  259: VIXL_CHECK(!d0.V2S().IsValidFPRegister());

/external/vixl/benchmarks/aarch64/

bench-utils.cc (in GenerateNEONSequence()):
  380: __ Umull(PickV().V2D(), PickV().V2S(), PickV().V2S());

/external/v8/src/wasm/baseline/arm64/

liftoff-assembler-arm64.h:
  in emit_f32_copysign():
    1194: Ushr(scratch.V2S(), rhs.V2S(), 31);
    1198: Sli(dst.V2S(), scratch.V2S(), 31);
  in LoadTransform():
    1503: Sxtl(dst.fp().V2D(), dst.fp().V2S());
    1506: Uxtl(dst.fp().V2D(), dst.fp().V2S());
  in emit_i64x2_mul():
    1883: Xtn(tmp1.V2S(), lhs.fp().V2D());
    1884: Xtn(tmp2.V2S(), rhs.fp().V2D());
    1885: Umull(tmp1.V2D(), tmp1.V2S(), tmp2.V2S());
    1889: Shll(dst.fp().V2D(), tmp2.V2S(), 32);

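The emit_f32_copysign hits show the standard sign-transfer bit trick: Ushr isolates the sign bit of rhs, and Sli (shift left and insert) writes it into bit 31 of dst while preserving the low 31 bits. A minimal scalar sketch of the same arithmetic, for illustration only (not the V8 code):

    #include <cstdint>

    // Move the sign bit of |rhs| into |lhs|, leaving the exponent and
    // mantissa bits of |lhs| untouched.
    inline uint32_t CopySignBits(uint32_t lhs, uint32_t rhs) {
      uint32_t sign = rhs >> 31;                  // Ushr ..., 31: isolate the sign
      return (lhs & 0x7fffffffu) | (sign << 31);  // Sli ..., 31: insert at bit 31
    }
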
/external/python/cryptography/vectors/cryptography_vectors/x509/PKITS_data/smime/

SignedValidSelfIssuedinhibitAnyPolicyTest7.eml (coincidental match inside a base64-encoded S/MIME body, not a code reference):
  134: V2S/F4RcJWPwCbQsze732qQYjZuMNWT6OcJuvv9kgTfDaZ5elWwZZK/j4vBKWucz

/external/v8/src/compiler/backend/arm64/

code-generator-arm64.cc (all hits in AssembleArchInstruction()):
  1699: __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
  1703: __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
  1707: __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
  2185: __ Xtn(tmp1.V2S(), src1.V2D());
  2193: __ Xtn(tmp3.V2S(), src2.V2D());
  2197: __ Shll(dst.V2D(), tmp2.V2S(), 32);
  2202: __ Umlal(dst.V2D(), tmp3.V2S(), tmp1.V2S());
  2662: __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
  2663: __ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
  2667: __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
  [all …]

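The Xtn/Shll/Umlal hits at 2185-2202, like the emit_i64x2_mul hits above, are the usual i64x2 multiply lowering: AArch64 NEON has no 64x64-bit vector multiply, so each 64-bit lane product is assembled from 32-bit halves, with the cross terms computed in the elided lines in between. A scalar sketch of the per-lane identity, assuming nothing beyond standard C++:

    #include <cstdint>

    // Low 64 bits of a 64x64 product, built from 32-bit halves:
    // lo64(a*b) = a_lo*b_lo + ((a_lo*b_hi + a_hi*b_lo) << 32).
    inline uint64_t Mul64Lo(uint64_t a, uint64_t b) {
      uint64_t a_lo = static_cast<uint32_t>(a), a_hi = a >> 32;
      uint64_t b_lo = static_cast<uint32_t>(b), b_hi = b >> 32;
      uint64_t cross = a_lo * b_hi + a_hi * b_lo;  // cross terms
      return (cross << 32)    // Shll ..., 32: shift cross terms into place
             + a_lo * b_lo;   // Umlal: accumulate the low 32x32 product
    }
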
/external/vixl/src/aarch64/

operands-aarch64.h:
  372: VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }

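This is the definition behind every hit above: V2S() reinterprets the same register code as a D-sized (64-bit) vector of two 32-bit S lanes, which is exactly what the constructor arguments (code_, kDRegSize, 2) say. A minimal VIXL usage sketch; the include path and predefined register constants follow the VIXL source tree, so treat the details as assumptions:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    // Emit v0.2s = v1.2s + v2.2s: a lane-wise add on the two-lane,
    // 32-bit-element view of 64-bit D registers.
    void EmitAdd2S(MacroAssembler* masm) {
      masm->Add(v0.V2S(), v1.V2S(), v2.V2S());
    }
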
macro-assembler-aarch64.cc (in Movi64bitHelper()):
  1079: Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);

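VIXL here, and V8's copy below (macro-assembler-arm64.cc:467), use the same shortcut in Movi64bitHelper: when a 64-bit immediate can be handled as a replicated 32-bit pattern, the move is delegated to Movi32bitHelper on the all-S-lanes view, V2S for a 64-bit destination or V4S for a 128-bit one. The guard is not among the hits above, but it is presumably the halves-equal check sketched here:

    #include <cstdint>

    // A 64-bit immediate whose two 32-bit halves are equal can be
    // materialized by replicating the low half across all S lanes.
    inline bool HalvesEqual(uint64_t imm) {
      return static_cast<uint32_t>(imm) == static_cast<uint32_t>(imm >> 32);
    }
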
/external/v8/src/codegen/arm64/

register-arm64.h (V8's own V2S accessor, matching the VIXL signature above; the hit shows only the declaration line):
  336: VRegister V2S() const {

macro-assembler-arm64.cc (in Movi64bitHelper()):
  467: Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);

/external/llvm/lib/Target/X86/

X86ISelLowering.cpp (all hits in lowerVectorShuffleAsElementInsertion(); here V2S is a local SDValue, the scalar element taken from shuffle input V2, unrelated to the AArch64 register view):
  8400: SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
  8402: if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
  8404: V2S = DAG.getBitcast(EltVT, V2S);
  8413: V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
  8415: V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/

X86ISelLowering.cpp (the same function in the LLVM 7.0 copy bundled with SwiftShader):
  10748: SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
  10750: if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
  10752: V2S = DAG.getBitcast(EltVT, V2S);
  10761: V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
  10763: V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);