/external/libxaac/decoder/armv7/ |
D | ixheaacd_pre_twiddle_compute.s |
    175  VSHL.S32 Q10, Q10, Q7
    179  VSHL.S32 Q11, Q11, Q7
    182  VSHL.S32 Q9, Q15, Q7
    183  VSHL.S32 Q8, Q14, Q7
    241  VSHL.S32 Q10, Q10, Q7
    242  VSHL.S32 Q11, Q11, Q7
    245  VSHL.S32 Q9, Q15, Q7
    248  VSHL.S32 Q8, Q14, Q7
    296  VSHL.S32 Q10, Q10, Q7
    297  VSHL.S32 Q11, Q11, Q7
    [all …]
|
D | ixheaacd_sbr_imdct_using_fft.s |
    400  VSHL.S32 q12, q12, #1
    402  VSHL.S32 q5, q5, #1
    404  VSHL.S32 q9, q9, #1
    406  VSHL.S32 q2, q2, #1
    408  VSHL.S32 q0, q0, #1
    410  VSHL.S32 q7, q7, #1
    412  VSHL.S32 q14, q14, #1
    414  VSHL.S32 q4, q4, #1
    416  VSHL.S32 q13, q13, #1
    418  VSHL.S32 q6, q6, #1
    [all …]
|
D | ixheaacd_dec_DCT2_64_asm.s |
    80   VSHL.S32 D0, D0, #1
    121  VSHL.S32 Q15, Q15, #1
    123  VSHL.S32 Q14, Q14, #1
    176  VSHL.S32 Q15, Q15, #1
    179  VSHL.S32 Q14, Q14, #1
    223  VSHL.S32 Q15, Q15, #1
    226  VSHL.S32 Q14, Q14, #1
    273  VSHL.S32 Q15, Q15, #1
    274  VSHL.S32 Q14, Q14, #1
    319  VSHL.S32 D2, D2, #4
    [all …]
|
D | ixheaacd_imdct_using_fft.s |
    395  VSHL.S32 q12, q12, #3
    397  VSHL.S32 q5, q5, #3
    399  VSHL.S32 q9, q9, #3
    401  VSHL.S32 q2, q2, #3
    403  VSHL.S32 q0, q0, #3
    405  VSHL.S32 q7, q7, #3
    407  VSHL.S32 q14, q14, #3
    409  VSHL.S32 q4, q4, #3
    411  VSHL.S32 q13, q13, #3
    413  VSHL.S32 q6, q6, #3
    [all …]
|
D | ixheaacd_fft32x32_ld.s |
    404  VSHL.S32 q12, q12, #3 @ch
    406  VSHL.S32 q5, q5, #3 @ch
    408  VSHL.S32 q9, q9, #3 @ch
    410  VSHL.S32 q2, q2, #3 @ch
    412  VSHL.S32 q0, q0, #3 @ch
    414  VSHL.S32 q7, q7, #3 @ch
    416  VSHL.S32 q14, q14, #3 @ch
    418  VSHL.S32 q4, q4, #3 @ch
    420  VSHL.S32 q13, q13, #3 @ch
    422  VSHL.S32 q6, q6, #3 @ch
    [all …]
|
D | ixheaacd_sbr_qmfsyn64_winadd.s | 47 VSHL.S32 Q10, Q15, Q14
|
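The libxaac matches above mix the two forms of VSHL.S32: a shift whose per-lane count comes from another register (Q7, Q14) and a shift by an immediate (#1, #3, #4). The sketch below shows the same two forms at the intrinsic level; the function names and the shift amount of 3 are illustrative, not taken from the libxaac sources.

    #include <arm_neon.h>

    /* VSHL.S32 Qd, Qm, Qn: each lane of x is shifted by the count held in the
     * corresponding lane of counts; a negative count shifts that lane right. */
    int32x4_t shift_by_register(int32x4_t x, int32x4_t counts) {
        return vshlq_s32(x, counts);
    }

    /* VSHL.S32 Qd, Qm, #3: every lane is shifted left by the same constant. */
    int32x4_t shift_by_immediate(int32x4_t x) {
        return vshlq_n_s32(x, 3);
    }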
/external/llvm/test/CodeGen/ARM/ |
D | rotate.ll | 4 ;; select ROTL. Make sure it generates the basic VSHL/VSHR.
|
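The rotate.ll comment is about lowering a rotate that has no native instruction: the rotate is expressed as a left shift plus a right shift, which on ARM become the basic VSHL and VSHR. A minimal scalar sketch of that pattern (the function is mine, not taken from the test):

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n) {
        n &= 31;                                   /* keep the count in range    */
        return (x << n) | (x >> ((32 - n) & 31));  /* shl + lshr + or == rotate  */
    }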
/external/llvm/test/CodeGen/AMDGPU/ |
D | trunc.ll |
    25  ; SI: v_mov_b32_e32 [[VSHL:v[0-9]+]], [[SHL]]
    26  ; SI: buffer_store_dword [[VSHL]],
|
/external/arm-neon-tests/ |
D | ref-rvct-neon-nofp16.txt |
    1501  VSHL/VSHLQ output:
    1502  VSHL/VSHLQ:0:result_int8x8 [] = { ffffffe0, ffffffe2, ffffffe4, ffffffe6, ffffffe8, ffffffea, fffff…
    1503  VSHL/VSHLQ:1:result_int16x4 [] = { ffffff80, ffffff88, ffffff90, ffffff98, }
    1504  VSHL/VSHLQ:2:result_int32x2 [] = { fffff000, fffff100, }
    1505  VSHL/VSHLQ:3:result_int64x1 [] = { ffffffffffffff80, }
    1506  VSHL/VSHLQ:4:result_uint8x8 [] = { e0, e2, e4, e6, e8, ea, ec, ee, }
    1507  VSHL/VSHLQ:5:result_uint16x4 [] = { ff80, ff88, ff90, ff98, }
    1508  VSHL/VSHLQ:6:result_uint32x2 [] = { fffff000, fffff100, }
    1509  VSHL/VSHLQ:7:result_uint64x1 [] = { ffffffffffffff80, }
    1510  VSHL/VSHLQ:8:result_poly8x8 [] = { 33, 33, 33, 33, 33, 33, 33, 33, }
    [all …]
|
D | ref-rvct-neon.txt |
    1595  VSHL/VSHLQ output:
    1596  VSHL/VSHLQ:0:result_int8x8 [] = { ffffffe0, ffffffe2, ffffffe4, ffffffe6, ffffffe8, ffffffea, fffff…
    1597  VSHL/VSHLQ:1:result_int16x4 [] = { ffffff80, ffffff88, ffffff90, ffffff98, }
    1598  VSHL/VSHLQ:2:result_int32x2 [] = { fffff000, fffff100, }
    1599  VSHL/VSHLQ:3:result_int64x1 [] = { ffffffffffffff80, }
    1600  VSHL/VSHLQ:4:result_uint8x8 [] = { e0, e2, e4, e6, e8, ea, ec, ee, }
    1601  VSHL/VSHLQ:5:result_uint16x4 [] = { ff80, ff88, ff90, ff98, }
    1602  VSHL/VSHLQ:6:result_uint32x2 [] = { fffff000, fffff100, }
    1603  VSHL/VSHLQ:7:result_uint64x1 [] = { ffffffffffffff80, }
    1604  VSHL/VSHLQ:8:result_poly8x8 [] = { 33, 33, 33, 33, 33, 33, 33, 33, }
    [all …]
|
D | ref-rvct-all.txt |
    1595  VSHL/VSHLQ output:
    1596  VSHL/VSHLQ:0:result_int8x8 [] = { ffffffe0, ffffffe2, ffffffe4, ffffffe6, ffffffe8, ffffffea, fffff…
    1597  VSHL/VSHLQ:1:result_int16x4 [] = { ffffff80, ffffff88, ffffff90, ffffff98, }
    1598  VSHL/VSHLQ:2:result_int32x2 [] = { fffff000, fffff100, }
    1599  VSHL/VSHLQ:3:result_int64x1 [] = { ffffffffffffff80, }
    1600  VSHL/VSHLQ:4:result_uint8x8 [] = { e0, e2, e4, e6, e8, ea, ec, ee, }
    1601  VSHL/VSHLQ:5:result_uint16x4 [] = { ff80, ff88, ff90, ff98, }
    1602  VSHL/VSHLQ:6:result_uint32x2 [] = { fffff000, fffff100, }
    1603  VSHL/VSHLQ:7:result_uint64x1 [] = { ffffffffffffff80, }
    1604  VSHL/VSHLQ:8:result_poly8x8 [] = { 33, 33, 33, 33, 33, 33, 33, 33, }
    [all …]
|
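The three ref-rvct files record the expected per-type outputs of the VSHL/VSHLQ test case; the all-0x33 poly8x8 row looks like the harness's untouched fill pattern, since VSHL has no polynomial form. The int16x4 row can be reproduced as below; the inputs (-16..-13) and the per-lane count of 3 are inferred from the printed results, not read from the test sources.

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        int16_t in[4]  = { -16, -15, -14, -13 };   /* 0xfff0 .. 0xfff3 */
        int16_t cnt[4] = { 3, 3, 3, 3 };
        int16x4_t r = vshl_s16(vld1_s16(in), vld1_s16(cnt));

        int16_t out[4];
        vst1_s16(out, r);
        for (int i = 0; i < 4; i++)
            printf("%04x, ", (unsigned)(uint16_t)out[i]);   /* ff80, ff88, ff90, ff98 */
        printf("\n");
        return 0;
    }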
/external/llvm/lib/Target/X86/ |
D | X86IntrinsicsInfo.h |
    298   X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
    299   X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
    300   X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
    1242  X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    1243  X86_INTRINSIC_DATA(avx512_mask_psll_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    1244  X86_INTRINSIC_DATA(avx512_mask_psll_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    1248  X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    1249  X86_INTRINSIC_DATA(avx512_mask_psll_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    1250  X86_INTRINSIC_DATA(avx512_mask_psll_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    1254  X86_INTRINSIC_DATA(avx512_mask_psll_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
    [all …]
|
D | X86ISelLowering.h | 311 VSHL, VSRL, VSRA, enumerator
|
D | X86InstrFragmentsSIMD.td | 205 def X86vshl : SDNode<"X86ISD::VSHL",
|
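The X86 entries tie the SSE/AVX "psll" intrinsics and the X86vshl SelectionDAG node to one opcode, X86ISD::VSHL, where every lane is shifted by a scalar count held in another vector. Below is a sketch of one intrinsic that is routed through the avx2_psll_d row above; the wrapper name is mine, and the code assumes AVX2 is enabled at compile time.

    #include <immintrin.h>

    /* Each 32-bit lane of v is shifted left by the count in the low 64 bits of
     * `count`; this is the C-level form of llvm.x86.avx2.psll.d. */
    __m256i shift_lanes_left(__m256i v, __m128i count) {
        return _mm256_sll_epi32(v, count);
    }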
/external/swiftshader/third_party/LLVM/include/llvm/ |
D | IntrinsicsARM.td |
    266  // represented by intrinsics in LLVM, and even the basic VSHL variable shift
    267  // operation cannot be safely translated to LLVM's shift operators. VSHL can
    274  // shifts, where the constant is replicated. For consistency with VSHL (and
|
/external/llvm/include/llvm/IR/ |
D | IntrinsicsARM.td |
    311  // represented by intrinsics in LLVM, and even the basic VSHL variable shift
    312  // operation cannot be safely translated to LLVM's shift operators. VSHL can
    319  // shifts, where the constant is replicated. For consistency with VSHL (and
|
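Both copies of IntrinsicsARM.td (the swiftshader-vendored one and the one under external/llvm) carry the same comment: NEON shifts stay as intrinsics because the register form of VSHL shifts each lane by a signed per-lane count, and a negative count shifts that lane right, which a plain LLVM shl cannot express. A small illustration of that behaviour, with made-up values:

    #include <arm_neon.h>

    int32x2_t mixed_direction_shift(void) {
        int32x2_t x   = { 16, 16 };
        int32x2_t cnt = { 2, -2 };   /* lane 0 shifts left, lane 1 shifts right */
        return vshl_s32(x, cnt);     /* yields { 64, 4 } */
    }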
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMISelLowering.h | 110 VSHL, // ...left enumerator
|
D | ARMISelLowering.cpp |
    879   case ARMISD::VSHL: return "ARMISD::VSHL";  in getTargetNodeName()
    3102  Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,  in LowerFCOPYSIGN()
    3110  Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,  in LowerFCOPYSIGN()
    7575  VShiftOpc = ARMISD::VSHL;  in PerformIntrinsicCombine()
    7720  return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),  in PerformShiftCombine()
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64ISelLowering.h | 113 VSHL, enumerator
|
/external/llvm/lib/Target/ARM/ |
D | ARMISelLowering.h | 109 VSHL, // ...left enumerator
|
D | ARMScheduleSwift.td | 545 "VSHL", "VSHR(s|u)", "VSHLL", "VQSHL", "VQSHLU", "VBIF",
|
D | ARMISelLowering.cpp |
    1183   case ARMISD::VSHL: return "ARMISD::VSHL";  in getTargetNodeName()
    4260   Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,  in LowerFCOPYSIGN()
    4268   Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,  in LowerFCOPYSIGN()
    10584  VShiftOpc = ARMISD::VSHL;  in PerformIntrinsicCombine()
    10727  return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),  in PerformShiftCombine()
    11212  U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))  in isVectorLoadExtDesirable()
|
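The two ARMISelLowering.cpp copies above show where the ARM backend produces ARMISD::VSHL: naming the node in getTargetNodeName, building the sign-bit mask in LowerFCOPYSIGN, and rewriting generic or intrinsic shifts in PerformIntrinsicCombine and PerformShiftCombine. A uniform vector left shift like the one below is the kind of input PerformShiftCombine turns into a single VSHL node when targeting NEON; the typedef and function name are mine, shown only as a sketch.

    #include <stdint.h>

    typedef int32_t v4si __attribute__((vector_size(16)));

    v4si scale_by_8(v4si x) {
        return x << 3;   /* uniform vector shl; selected as a NEON VSHL by immediate */
    }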
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86ISelLowering.h | 224 VSHL, VSRL, enumerator
|
D | X86InstrFragmentsSIMD.td | 91 def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
|
/external/v8/src/arm/ |
D | assembler-arm.cc |
    4449  enum NeonShiftOp { VSHL, VSHR };  enumerator
    4461  if (op == VSHL) {  in EncodeNeonShiftOp()
    4480  emit(EncodeNeonShiftOp(VSHL, dt, dst, src, shift));  in vshl()
|
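The V8 hits show its ARM assembler emitting NEON shifts: vshl() calls EncodeNeonShiftOp, which packs the operation, element type, registers, and shift amount into the instruction word. The helper below is only my reading of the ARMv7 VSHL-by-immediate encoding (imm6 = lane size + shift for 8/16/32-bit lanes), not code taken from V8:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t vshl_imm6(int lane_size_bits, int shift) {
        assert(lane_size_bits == 8 || lane_size_bits == 16 || lane_size_bits == 32);
        assert(shift >= 0 && shift < lane_size_bits);
        return (uint32_t)(lane_size_bits + shift);  /* e.g. .S32 #3 -> 0b100011 */
    }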