/external/libxaac/decoder/armv7/ |
D | ixheaacd_pre_twiddle_compute.s | 175 VSHL.S32 Q10, Q10, Q7 179 VSHL.S32 Q11, Q11, Q7 182 VSHL.S32 Q9, Q15, Q7 183 VSHL.S32 Q8, Q14, Q7 241 VSHL.S32 Q10, Q10, Q7 242 VSHL.S32 Q11, Q11, Q7 245 VSHL.S32 Q9, Q15, Q7 248 VSHL.S32 Q8, Q14, Q7 296 VSHL.S32 Q10, Q10, Q7 297 VSHL.S32 Q11, Q11, Q7 [all …]
|
D | ixheaacd_sbr_imdct_using_fft.s | 401 VSHL.S32 q12, q12, #1 403 VSHL.S32 q5, q5, #1 405 VSHL.S32 q9, q9, #1 407 VSHL.S32 q2, q2, #1 409 VSHL.S32 q0, q0, #1 411 VSHL.S32 q7, q7, #1 413 VSHL.S32 q14, q14, #1 415 VSHL.S32 q4, q4, #1 417 VSHL.S32 q13, q13, #1 419 VSHL.S32 q6, q6, #1 [all …]
|
D | ixheaacd_dec_DCT2_64_asm.s | 81 VSHL.S32 D0, D0, #1 122 VSHL.S32 Q15, Q15, #1 124 VSHL.S32 Q14, Q14, #1 177 VSHL.S32 Q15, Q15, #1 180 VSHL.S32 Q14, Q14, #1 224 VSHL.S32 Q15, Q15, #1 227 VSHL.S32 Q14, Q14, #1 274 VSHL.S32 Q15, Q15, #1 275 VSHL.S32 Q14, Q14, #1 320 VSHL.S32 D2, D2, #4 [all …]
|
D | ixheaacd_imdct_using_fft.s | 395 VSHL.S32 q12, q12, #3 397 VSHL.S32 q5, q5, #3 399 VSHL.S32 q9, q9, #3 401 VSHL.S32 q2, q2, #3 403 VSHL.S32 q0, q0, #3 405 VSHL.S32 q7, q7, #3 407 VSHL.S32 q14, q14, #3 409 VSHL.S32 q4, q4, #3 411 VSHL.S32 q13, q13, #3 413 VSHL.S32 q6, q6, #3 [all …]
|
D | ixheaacd_fft32x32_ld.s | 404 VSHL.S32 q12, q12, #3 @ch 406 VSHL.S32 q5, q5, #3 @ch 408 VSHL.S32 q9, q9, #3 @ch 410 VSHL.S32 q2, q2, #3 @ch 412 VSHL.S32 q0, q0, #3 @ch 414 VSHL.S32 q7, q7, #3 @ch 416 VSHL.S32 q14, q14, #3 @ch 418 VSHL.S32 q4, q4, #3 @ch 420 VSHL.S32 q13, q13, #3 @ch 422 VSHL.S32 q6, q6, #3 @ch [all …]
|
D | ixheaacd_sbr_qmfsyn64_winadd.s | 48 VSHL.S32 Q10, Q15, Q14
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | trunc.ll | 27 ; GCN: v_mov_b32_e32 [[VSHL:v[0-9]+]], [[SHL]] 28 ; SI: buffer_store_dword [[VSHL]] 29 ; VI: flat_store_dword v[{{[0-9:]+}}], [[VSHL]]
|
/external/llvm/test/CodeGen/ARM/ |
D | rotate.ll | 4 ;; select ROTL. Make sure it generates the basic VSHL/VSHR.
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | rotate.ll | 5 ;; select ROTL. Make sure it generates the basic VSHL/VSHR.
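
Both copies of this test pin down the same lowering: a rotate that cannot be matched to a native ROTL must still come out as the plain VSHL/VSHR shift pair. A minimal scalar sketch of the pattern being checked (the helper name is illustrative, not from the test):

    #include <stdint.h>

    /* Rotate-left expressed as the shift pair the test expects;
       masking the count keeps both shifts in range, including r == 0. */
    static inline uint32_t rotl32(uint32_t x, unsigned r) {
        r &= 31;
        return (x << r) | (x >> ((32 - r) & 31));
    }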
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | trunc.ll | 25 ; SI: v_mov_b32_e32 [[VSHL:v[0-9]+]], [[SHL]] 26 ; SI: buffer_store_dword [[VSHL]],
|
/external/arm-neon-tests/ |
D | ref-rvct-neon-nofp16.txt | 1501 VSHL/VSHLQ output: 1502 VSHL/VSHLQ:0:result_int8x8 [] = { ffffffe0, ffffffe2, ffffffe4, ffffffe6, ffffffe8, ffffffea, fffff… 1503 VSHL/VSHLQ:1:result_int16x4 [] = { ffffff80, ffffff88, ffffff90, ffffff98, } 1504 VSHL/VSHLQ:2:result_int32x2 [] = { fffff000, fffff100, } 1505 VSHL/VSHLQ:3:result_int64x1 [] = { ffffffffffffff80, } 1506 VSHL/VSHLQ:4:result_uint8x8 [] = { e0, e2, e4, e6, e8, ea, ec, ee, } 1507 VSHL/VSHLQ:5:result_uint16x4 [] = { ff80, ff88, ff90, ff98, } 1508 VSHL/VSHLQ:6:result_uint32x2 [] = { fffff000, fffff100, } 1509 VSHL/VSHLQ:7:result_uint64x1 [] = { ffffffffffffff80, } 1510 VSHL/VSHLQ:8:result_poly8x8 [] = { 33, 33, 33, 33, 33, 33, 33, 33, } [all …]
|
D | ref-rvct-neon.txt | 1595 VSHL/VSHLQ output: 1596 VSHL/VSHLQ:0:result_int8x8 [] = { ffffffe0, ffffffe2, ffffffe4, ffffffe6, ffffffe8, ffffffea, fffff… 1597 VSHL/VSHLQ:1:result_int16x4 [] = { ffffff80, ffffff88, ffffff90, ffffff98, } 1598 VSHL/VSHLQ:2:result_int32x2 [] = { fffff000, fffff100, } 1599 VSHL/VSHLQ:3:result_int64x1 [] = { ffffffffffffff80, } 1600 VSHL/VSHLQ:4:result_uint8x8 [] = { e0, e2, e4, e6, e8, ea, ec, ee, } 1601 VSHL/VSHLQ:5:result_uint16x4 [] = { ff80, ff88, ff90, ff98, } 1602 VSHL/VSHLQ:6:result_uint32x2 [] = { fffff000, fffff100, } 1603 VSHL/VSHLQ:7:result_uint64x1 [] = { ffffffffffffff80, } 1604 VSHL/VSHLQ:8:result_poly8x8 [] = { 33, 33, 33, 33, 33, 33, 33, 33, } [all …]
|
D | ref-rvct-all.txt | 1595 VSHL/VSHLQ output: 1596 VSHL/VSHLQ:0:result_int8x8 [] = { ffffffe0, ffffffe2, ffffffe4, ffffffe6, ffffffe8, ffffffea, fffff… 1597 VSHL/VSHLQ:1:result_int16x4 [] = { ffffff80, ffffff88, ffffff90, ffffff98, } 1598 VSHL/VSHLQ:2:result_int32x2 [] = { fffff000, fffff100, } 1599 VSHL/VSHLQ:3:result_int64x1 [] = { ffffffffffffff80, } 1600 VSHL/VSHLQ:4:result_uint8x8 [] = { e0, e2, e4, e6, e8, ea, ec, ee, } 1601 VSHL/VSHLQ:5:result_uint16x4 [] = { ff80, ff88, ff90, ff98, } 1602 VSHL/VSHLQ:6:result_uint32x2 [] = { fffff000, fffff100, } 1603 VSHL/VSHLQ:7:result_uint64x1 [] = { ffffffffffffff80, } 1604 VSHL/VSHLQ:8:result_poly8x8 [] = { 33, 33, 33, 33, 33, 33, 33, 33, } [all …]
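
All three reference files agree on the VSHL/VSHLQ rows. Assuming the usual arm-neon-tests input buffer of { -16, -15, ..., -9 } and a per-lane shift amount of 1 for the int8x8 case (both inferred from the printed values, not stated in this listing), the first row can be reproduced with intrinsics:

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Lane 0 is the low byte: 0xf0 = -16, ..., 0xf7 = -9. */
        int8x8_t in  = vcreate_s8(0xf7f6f5f4f3f2f1f0ULL);
        int8x8_t amt = vdup_n_s8(1);       /* assumed shift count */
        int8x8_t out = vshl_s8(in, amt);   /* expect e0 e2 e4 ... ee */
        int8_t buf[8];
        vst1_s8(buf, out);
        for (int i = 0; i < 8; i++)
            printf("%02x ", (uint8_t)buf[i]);
        printf("\n");
        return 0;
    }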
|
/external/llvm/lib/Target/X86/ |
D | X86IntrinsicsInfo.h | 298 X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0), 299 X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0), 300 X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1242 X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), 1243 X86_INTRINSIC_DATA(avx512_mask_psll_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), 1244 X86_INTRINSIC_DATA(avx512_mask_psll_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), 1248 X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), 1249 X86_INTRINSIC_DATA(avx512_mask_psll_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), 1250 X86_INTRINSIC_DATA(avx512_mask_psll_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), 1254 X86_INTRINSIC_DATA(avx512_mask_psll_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0), [all …]
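
Every psll entry in this table routes a packed left-shift intrinsic to the same X86ISD::VSHL node. Unlike the per-lane NEON shift discussed further down, the x86 count is a single scalar taken from the low 64 bits of the second operand; a short sketch with the sse2_psll_d intrinsic, assuming an SSE2 target:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v = _mm_set1_epi32(3);
        __m128i c = _mm_cvtsi32_si128(4);     /* count lives in the low qword */
        __m128i r = _mm_sll_epi32(v, c);      /* llvm.x86.sse2.psll.d -> VSHL */
        printf("%d\n", _mm_cvtsi128_si32(r)); /* 3 << 4 = 48, in every lane */
        return 0;
    }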
|
D | X86ISelLowering.h | 311 VSHL, VSRL, VSRA, enumerator
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86IntrinsicsInfo.h | 392 X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0), 393 X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0), 394 X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0), 850 X86_INTRINSIC_DATA(avx512_psll_d_512, INTR_TYPE_2OP, X86ISD::VSHL, 0), 851 X86_INTRINSIC_DATA(avx512_psll_q_512, INTR_TYPE_2OP, X86ISD::VSHL, 0), 852 X86_INTRINSIC_DATA(avx512_psll_w_512, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1056 X86_INTRINSIC_DATA(sse2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1057 X86_INTRINSIC_DATA(sse2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1058 X86_INTRINSIC_DATA(sse2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
|
/external/llvm-project/llvm/lib/Target/X86/ |
D | X86IntrinsicsInfo.h | 392 X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0), 393 X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0), 394 X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0), 846 X86_INTRINSIC_DATA(avx512_psll_d_512, INTR_TYPE_2OP, X86ISD::VSHL, 0), 847 X86_INTRINSIC_DATA(avx512_psll_q_512, INTR_TYPE_2OP, X86ISD::VSHL, 0), 848 X86_INTRINSIC_DATA(avx512_psll_w_512, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1062 X86_INTRINSIC_DATA(sse2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1063 X86_INTRINSIC_DATA(sse2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0), 1064 X86_INTRINSIC_DATA(sse2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
|
/external/llvm/include/llvm/IR/ |
D | IntrinsicsARM.td | 311 // represented by intrinsics in LLVM, and even the basic VSHL variable shift 312 // operation cannot be safely translated to LLVM's shift operators. VSHL can 319 // shifts, where the constant is replicated. For consistency with VSHL (and
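
The comment quoted here is the key point: NEON's register-form VSHL takes a signed per-lane shift amount (negative counts shift right), so LLVM's shl, whose negative or out-of-range counts are undefined, cannot express it directly. A minimal sketch of that behaviour via the vshl_s32 intrinsic:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int32x2_t val = vdup_n_s32(0x100);
        /* Lane 0 shifts left by 4, lane 1 shifts right by 4. */
        int32_t counts[2] = { 4, -4 };
        int32x2_t amt = vld1_s32(counts);
        int32x2_t out = vshl_s32(val, amt);
        printf("%d %d\n", vget_lane_s32(out, 0),
                          vget_lane_s32(out, 1)); /* 4096 16 */
        return 0;
    }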
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64ISelLowering.h | 113 VSHL, enumerator
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/IR/ |
D | IntrinsicsARM.td | 501 // represented by intrinsics in LLVM, and even the basic VSHL variable shift 502 // operation cannot be safely translated to LLVM's shift operators. VSHL can 509 // shifts, where the constant is replicated. For consistency with VSHL (and
|
/external/llvm/lib/Target/ARM/ |
D | ARMISelLowering.h | 109 VSHL, // ...left enumerator
|
/external/llvm-project/llvm/include/llvm/IR/ |
D | IntrinsicsARM.td | 501 // represented by intrinsics in LLVM, and even the basic VSHL variable shift 502 // operation cannot be safely translated to LLVM's shift operators. VSHL can 509 // shifts, where the constant is replicated. For consistency with VSHL (and
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64ISelLowering.h | 113 VSHL, enumerator
|
/external/libhevc/common/arm/ |
D | ihevc_resi_trans.s | 764 @ VSHL.S32 q14,q14,#6 ; q14 = G0 = 64*(B0 + B1 + B2 + B3 + B4 + B5 + B6 + B7) 765 @ VSHL.S32 q15,q15,#6 ; q15 = G4 = 64*(B0 - B1 - B2 + B3 + B4 - B5 - B6 + B7) 885 @ VSHL.S32 q14,q14,#6 ; q14 = H0 = 64*(B0 + B1 + B2 + B3 + B4 + B5 + B6 + B7) 887 @ VSHL.S32 q2,q15,#6 ; q2 = H4 = 64*(B0 - B1 - B2 + B3 + B4 - B5 - B6 + B7)
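
These commented-out instructions document the scaling step of the residual transform: the butterfly sums are multiplied by 64, which the immediate form of VSHL performs as a left shift by 6. A hedged C equivalent, with the function and parameter names taken from the comments rather than the surrounding code:

    #include <arm_neon.h>

    /* G0 = 64 * (B0 + B1 + ... + B7): a left shift by 6 replaces the
       multiply, matching the disabled "VSHL.S32 ..., #6" above. */
    int32x4_t scale_g0(int32x4_t butterfly_sums) {
        return vshlq_n_s32(butterfly_sums, 6);
    }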
|
/external/llvm-project/llvm/lib/Target/AArch64/ |
D | AArch64ISelLowering.h | 178 VSHL, enumerator
|