/external/libhevc/common/arm64/
D | ihevc_itrans_recon_8x8.s |
    267  sqrshrn v2.4h, v20.4s,#shift_stage1_idct   //// x0 = (a0 + b0 + rnd) >> 7(shift_stage1_idct)
    268  sqrshrn v15.4h, v6.4s,#shift_stage1_idct   //// x7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    269  sqrshrn v3.4h, v24.4s,#shift_stage1_idct   //// x2 = (a2 + b2 + rnd) >> 7(shift_stage1_idct)
    270  sqrshrn v14.4h, v22.4s,#shift_stage1_idct  //// x5 = (a2 - b2 + rnd) >> 7(shift_stage1_idct)
    271  sqrshrn v6.4h, v28.4s,#shift_stage1_idct   //// x1 = (a1 + b1 + rnd) >> 7(shift_stage1_idct)
    272  sqrshrn v11.4h, v18.4s,#shift_stage1_idct  //// x6 = (a1 - b1 + rnd) >> 7(shift_stage1_idct)
    273  sqrshrn v7.4h, v26.4s,#shift_stage1_idct   //// x3 = (a3 + b3 + rnd) >> 7(shift_stage1_idct)
    274  sqrshrn v10.4h, v30.4s,#shift_stage1_idct  //// x4 = (a3 - b3 + rnd) >> 7(shift_stage1_idct)
    337  sqrshrn v2.4h, v20.4s,#shift_stage1_idct   //// x0 = (a0 + b0 + rnd) >> 7(shift_stage1_idct)
    338  sqrshrn v15.4h, v6.4s,#shift_stage1_idct   //// x7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    [all …]

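In these HEVC IDCT kernels a single sqrshrn folds three steps into one instruction: the rounding add (rnd = 1 << (shift - 1)), the stage shift, and the saturating narrow from the 32-bit accumulators back to 16-bit coefficients. A minimal intrinsics sketch of one butterfly pair, assuming the stage-1 shift of 7 stated in the comments above (the function name is illustrative; vqrshrn_n_s32 is the ACLE intrinsic that assembles to sqrshrn for a 4s-to-4h narrow):

    #include <arm_neon.h>

    #define SHIFT_STAGE1_IDCT 7   /* stage-1 shift, per the comments above */

    /* x0 = sat16((a0 + b0 + 64) >> 7), x7 = sat16((a0 - b0 + 64) >> 7).
     * vqrshrn_n_s32 adds 1 << (shift - 1) before shifting and saturates
     * the narrow to int16, so no separate rounding add is needed. */
    static inline void idct_butterfly_pair(int32x4_t a0, int32x4_t b0,
                                           int16x4_t *x0, int16x4_t *x7)
    {
        *x0 = vqrshrn_n_s32(vaddq_s32(a0, b0), SHIFT_STAGE1_IDCT);
        *x7 = vqrshrn_n_s32(vsubq_s32(a0, b0), SHIFT_STAGE1_IDCT);
    }
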
D | ihevc_itrans_recon_4x4.s |
    159  …sqrshrn v28.4h, v19.4s,#shift_stage1_idct  //pi2_out[0] = clip_s16((e[0] + o[0] + add)>>shift) )
    160  …sqrshrn v29.4h, v16.4s,#shift_stage1_idct  //pi2_out[1] = clip_s16((e[1] + o[1] + add)>>shift) )
    161  …sqrshrn v30.4h, v18.4s,#shift_stage1_idct  //pi2_out[2] = clip_s16((e[0] - o[0] + add)>>shift) )
    162  …sqrshrn v31.4h, v20.4s,#shift_stage1_idct  //pi2_out[3] = clip_s16((e[0] - o[0] + add)>>shift) )
    194  …sqrshrn v28.4h, v19.4s,#shift_stage2_idct  //pi2_out[0] = clip_s16((e[0] + o[0] + add)>>shift) )
    195  …sqrshrn v29.4h, v16.4s,#shift_stage2_idct  //pi2_out[1] = clip_s16((e[1] + o[1] + add)>>shift) )
    196  …sqrshrn v30.4h, v18.4s,#shift_stage2_idct  //pi2_out[2] = clip_s16((e[0] - o[0] + add)>>shift) )
    197  …sqrshrn v31.4h, v20.4s,#shift_stage2_idct  //pi2_out[3] = clip_s16((e[0] - o[0] + add)>>shift) )

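Per lane, each of these narrows computes exactly the expression in the comments; the two passes differ only in the immediate (stage 1 vs stage 2 of the inverse transform use different shifts). A scalar model, assuming only that add = 1 << (shift - 1) and that clip_s16 saturates to the int16 range:

    #include <stdint.h>

    /* One sqrshrn lane: clip_s16((x + add) >> shift), add = 1 << (shift - 1). */
    static int16_t sqrshrn_lane(int32_t x, int shift)
    {
        int32_t r = (x + (1 << (shift - 1))) >> shift;
        if (r > INT16_MAX) r = INT16_MAX;   /* saturate high */
        if (r < INT16_MIN) r = INT16_MIN;   /* saturate low  */
        return (int16_t)r;
    }
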
D | ihevc_itrans_recon_4x4_ttype1.s |
    159  sqrshrn v28.4h, v6.4s,#shift_stage1_idct   // (pi2_out[0] + rounding ) >> shift_stage1_idct
    160  sqrshrn v29.4h, v5.4s,#shift_stage1_idct   // (pi2_out[1] + rounding ) >> shift_stage1_idct
    161  sqrshrn v30.4h, v7.4s,#shift_stage1_idct   // (pi2_out[2] + rounding ) >> shift_stage1_idct
    162  sqrshrn v31.4h, v20.4s,#shift_stage1_idct  // (pi2_out[3] + rounding ) >> shift_stage1_idct
    203  sqrshrn v28.4h, v6.4s,#shift_stage2_idct   // (pi2_out[0] + rounding ) >> shift_stage1_idct
    204  sqrshrn v29.4h, v5.4s,#shift_stage2_idct   // (pi2_out[1] + rounding ) >> shift_stage1_idct
    205  sqrshrn v30.4h, v7.4s,#shift_stage2_idct   // (pi2_out[2] + rounding ) >> shift_stage1_idct
    206  sqrshrn v31.4h, v20.4s,#shift_stage2_idct  // (pi2_out[3] + rounding ) >> shift_stage1_idct

D | ihevc_itrans_recon_16x16.s |
    403  sqrshrn v30.4h, v20.4s,#shift_stage1_idct  //// x0 = (a0 + b0 + rnd) >> 7(shift_stage1_idct)
    404  sqrshrn v19.4h, v22.4s,#shift_stage1_idct  //// x7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    405  sqrshrn v31.4h, v14.4s,#shift_stage1_idct  //// x2 = (a2 + b2 + rnd) >> 7(shift_stage1_idct)
    406  sqrshrn v18.4h, v26.4s,#shift_stage1_idct  //// x5 = (a2 - b2 + rnd) >> 7(shift_stage1_idct)
    407  sqrshrn v12.4h, v12.4s,#shift_stage1_idct  //// x1 = (a1 + b1 + rnd) >> 7(shift_stage1_idct)
    408  sqrshrn v15.4h, v24.4s,#shift_stage1_idct  //// x6 = (a1 - b1 + rnd) >> 7(shift_stage1_idct)
    409  sqrshrn v13.4h, v16.4s,#shift_stage1_idct  //// x3 = (a3 + b3 + rnd) >> 7(shift_stage1_idct)
    410  sqrshrn v14.4h, v28.4s,#shift_stage1_idct  //// x4 = (a3 - b3 + rnd) >> 7(shift_stage1_idct)
    571  sqrshrn v18.4h, v4.4s,#shift_stage1_idct   //// x0 = (a0 + b0 + rnd) >> 7(shift_stage1_idct)
    572  sqrshrn v31.4h, v22.4s,#shift_stage1_idct  //// x7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    [all …]

D | ihevc_itrans_recon_32x32.s |
    500  sqrshrn v30.4h, v8.4s,#shift_stage1_idct   //// x0 = (a0 + b0 + rnd) >> 7(shift_stage1_idct)
    501  sqrshrn v19.4h, v10.4s,#shift_stage1_idct  //// x7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    502  sqrshrn v31.4h, v14.4s,#shift_stage1_idct  //// x2 = (a2 + b2 + rnd) >> 7(shift_stage1_idct)
    503  sqrshrn v18.4h, v26.4s,#shift_stage1_idct  //// x5 = (a2 - b2 + rnd) >> 7(shift_stage1_idct)
    504  sqrshrn v12.4h, v12.4s,#shift_stage1_idct  //// x1 = (a1 + b1 + rnd) >> 7(shift_stage1_idct)
    505  sqrshrn v15.4h, v24.4s,#shift_stage1_idct  //// x6 = (a1 - b1 + rnd) >> 7(shift_stage1_idct)
    506  sqrshrn v13.4h, v16.4s,#shift_stage1_idct  //// x3 = (a3 + b3 + rnd) >> 7(shift_stage1_idct)
    507  sqrshrn v14.4h, v28.4s,#shift_stage1_idct  //// x4 = (a3 - b3 + rnd) >> 7(shift_stage1_idct)
    859  sqrshrn v30.4h, v8.4s,#shift_stage1_idct   //// x0 = (a0 + b0 + rnd) >> 7(shift_stage1_idct)
    860  sqrshrn v19.4h, v10.4s,#shift_stage1_idct  //// x7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    [all …]

/external/libmpeg2/common/armv8/
D | impeg2_idct.s |
    467  sqrshrn v2.4h, v20.4s, #idct_stg1_shift   //// x0 = (a0 + b0 + rnd) >> 7(IDCT_STG1_SHIFT)
    468  sqrshrn v15.4h, v6.4s, #idct_stg1_shift   //// x7 = (a0 - b0 + rnd) >> 7(IDCT_STG1_SHIFT)
    469  sqrshrn v3.4h, v24.4s, #idct_stg1_shift   //// x2 = (a2 + b2 + rnd) >> 7(IDCT_STG1_SHIFT)
    470  sqrshrn v14.4h, v22.4s, #idct_stg1_shift  //// x5 = (a2 - b2 + rnd) >> 7(IDCT_STG1_SHIFT)
    471  sqrshrn v6.4h, v28.4s, #idct_stg1_shift   //// x1 = (a1 + b1 + rnd) >> 7(IDCT_STG1_SHIFT)
    472  sqrshrn v11.4h, v18.4s, #idct_stg1_shift  //// x6 = (a1 - b1 + rnd) >> 7(IDCT_STG1_SHIFT)
    473  sqrshrn v7.4h, v26.4s, #idct_stg1_shift   //// x3 = (a3 + b3 + rnd) >> 7(IDCT_STG1_SHIFT)
    474  sqrshrn v10.4h, v30.4s, #idct_stg1_shift  //// x4 = (a3 - b3 + rnd) >> 7(IDCT_STG1_SHIFT)
    538  sqrshrn v2.4h, v20.4s, #idct_stg1_shift   //// x0 = (a0 + b0 + rnd) >> 7(IDCT_STG1_SHIFT)
    539  sqrshrn v15.4h, v6.4s, #idct_stg1_shift   //// x7 = (a0 - b0 + rnd) >> 7(IDCT_STG1_SHIFT)
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/
D | neon-scalar-shift-imm.s |
    146  sqrshrn b10, h13, #2
    147  sqrshrn h15, s10, #6
    148  sqrshrn s15, d12, #9

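These are the scalar forms, narrowing a single element rather than a vector. Assuming the usual ACLE spellings (the h/s/d suffix names the source element width), the same three shapes in C:

    #include <arm_neon.h>

    int8_t  n8 (int16_t a) { return vqrshrnh_n_s16(a, 2); }  /* sqrshrn b, h, #2 */
    int16_t n16(int32_t a) { return vqrshrns_n_s32(a, 6); }  /* sqrshrn h, s, #6 */
    int32_t n32(int64_t a) { return vqrshrnd_n_s64(a, 9); }  /* sqrshrn s, d, #9 */
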
D | neon-simd-shift.s |
    367  sqrshrn v0.8b, v1.8h, #3
    368  sqrshrn v0.4h, v1.4s, #3
    369  sqrshrn v0.2s, v1.2d, #3

D | arm64-advsimd.s |
    1354  sqrshrn b0, h0, #1
    1355  sqrshrn h0, s0, #2
    1356  sqrshrn s0, d0, #3
    1403  ; CHECK: sqrshrn b0, h0, #1   ; encoding: [0x00,0x9c,0x0f,0x5f]
    1404  ; CHECK: sqrshrn h0, s0, #2   ; encoding: [0x00,0x9c,0x1e,0x5f]
    1405  ; CHECK: sqrshrn s0, d0, #3   ; encoding: [0x00,0x9c,0x3d,0x5f]
    1486  sqrshrn.8b v0, v0, #1
    1488  sqrshrn.4h v0, v0, #3
    1490  sqrshrn.2s v0, v0, #5
    1658  ; CHECK: sqrshrn.8b v0, v0, #1   ; encoding: [0x00,0x9c,0x0f,0x0f]
    [all …]

D | arm64-diags.s |
    129  sqrshrn b4, h9, #10
    131  sqrshrn v7.4h, v8.4s, #39
    135  ; CHECK-ERRORS: sqrshrn b4, h9, #10
    141  ; CHECK-ERRORS: sqrshrn v7.4h, v8.4s, #39

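The rejected immediates pin down the legal range: a narrow to w-bit elements accepts shifts of 1 through w, so #10 exceeds the limit of 8 for a byte destination and #39 the limit of 16 for a 4h destination. A trivial check capturing that rule (a sketch, not the assembler's actual validation code):

    #include <stdbool.h>

    /* shift must satisfy 1 <= shift <= esize, where esize is the
     * destination element width in bits. */
    static bool sqrshrn_shift_ok(int esize, int shift)
    {
        return shift >= 1 && shift <= esize;
    }

    /* From arm64-diags.s above:
     *   sqrshrn b4, h9, #10        -> sqrshrn_shift_ok(8, 10)  == false
     *   sqrshrn v7.4h, v8.4s, #39  -> sqrshrn_shift_ok(16, 39) == false */
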
/external/capstone/suite/MC/AArch64/
D | neon-scalar-shift-imm.s.cs |
    31  0xaa,0x9d,0x0e,0x5f = sqrshrn b10, h13, #2
    32  0x4f,0x9d,0x1a,0x5f = sqrshrn h15, s10, #6
    33  0x8f,0x9d,0x37,0x5f = sqrshrn s15, d12, #9

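These byte patterns expose the immediate encoding: the 7-bit immh:immb field (bits 22:16) holds 2*esize - shift. A sketch of an encoder for the scalar form that reproduces the three little-endian words above (a toy under those assumptions, with no operand validation):

    #include <stdint.h>

    /* Scalar SQRSHRN <Vd>, <Vn>, #shift:
     * base opcode 0x5F009C00, immh:immb = 2*esize - shift in bits 22:16,
     * Rn in bits 9:5, Rd in bits 4:0. */
    static uint32_t enc_sqrshrn_scalar(int esize, int rd, int rn, int shift)
    {
        uint32_t imm7 = (uint32_t)(2 * esize - shift);
        return 0x5F009C00u | (imm7 << 16) | ((uint32_t)rn << 5) | (uint32_t)rd;
    }

    /* enc_sqrshrn_scalar(8, 10, 13, 2)  == 0x5F0E9DAA  (aa 9d 0e 5f)
     * enc_sqrshrn_scalar(16, 15, 10, 6) == 0x5F1A9D4F  (4f 9d 1a 5f)
     * enc_sqrshrn_scalar(32, 15, 12, 9) == 0x5F379D8F  (8f 9d 37 5f) */
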
D | neon-simd-shift.s.cs |
    128  0x20,0x9c,0x0d,0x0f = sqrshrn v0.8b, v1.8h, #3
    129  0x20,0x9c,0x1d,0x0f = sqrshrn v0.4h, v1.4s, #3
    130  0x20,0x9c,0x3d,0x0f = sqrshrn v0.2s, v1.2d, #3

/external/llvm/test/MC/AArch64/
D | neon-scalar-shift-imm.s |
    146  sqrshrn b10, h13, #2
    147  sqrshrn h15, s10, #6
    148  sqrshrn s15, d12, #9

D | neon-simd-shift.s |
    367  sqrshrn v0.8b, v1.8h, #3
    368  sqrshrn v0.4h, v1.4s, #3
    369  sqrshrn v0.2s, v1.2d, #3

D | arm64-advsimd.s |
    1354  sqrshrn b0, h0, #1
    1355  sqrshrn h0, s0, #2
    1356  sqrshrn s0, d0, #3
    1403  ; CHECK: sqrshrn b0, h0, #1   ; encoding: [0x00,0x9c,0x0f,0x5f]
    1404  ; CHECK: sqrshrn h0, s0, #2   ; encoding: [0x00,0x9c,0x1e,0x5f]
    1405  ; CHECK: sqrshrn s0, d0, #3   ; encoding: [0x00,0x9c,0x3d,0x5f]
    1486  sqrshrn.8b v0, v0, #1
    1488  sqrshrn.4h v0, v0, #3
    1490  sqrshrn.2s v0, v0, #5
    1658  ; CHECK: sqrshrn.8b v0, v0, #1   ; encoding: [0x00,0x9c,0x0f,0x0f]
    [all …]

D | arm64-diags.s |
    129  sqrshrn b4, h9, #10
    131  sqrshrn v7.4h, v8.4s, #39
    135  ; CHECK-ERRORS: sqrshrn b4, h9, #10
    141  ; CHECK-ERRORS: sqrshrn v7.4h, v8.4s, #39

D | neon-diagnostics.s |
    1981  sqrshrn v0.8b, v1.8b, #3
    1982  sqrshrn v0.4h, v1.4h, #3
    1983  sqrshrn v0.2s, v1.2s, #3
    5131  sqrshrn b10, h13, #99
    5132  sqrshrn h15, s10, #99
    5133  sqrshrn s15, d12, #99

/external/libavc/common/armv8/
D | ih264_iquant_itrans_recon_av8.s |
    157  sqrshrn v0.4h, v0.4s, #0x4  // d0 = c[i] = ((q[i] + 32) >> 4) where i = 0..3
    158  sqrshrn v1.4h, v2.4s, #0x4  // d1 = c[i] = ((q[i] + 32) >> 4) where i = 4..7
    159  sqrshrn v2.4h, v4.4s, #0x4  // d2 = c[i] = ((q[i] + 32) >> 4) where i = 8..11
    160  sqrshrn v3.4h, v6.4s, #0x4  // d3 = c[i] = ((q[i] + 32) >> 4) where i = 12..15
    350  sqrshrn v0.4h, v0.4s, #0x4  // d0 = c[i] = ((q[i] + 32) >> 4) where i = 0..3
    351  sqrshrn v1.4h, v2.4s, #0x4  // d1 = c[i] = ((q[i] + 32) >> 4) where i = 4..7
    352  sqrshrn v2.4h, v4.4s, #0x4  // d2 = c[i] = ((q[i] + 32) >> 4) where i = 8..11
    353  sqrshrn v3.4h, v6.4s, #0x4  // d3 = c[i] = ((q[i] + 32) >> 4) where i = 12..15
    596  sqrshrn v0.4h, v16.4s, #6
    598  sqrshrn v1.4h, v18.4s, #6
    [all …]

D | ih264_ihadamard_scaling_av8.s |
    154  sqrshrn v0.4h, v0.4s, #6  // d0 = c[i] = ((q[i] + 32) >> 4) where i = 0..3
    155  sqrshrn v1.4h, v1.4s, #6  // d1 = c[i] = ((q[i] + 32) >> 4) where i = 4..7
    156  sqrshrn v2.4h, v2.4s, #6  // d2 = c[i] = ((q[i] + 32) >> 4) where i = 8..11
    157  sqrshrn v3.4h, v3.4s, #6  // d3 = c[i] = ((q[i] + 32) >> 4) where i = 12..15

D | ih264_iquant_itrans_recon_dc_av8.s |
    143  sqrshrn v0.4h, v0.4s, #4
    360  sqrshrn v0.4h, v0.4s, #6

D | ih264_deblk_chroma_av8.s |
    384  sqrshrn v8.8b, v8.8h, #3   //
    385  sqrshrn v9.8b, v10.8h, #3  //Q4 = i_macro = (((q0 - p0)<<2) + (p1 - q1) + 4)>>3

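Here the narrow is 8h to 8b and the immediate is 3, so the instruction supplies both the + 4 rounding and the final >> 3 of the deblocking delta in the comment, while the arithmetic stays at 16 bits to avoid overflow. A small intrinsics sketch under those assumptions (operand names are illustrative: q0_p0 holds q0 - p0, p1_q1 holds p1 - q1):

    #include <arm_neon.h>

    /* i_macro = (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, saturated to s8. */
    static int8x8_t deblk_delta(int16x8_t q0_p0, int16x8_t p1_q1)
    {
        int16x8_t t = vaddq_s16(vshlq_n_s16(q0_p0, 2), p1_q1);
        return vqrshrn_n_s16(t, 3);   /* adds 4, shifts by 3, narrows */
    }
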
/external/llvm/test/CodeGen/AArch64/
D | arm64-neon-simd-shift.ll |
    501  %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %b, i32 3)
    512  %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %b, i32 9)
    524  %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %b, i32 19)
    596  declare <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16>, i32)
    598  declare <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32>, i32)
    600  declare <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64>, i32)

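These IR intrinsics are the three vector shapes the CodeGen tests drive; in C they correspond to the ACLE vqrshrn_n_* intrinsics, which clang should lower to the matching @llvm.aarch64.neon.sqrshrn.* calls (mapping stated as an assumption, not verified against this exact LLVM revision):

    #include <arm_neon.h>

    int8x8_t  narrow8 (int16x8_t v) { return vqrshrn_n_s16(v, 3);  }  /* v8i8,  #3  */
    int16x4_t narrow16(int32x4_t v) { return vqrshrn_n_s32(v, 9);  }  /* v4i16, #9  */
    int32x2_t narrow32(int64x2_t v) { return vqrshrn_n_s64(v, 19); }  /* v2i32, #19 */
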
D | arm64-vshift.ll |
    876  ; CHECK: sqrshrn {{s[0-9]+}}, d0, #1
    877  %tmp = call i32 @llvm.aarch64.neon.sqrshrn.i32(i64 %A, i32 1)
    883  ;CHECK: sqrshrn.8b v0, {{v[0-9]+}}, #1
    885  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
    891  ;CHECK: sqrshrn.4h v0, {{v[0-9]+}}, #1
    893  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
    899  ;CHECK: sqrshrn.2s v0, {{v[0-9]+}}, #1
    901  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
    910  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
    920  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
D | arm64-neon-simd-shift.ll |
    501  %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %b, i32 3)
    512  %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %b, i32 9)
    524  %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %b, i32 19)
    596  declare <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16>, i32)
    598  declare <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32>, i32)
    600  declare <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64>, i32)

D | arm64-vshift.ll |
    876  ; CHECK: sqrshrn {{s[0-9]+}}, d0, #1
    877  %tmp = call i32 @llvm.aarch64.neon.sqrshrn.i32(i64 %A, i32 1)
    883  ;CHECK: sqrshrn.8b v0, {{v[0-9]+}}, #1
    885  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
    891  ;CHECK: sqrshrn.4h v0, {{v[0-9]+}}, #1
    893  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
    899  ;CHECK: sqrshrn.2s v0, {{v[0-9]+}}, #1
    901  %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
    910  %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
    920  %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
    [all …]