/external/llvm-project/llvm/test/MC/AArch64/SVE/ |
D | uqsub.s |
    11   uqsub z0.b, z0.b, z0.b
    17   uqsub z0.h, z0.h, z0.h
    23   uqsub z0.s, z0.s, z0.s
    29   uqsub z0.d, z0.d, z0.d
    35   uqsub z0.b, z0.b, #0
    41   uqsub z31.b, z31.b, #255
    47   uqsub z0.h, z0.h, #0
    53   uqsub z0.h, z0.h, #0, lsl #8
    59   uqsub z31.h, z31.h, #255, lsl #8
    65   uqsub z31.h, z31.h, #65280
    [all …]
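All of these forms are SVE unsigned saturating subtraction: a lane that would underflow clamps to zero instead of wrapping. As a reference point only (not taken from the test file), one 8-bit lane behaves like the C++ helper below; the .h/.s/.d forms apply the same rule with wider element types.

    #include <cstdint>

    // Illustrative scalar model of one UQSUB lane (8-bit elements).
    static inline uint8_t uqsub_lane_u8(uint8_t a, uint8_t b) {
        // Unsigned saturating subtract: clamp to 0 on underflow instead of wrapping.
        return a > b ? static_cast<uint8_t>(a - b) : 0;
    }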
|
D | uqsub-diagnostics.s |
    4    uqsub z22.h, z10.h, z32.h
    10   uqsub z20.h, z2.h, z31.x
    16   uqsub z27.h, z11.h, z27.b
    25   uqsub z0.b, z0.b, #0, lsl #8 // #0, lsl #8 is not valid for .b
    30   uqsub z0.b, z0.b, #-1
    35   uqsub z0.b, z0.b, #1, lsl #8
    40   uqsub z0.b, z0.b, #256
    45   uqsub z0.h, z0.h, #-1
    50   uqsub z0.h, z0.h, #256, lsl #8
    55   uqsub z0.h, z0.h, #65536
    [all …]
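The rejected immediates above follow the usual SVE rule for this encoding: an unsigned 8-bit value, optionally shifted left by 8, with the shifted form unavailable for .b elements (byte-aligned values such as #65280 in uqsub.s above are folded into the shifted form by the assembler). A hypothetical checker that mirrors these diagnostics; the function name and structure are mine, not LLVM's:

    #include <cstdint>

    // elem_bits: 8, 16, 32 or 64; shift8: true when "lsl #8" was written explicitly.
    static bool uqsub_imm_is_valid(unsigned elem_bits, int64_t imm, bool shift8) {
        if (shift8 && elem_bits == 8) return false;   // "#0, lsl #8" is rejected for .b
        if (imm >= 0 && imm <= 255) return true;      // plain 0..255, or 0..255 lsl #8
        // Without an explicit shift, a byte-aligned value up to 0xFF00 can still be
        // encoded as "(imm >> 8), lsl #8" for .h/.s/.d (e.g. #65280 above).
        return !shift8 && elem_bits != 8 &&
               imm > 0 && imm <= 0xFF00 && (imm & 0xFF) == 0;   // rejects #-1, #256 for .b, #65536
    }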
|
/external/llvm-project/llvm/test/MC/AArch64/SVE2/ |
D | uqsub.s |
    10   uqsub z0.b, p0/m, z0.b, z1.b
    16   uqsub z0.h, p0/m, z0.h, z1.h
    22   uqsub z29.s, p7/m, z29.s, z30.s
    28   uqsub z31.d, p7/m, z31.d, z30.d
    43   uqsub z31.d, p0/m, z31.d, z30.d
    55   uqsub z31.d, p7/m, z31.d, z30.d
|
D | uqsub-diagnostics.s |
    6    uqsub z0.b, p0/m, z1.b, z2.b
    15   uqsub z0.b, p0/m, z0.d, z1.d
    20   uqsub z0.b, p0/m, z0.b, z1.h
    29   uqsub z0.b, p0/z, z0.b, z1.b
    34   uqsub z0.b, p8/m, z0.b, z1.b
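The SVE2 form is predicated and merging: the destination must be tied to the first source, element sizes must match, only p0-p7 may govern it, and zeroing (/z) is not available; each rejected line above violates one of those rules. Roughly, per lane (an illustrative model, not LLVM code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Model of "uqsub zdn.b, pg/m, zdn.b, zm.b" (merging predication): active lanes
    // get the saturated difference, inactive lanes keep the old value of zdn.
    static void uqsub_merging_u8(std::vector<uint8_t>& zdn,
                                 const std::vector<uint8_t>& zm,
                                 const std::vector<bool>& pg) {
        for (std::size_t i = 0; i < zdn.size(); ++i) {
            if (pg[i])
                zdn[i] = zdn[i] > zm[i] ? static_cast<uint8_t>(zdn[i] - zm[i]) : 0;
            // else: lane left unchanged ("/m" merging; a "/z" zeroing form does not exist)
        }
    }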
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | arm64-vqsub.ll |
    32   ;CHECK: uqsub.8b
    35   %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    41   ;CHECK: uqsub.4h
    44   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    50   ;CHECK: uqsub.2s
    53   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    95   ;CHECK: uqsub.16b
    98   %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    104  ;CHECK: uqsub.8h
    107  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    [all …]
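These llvm.aarch64.neon.uqsub.* calls are what Clang normally emits for the ACLE saturating-subtract intrinsics. A small C++ sketch of the source side, assuming an AArch64 target with arm_neon.h available (the wrapper names are mine):

    #include <arm_neon.h>

    // ACLE unsigned saturating subtract; expected to lower to the uqsub.8b/.16b/.8h
    // forms checked above.
    uint8x8_t  sub_sat_u8x8 (uint8x8_t a,  uint8x8_t b)  { return vqsub_u8(a, b); }
    uint8x16_t sub_sat_u8x16(uint8x16_t a, uint8x16_t b) { return vqsubq_u8(a, b); }
    uint16x8_t sub_sat_u16x8(uint16x8_t a, uint16x8_t b) { return vqsubq_u16(a, b); }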
|
D | usub_sat_vec.ll |
    39   ; CHECK-NEXT: uqsub v0.16b, v0.16b, v1.16b
    48   ; CHECK-NEXT: uqsub v0.16b, v0.16b, v2.16b
    49   ; CHECK-NEXT: uqsub v1.16b, v1.16b, v3.16b
    58   ; CHECK-NEXT: uqsub v0.16b, v0.16b, v4.16b
    59   ; CHECK-NEXT: uqsub v1.16b, v1.16b, v5.16b
    60   ; CHECK-NEXT: uqsub v2.16b, v2.16b, v6.16b
    61   ; CHECK-NEXT: uqsub v3.16b, v3.16b, v7.16b
    70   ; CHECK-NEXT: uqsub v0.8h, v0.8h, v1.8h
    79   ; CHECK-NEXT: uqsub v0.8h, v0.8h, v2.8h
    80   ; CHECK-NEXT: uqsub v1.8h, v1.8h, v3.8h
    [all …]
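This test covers the target-independent llvm.usub.sat intrinsic: it lowers to uqsub, and vectors wider than 128 bits are split across several q registers (the v0-v3 run above is a 64-byte vector split four ways). For reference, the same saturation can be written branch-free with GCC/Clang vector extensions; the helper below is my own sketch, not part of the test:

    #include <cstdint>

    typedef uint8_t u8x16 __attribute__((vector_size(16)));

    // Saturating a - b per byte: (a - b) <= a holds exactly when no unsigned
    // underflow occurred, so mask away the lanes that wrapped.
    static u8x16 usub_sat_u8x16(u8x16 a, u8x16 b) {
        u8x16 d = a - b;
        return d & (u8x16)(d <= a);   // lane-wise compare gives all-ones / all-zeros
    }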
|
D | arm64-arith-saturating.ll |
    64   ; CHECK: uqsub s0, s0, s1
    67   %vqsub.i = tail call i32 @llvm.aarch64.neon.uqsub.i32(i32 %vecext, i32 %vecext1) nounwind
    73   ; CHECK: uqsub d0, d0, d1
    76   %vqsub.i = tail call i64 @llvm.aarch64.neon.uqsub.i64(i64 %vecext, i64 %vecext1) nounwind
    80   declare i64 @llvm.aarch64.neon.uqsub.i64(i64, i64) nounwind readnone
    81   declare i32 @llvm.aarch64.neon.uqsub.i32(i32, i32) nounwind readnone
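The .i32/.i64 variants select the scalar register forms (uqsub s.., s.., s.. and uqsub d.., d.., d..). On the source side these are reachable through the ACLE scalar intrinsics, assuming arm_neon.h on an AArch64 target (wrapper names are mine):

    #include <arm_neon.h>
    #include <cstdint>

    // Scalar unsigned saturating subtract, expected to select the
    // "uqsub s0, s0, s1" / "uqsub d0, d0, d1" forms checked above.
    uint32_t sub_sat_u32(uint32_t a, uint32_t b) { return vqsubs_u32(a, b); }
    uint64_t sub_sat_u64(uint64_t a, uint64_t b) { return vqsubd_u64(a, b); }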
|
D | sve-intrinsics-int-arith.ll |
    294  ; CHECK: uqsub z0.b, z0.b, z1.b
    296  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
    303  ; CHECK: uqsub z0.h, z0.h, z1.h
    305  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
    312  ; CHECK: uqsub z0.s, z0.s, z1.s
    314  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
    321  ; CHECK: uqsub z0.d, z0.d, z1.d
    323  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
    398  declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
    399  declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
    [all …]
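The .x suffix marks the unpredicated SVE intrinsic, matching the unpredicated base-SVE encoding tested in MC/AArch64/SVE/uqsub.s above. In source code this is typically reached through the SVE ACLE saturating subtract in arm_sve.h; the exact spelling below (svqsub_u8) is my assumption and should be checked against the compiler's header:

    #include <arm_sve.h>

    // Assumed ACLE spelling of the unpredicated SVE saturating subtract that
    // llvm.aarch64.sve.uqsub.x.nxv16i8 implements (requires an SVE-enabled target).
    svuint8_t sub_sat_sve_u8(svuint8_t a, svuint8_t b) {
        return svqsub_u8(a, b);   // expected to select "uqsub z0.b, z0.b, z1.b"
    }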
|
D | sve-int-arith.ll |
    194  ; CHECK-NEXT: uqsub z0.d, z0.d, z1.d
    203  ; CHECK-NEXT: uqsub z0.s, z0.s, z1.s
    212  ; CHECK-NEXT: uqsub z0.h, z0.h, z1.h
    221  ; CHECK-NEXT: uqsub z0.b, z0.b, z1.b
|
D | sve-intrinsics-int-arith-imm.ll |
    672  ; CHECK-NEXT: uqsub z0.b, z0.b, #27 // =0x1b
    676  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
    684  ; CHECK-NEXT: uqsub z0.h, z0.h, #43 // =0x2b
    688  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
    696  ; CHECK-NEXT: uqsub z0.h, z0.h, #2048 // =0x800
    700  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
    708  ; CHECK-NEXT: uqsub z0.s, z0.s, #1 // =0x1
    712  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
    720  ; CHECK-NEXT: uqsub z0.s, z0.s, #8192 // =0x2000
    724  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
    [all …]
|
D | sve-int-imm.ll |
    440  ; CHECK: uqsub z0.b, z0.b, #30
    450  ; CHECK: uqsub z0.h, z0.h, #30
    460  ; CHECK: uqsub z0.h, z0.h, #1024
    470  ; CHECK: uqsub z0.s, z0.s, #30
    480  ; CHECK: uqsub z0.s, z0.s, #1024
    490  ; CHECK: uqsub z0.d, z0.d, #30
    500  ; CHECK: uqsub z0.d, z0.d, #1024
|
D | sve2-intrinsics-uniform-dsp.ll |
    1511 ; CHECK: uqsub z0.b, p0/m, z0.b, z1.b
    1513 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> %pg,
    1521 ; CHECK: uqsub z0.h, p0/m, z0.h, z1.h
    1523 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> %pg,
    1531 ; CHECK: uqsub z0.s, p0/m, z0.s, z1.s
    1533 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> %pg,
    1541 ; CHECK: uqsub z0.d, p0/m, z0.d, z1.d
    1543 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> %pg,
    2056 declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, …
    2057 declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <…
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-vqsub.ll |
    32   ;CHECK: uqsub.8b
    35   %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
    41   ;CHECK: uqsub.4h
    44   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
    50   ;CHECK: uqsub.2s
    53   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
    95   ;CHECK: uqsub.16b
    98   %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
    104  ;CHECK: uqsub.8h
    107  %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
    [all …]
|
D | arm64-arith-saturating.ll |
    64   ; CHECK: uqsub s0, s0, s1
    67   %vqsub.i = tail call i32 @llvm.aarch64.neon.uqsub.i32(i32 %vecext, i32 %vecext1) nounwind
    73   ; CHECK: uqsub d0, d0, d1
    76   %vqsub.i = tail call i64 @llvm.aarch64.neon.uqsub.i64(i64 %vecext, i64 %vecext1) nounwind
    80   declare i64 @llvm.aarch64.neon.uqsub.i64(i64, i64) nounwind readnone
    81   declare i32 @llvm.aarch64.neon.uqsub.i32(i32, i32) nounwind readnone
|
/external/llvm/test/MC/AArch64/ |
D | neon-saturating-add-sub.s |
    66   uqsub v0.8b, v1.8b, v2.8b
    67   uqsub v0.16b, v1.16b, v2.16b
    68   uqsub v0.4h, v1.4h, v2.4h
    69   uqsub v0.8h, v1.8h, v2.8h
    70   uqsub v0.2s, v1.2s, v2.2s
    71   uqsub v0.4s, v1.4s, v2.4s
    72   uqsub v0.2d, v1.2d, v2.2d
|
D | neon-scalar-saturating-add-sub.s |
    45   uqsub b0, b1, b2
    46   uqsub h10, h11, h12
    47   uqsub s20, s21, s2
    48   uqsub d17, d31, d8
|
/external/capstone/suite/MC/AArch64/ |
D | neon-saturating-add-sub.s.cs |
    23   0x20,0x2c,0x22,0x2e = uqsub v0.8b, v1.8b, v2.8b
    24   0x20,0x2c,0x22,0x6e = uqsub v0.16b, v1.16b, v2.16b
    25   0x20,0x2c,0x62,0x2e = uqsub v0.4h, v1.4h, v2.4h
    26   0x20,0x2c,0x62,0x6e = uqsub v0.8h, v1.8h, v2.8h
    27   0x20,0x2c,0xa2,0x2e = uqsub v0.2s, v1.2s, v2.2s
    28   0x20,0x2c,0xa2,0x6e = uqsub v0.4s, v1.4s, v2.4s
    29   0x20,0x2c,0xe2,0x6e = uqsub v0.2d, v1.2d, v2.2d
|
D | neon-scalar-saturating-add-sub.s.cs |
    14   0x20,0x2c,0x22,0x7e = uqsub b0, b1, b2
    15   0x6a,0x2d,0x6c,0x7e = uqsub h10, h11, h12
    16   0xb4,0x2e,0xa2,0x7e = uqsub s20, s21, s2
    17   0xf1,0x2f,0xe8,0x7e = uqsub d17, d31, d8
|
/external/llvm-project/llvm/test/MC/AArch64/ |
D | neon-saturating-add-sub.s |
    66   uqsub v0.8b, v1.8b, v2.8b
    67   uqsub v0.16b, v1.16b, v2.16b
    68   uqsub v0.4h, v1.4h, v2.4h
    69   uqsub v0.8h, v1.8h, v2.8h
    70   uqsub v0.2s, v1.2s, v2.2s
    71   uqsub v0.4s, v1.4s, v2.4s
    72   uqsub v0.2d, v1.2d, v2.2d
|
D | neon-scalar-saturating-add-sub.s |
    45   uqsub b0, b1, b2
    46   uqsub h10, h11, h12
    47   uqsub s20, s21, s2
    48   uqsub d17, d31, d8
|
/external/libhevc/common/arm64/ |
D | ihevc_deblk_luma_horz.s |
    207  uqsub v31.8b, v26.8b , v1.8b
    224  uqsub v17.8b, v27.8b , v1.8b
    245  uqsub v31.8b, v28.8b , v1.8b
    293  uqsub v31.8b, v25.8b , v1.8b
    301  uqsub v17.8b, v24.8b , v1.8b
    399  uqsub v31.8b, v23.8b , v1.8b
|
D | ihevc_deblk_luma_vert.s |
    203  uqsub v30.8b,v7.8b,v19.8b
    244  uqsub v31.8b,v5.8b,v19.8b
    255  uqsub v25.8b,v4.8b,v19.8b
    282  uqsub v31.8b,v2.8b,v19.8b
    293  uqsub v28.8b,v3.8b,v19.8b
    305  uqsub v31.8b,v6.8b,v19.8b
|
/external/libavc/common/armv8/ |
D | ih264_deblk_chroma_av8.s |
    401  uqsub v4.16b, v4.16b , v14.16b   //Q2 = p0 - delta
    403  uqsub v0.16b, v0.16b , v14.16b   //Q0 = q0 - delta
    555  uqsub v24.16b, v2.16b , v14.16b  //p0-|delta|
    556  uqsub v26.16b, v4.16b , v14.16b  //q0-|delta|
|
D | ih264_deblk_luma_av8.s |
    181  uqsub v6.16b, v6.16b , v18.16b   //Q3 = p0 - delta
    185  uqsub v0.16b, v0.16b , v18.16b   //Q0 = q0 - delta
    627  uqsub v22.16b, v6.16b , v30.16b  //clip(p0-delta)
    641  uqsub v8.16b, v8.16b , v30.16b   //clip(q0-delta)
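In both the libhevc and libavc deblocking filters above, the saturating subtract implements the "pixel minus delta magnitude" step without an explicit clamp, as the inline comments (p0 - delta, q0-|delta|, clip(p0-delta)) indicate. A plain C++ rendering of that step for one row of pixels; the variable names are mine:

    #include <cstddef>
    #include <cstdint>

    // Subtract a non-negative delta magnitude from each pixel, saturating at 0;
    // uqsub v.8b / v.16b does this for 8 or 16 pixels per instruction.
    static void row_sub_clamped(uint8_t* pix, const uint8_t* delta_abs, std::size_t n) {
        for (std::size_t i = 0; i < n; ++i)
            pix[i] = pix[i] > delta_abs[i] ? static_cast<uint8_t>(pix[i] - delta_abs[i]) : 0;
    }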
|
/external/vixl/test/aarch64/ |
D | test-trace-aarch64.cc | (all in GenerateTestSequenceNEON())
    2365 __ uqsub(b28, b20, b26);
    2366 __ uqsub(d0, d7, d10);
    2367 __ uqsub(h26, h24, h7);
    2368 __ uqsub(s23, s23, s16);
    2369 __ uqsub(v14.V16B(), v16.V16B(), v24.V16B());
    2370 __ uqsub(v11.V2D(), v17.V2D(), v6.V2D());
    2371 __ uqsub(v10.V2S(), v10.V2S(), v8.V2S());
    2372 __ uqsub(v9.V4H(), v15.V4H(), v12.V4H());
    2373 __ uqsub(v23.V4S(), v18.V4S(), v7.V4S());
    2374 __ uqsub(v9.V8B(), v19.V8B(), v17.V8B());
    [all …]
|