
Searched refs:addp (Results 1 – 25 of 65) sorted by relevance

/external/llvm/test/CodeGen/AArch64/
arm64-neon-add-pairwise.ll
3 declare <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8>, <8 x i8>)
8 %tmp1 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
9 ; CHECK: addp v0.8b, v0.8b, v1.8b
13 declare <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8>, <16 x i8>)
17 %tmp1 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
18 ; CHECK: addp v0.16b, v0.16b, v1.16b
22 declare <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16>, <4 x i16>)
26 %tmp1 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
27 ; CHECK: addp v0.4h, v0.4h, v1.4h
31 declare <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16>, <8 x i16>)
[all …]
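
For context, these fragments come from a lit test that checks the lowering of the llvm.aarch64.neon.addp intrinsic to the AArch64 addp (add pairwise) instruction. A minimal sketch of one such test function, reconstructed from the snippets above (the name @test_addp_v8i8 is illustrative, not taken from the file):

  ; Sketch only: call the pairwise-add intrinsic and expect a single addp.
  declare <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8>, <8 x i8>)

  define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
  ; CHECK: addp v0.8b, v0.8b, v1.8b
    %tmp1 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
    ret <8 x i8> %tmp1
  }
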
arm64-vadd.ll
641 ;CHECK: addp.8b
644 %tmp3 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
650 ;CHECK: addp.16b
653 %tmp3 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
659 ;CHECK: addp.4h
662 %tmp3 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
668 ;CHECK: addp.8h
671 %tmp3 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
677 ;CHECK: addp.2s
680 %tmp3 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
[all …]
arm64-vaddv.ll
52 ; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
62 ; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
73 ; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
83 ; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
163 ; CHECK: addp.2s v[[REGNUM:[0-9]+]], v0, v0
173 ; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
212 ; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
222 ; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
233 ; CHECK: addp.2d d0, v0
arm64-detect-vec-redux.ll
20 %vpaddq_v2.i = tail call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> undef, <2 x i64> %1) #2
45 declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) #1
arm64-addp.ll
15 ; CHECK: addp.2d d0, v0
/external/capstone/suite/MC/AArch64/
neon-add-pairwise.s.cs
2 0x20,0xbc,0x22,0x0e = addp v0.8b, v1.8b, v2.8b
3 0x20,0xbc,0x22,0x4e = addp v0.16b, v1.16b, v2.16b
4 0x20,0xbc,0x62,0x0e = addp v0.4h, v1.4h, v2.4h
5 0x20,0xbc,0x62,0x4e = addp v0.8h, v1.8h, v2.8h
6 0x20,0xbc,0xa2,0x0e = addp v0.2s, v1.2s, v2.2s
7 0x20,0xbc,0xa2,0x4e = addp v0.4s, v1.4s, v2.4s
8 0x20,0xbc,0xe2,0x4e = addp v0.2d, v1.2d, v2.2d
neon-scalar-reduce-pairwise.s.cs
2 0x20,0xb8,0xf1,0x5e = addp d0, v1.2d
/external/llvm/test/MC/AArch64/
neon-add-pairwise.s
9 addp v0.8b, v1.8b, v2.8b
10 addp v0.16b, v1.16b, v2.16b
11 addp v0.4h, v1.4h, v2.4h
12 addp v0.8h, v1.8h, v2.8h
13 addp v0.2s, v1.2s, v2.2s
14 addp v0.4s, v1.4s, v2.4s
15 addp v0.2d, v1.2d, v2.2d
directive-cpu.s
17 addp v0.4s, v0.4s, v0.4s
21 addp v0.4s, v0.4s, v0.4s
neon-scalar-reduce-pairwise.s
6 addp d0, v1.2d
/external/libavc/common/armv8/
ih264_intra_pred_chroma_av8.s
132 addp v0.4s, v0.4s , v0.4s
133 addp v1.4s, v1.4s , v1.4s
134 addp v0.4s, v0.4s , v0.4s
135 addp v1.4s, v1.4s , v1.4s
138 addp v2.4s, v2.4s , v2.4s
139 addp v3.4s, v3.4s , v3.4s
140 addp v2.4s, v2.4s , v2.4s
141 addp v3.4s, v3.4s , v3.4s
159 addp v0.4s, v0.4s , v0.4s
160 addp v1.4s, v1.4s , v1.4s
[all …]
ih264_resi_trans_quant_av8.s
220 addp v0.8b, v0.8b, v0.8b //i pair add nnz 1
221 addp v0.8b, v0.8b, v0.8b //i pair add nnz 1
222 addp v0.8b, v0.8b, v0.8b //i pair add nnz 1
422 addp v0.8b, v0.8b, v0.8b //i pair add nnz 1
423 addp v0.8b, v0.8b, v0.8b //i pair add nnz 1
424 addp v0.8b, v0.8b, v0.8b //i pair add nnz 1
573 addp v2.8b, v2.8b, v2.8b
574 addp v2.8b, v2.8b, v2.8b
575 addp v2.8b, v2.8b, v2.8b
674 addp v5.8b, v5.8b, v5.8b //sum up nnz
[all …]
ih264_intra_pred_luma_16x16_av8.s
334 addp v0.4h, v0.4h , v0.4h
335 addp v0.4h, v0.4h , v0.4h
448 addp v0.8h, v0.8h, v1.8h
/external/libavc/encoder/armv8/
ih264e_evaluate_intra_chroma_modes_av8.s
119 addp v0.4s, v0.4s , v0.4s
120 addp v1.4s, v1.4s , v1.4s
121 addp v0.4s, v0.4s , v0.4s
122 addp v1.4s, v1.4s , v1.4s
125 addp v2.4s, v2.4s , v2.4s
126 addp v3.4s, v3.4s , v3.4s
127 addp v2.4s, v2.4s , v2.4s
128 addp v3.4s, v3.4s , v3.4s
150 addp v0.4s, v0.4s , v0.4s
151 addp v1.4s, v1.4s , v1.4s
[all …]
ime_distortion_metrics_av8.s
134 addp v30.8h, v30.8h, v30.8h
136 addp v30.2s, v30.2s, v30.2s
217 addp v30.8h, v30.8h, v30.8h
219 addp v30.2s, v30.2s, v30.2s
313 addp v31.8h, v30.8h, v30.8h
315 addp v31.2s, v31.2s, v31.2s
362 addp v31.8h, v30.8h, v30.8h
364 addp v31.2s, v31.2s, v31.2s
443 addp v30.8h, v30.8h, v31.8h
445 addp v30.2s, v30.2s, v30.2s
[all …]
ih264e_evaluate_intra16x16_modes_av8.s
139 addp v0.4h, v0.4h , v0.4h
140 addp v0.4h, v0.4h , v0.4h
447 addp v16.2s, v16.2s, v16.2s
457 addp v26.2s, v26.2s, v26.2s
466 addp v24.2s, v24.2s, v24.2s ///DC
/external/libmpeg2/common/armv8/
icv_variance_av8.s
81 addp v4.8h, v4.8h, v4.8h
82 addp v4.4h, v4.4h, v4.4h
83 addp v4.4h, v4.4h, v4.4h
99 addp v20.4s, v20.4s, v20.4s
100 addp v20.2s, v20.2s, v20.2s
icv_sad_av8.s
94 addp v0.8h, v0.8h, v0.8h
95 addp v0.8h, v0.8h, v0.8h
96 addp v0.8h, v0.8h, v0.8h
ideint_spatial_filter_av8.s
125 addp v16.8h, v16.8h, v16.8h
126 addp v18.8h, v18.8h, v18.8h
127 addp v20.8h, v20.8h, v20.8h
/external/arm-optimized-routines/string/aarch64/
strrchr.S
86 addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128
87 addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
88 addp vend1.16b, vhas_nul1.16b, vhas_chr1.16b // 128->64
112 addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
113 addp vend1.16b, vend1.16b, vhas_chr1.16b // 128->64
122 addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b
123 addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b
memchr.S
81 addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
82 addp vend.16b, vend.16b, vend.16b /* 128->64 */
102 addp vend.2d, vend.2d, vend.2d
111 addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
112 addp vend.16b, vend.16b, vend.16b /* 128->64 */
strchrnul.S
74 addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
76 addp vend1.16b, vend1.16b, vend1.16b // 128->64
99 addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
100 addp vend1.16b, vend1.16b, vend1.16b // 128->64
strchr.S
82 addp vend1.16b, vend1.16b, vend2.16b // 256->128
84 addp vend1.16b, vend1.16b, vend2.16b // 128->64
109 addp vend1.16b, vend1.16b, vend2.16b // 256->128
110 addp vend1.16b, vend1.16b, vend2.16b // 128->64
strlen.S
181 addp maskv.16b, datav1.16b, datav2.16b
182 addp maskv.16b, maskv.16b, maskv.16b
/external/llvm/test/CodeGen/Hexagon/
bit-gen-rseq.ll
21 …%0 = tail call i64 @llvm.hexagon.A2.addp(i64 %t.sroa.0.0.insert.insert, i64 %t.sroa.0.0.insert.ins…
34 …%1 = tail call i64 @llvm.hexagon.A2.addp(i64 %t.sroa.0.0.insert.insert19, i64 %t.sroa.0.0.insert.i…
40 declare i64 @llvm.hexagon.A2.addp(i64, i64) #1
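
The Hexagon hits use the separate llvm.hexagon.A2.addp intrinsic, which (per its i64 signature) performs a 64-bit add on register pairs. A minimal sketch of a call, with the wrapper name @sum64 chosen for illustration rather than taken from bit-gen-rseq.ll:

  ; Sketch only: 64-bit add through the Hexagon A2.addp intrinsic.
  declare i64 @llvm.hexagon.A2.addp(i64, i64)

  define i64 @sum64(i64 %a, i64 %b) {
    %r = tail call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
    ret i64 %r
  }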
