
Searched refs:sqrshrun (Results 1 – 25 of 38) sorted by relevance
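
All of the hits below are AArch64 uses of sqrshrun (signed saturating rounded shift right unsigned narrow). As a quick reference for reading the listings, here is a minimal C sketch of what the vector forms compute, assuming an AArch64 toolchain with arm_neon.h; vqrshrun_n_s16 and vqrshrun_n_s32 are the standard ACLE intrinsics that compilers typically lower to this instruction, and the sample values are illustrative only, not taken from any of the files listed.

/*
 * Minimal sketch of what sqrshrun computes (assumptions: AArch64 toolchain,
 * arm_neon.h available; intrinsic names are standard ACLE, values are made up).
 * Per lane: add the rounding constant 1 << (n-1), arithmetic-shift right by n,
 * saturate to the unsigned narrow range, and narrow.
 */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* 16-bit accumulators, e.g. the H.264 6-tap filter sums in the listings */
    const int16_t acc16[8] = { -7, 0, 15, 16, 31, 500, 8160, 9000 };
    int16x8_t a = vld1q_s16(acc16);

    /* sqrshrun vD.8b, vN.8h, #5  ==  CLIP_U8((acc + 16) >> 5) per lane */
    uint8x8_t n8 = vqrshrun_n_s16(a, 5);

    /* 32-bit accumulators narrowed to 16 bits, as in the .4s -> .4h #10 hits */
    const int32_t acc32[4] = { -100, 511, 512, 40000000 };
    int32x4_t b = vld1q_s32(acc32);
    /* sqrshrun vD.4h, vN.4s, #10  ==  CLIP_U16((acc + 512) >> 10) per lane */
    uint16x4_t n16 = vqrshrun_n_s32(b, 10);

    uint8_t out8[8];
    uint16_t out16[4];
    vst1_u8(out8, n8);
    vst1_u16(out16, n16);

    for (int i = 0; i < 8; i++) printf("%u ", out8[i]);   /* 0 0 0 1 1 16 255 255 */
    printf("\n");
    for (int i = 0; i < 4; i++) printf("%u ", out16[i]);  /* 0 0 1 39063 */
    printf("\n");
    return 0;
}

The #5 and #10 shift amounts mirror the libavc/libhevc filter code below, which folds in a rounding term of 16 or 512 before narrowing.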

/external/libavc/common/armv8/
ih264_intra_pred_luma_16x16_av8.s
522 sqrshrun v20.8b, v26.8h, #5
523 sqrshrun v21.8b, v28.8h, #5
526 sqrshrun v22.8b, v26.8h, #5
528 sqrshrun v23.8b, v28.8h, #5
531 sqrshrun v20.8b, v26.8h, #5
533 sqrshrun v21.8b, v28.8h, #5
536 sqrshrun v22.8b, v26.8h, #5
538 sqrshrun v23.8b, v28.8h, #5
541 sqrshrun v20.8b, v26.8h, #5
543 sqrshrun v21.8b, v28.8h, #5
[all …]
ih264_inter_pred_chroma_av8.s
160 sqrshrun v26.8b, v20.8h, #6
168 sqrshrun v27.8b, v22.8h, #6
178 sqrshrun v18.8b, v24.8h, #6
182 sqrshrun v19.8b, v16.8h, #6
188 sqrshrun v26.8b, v20.8h, #6
197 sqrshrun v27.8b, v24.8h, #6
205 sqrshrun v26.8b, v20.8h, #6
208 sqrshrun v27.8b, v22.8h, #6
223 sqrshrun v18.8b, v24.8h, #6
228 sqrshrun v19.8b, v16.8h, #6
[all …]
ih264_inter_pred_filters_luma_vert_av8.s
152 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
157 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
165 sqrshrun v30.8b, v16.8h, #5
173 sqrshrun v31.8b, v14.8h, #5
180 sqrshrun v30.8b, v18.8h, #5
187 sqrshrun v31.8b, v16.8h, #5
193 sqrshrun v30.8b, v14.8h, #5
196 sqrshrun v31.8b, v18.8h, #5
212 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
215 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
[all …]
ih264_inter_pred_luma_vert_qpel_av8.s
159 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
164 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
174 sqrshrun v30.8b, v16.8h, #5
181 sqrshrun v31.8b, v14.8h, #5
191 sqrshrun v30.8b, v18.8h, #5
198 sqrshrun v31.8b, v16.8h, #5
207 sqrshrun v30.8b, v14.8h, #5
210 sqrshrun v31.8b, v18.8h, #5
229 sqrshrun v30.8b, v14.8h, #5 // dst[0_0] = CLIP_U8((temp +16) >> 5)
231 sqrshrun v31.8b, v20.8h, #5 // dst[0_8] = CLIP_U8((temp4 +16) >> 5)
[all …]
ih264_inter_pred_luma_horz_qpel_vert_qpel_av8.s
163 sqrshrun v26.8b, v24.8h, #5
175 sqrshrun v28.8b, v28.8h, #5
182 sqrshrun v27.8b, v24.8h, #5
197 sqrshrun v29.8b, v24.8h, #5
207 sqrshrun v26.8b, v16.8h, #5
215 sqrshrun v27.8b, v24.8h, #5
232 sqrshrun v28.8b, v28.8h, #5
246 sqrshrun v29.8b, v24.8h, #5
254 sqrshrun v26.8b, v16.8h, #5
271 sqrshrun v27.8b, v24.8h, #5
[all …]
ih264_inter_pred_luma_horz_qpel_vert_hpel_av8.s
206 sqrshrun v18.4h, v26.4s, #10
207 sqrshrun v19.4h, v22.4s, #10
231 sqrshrun v19.4h, v26.4s, #10
232 sqrshrun v18.4h, v22.4s, #10
244 sqrshrun v20.8b, v20.8h, #5
245 sqrshrun v21.8b, v22.8h, #5
293 sqrshrun v18.4h, v26.4s, #10
294 sqrshrun v19.4h, v22.4s, #10
319 sqrshrun v19.4h, v26.4s, #10
320 sqrshrun v18.4h, v22.4s, #10
[all …]
ih264_inter_pred_filters_luma_horz_av8.s
171 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
173 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
176 sqrshrun v23.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
178 sqrshrun v24.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
224 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
226 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
229 sqrshrun v23.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
231 sqrshrun v24.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
276 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
278 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
[all …]
ih264_intra_pred_chroma_av8.s
511 sqrshrun v28.8b, v24.8h, #5
513 sqrshrun v29.8b, v0.8h, #5
516 sqrshrun v28.8b, v2.8h, #5
517 sqrshrun v29.8b, v26.8h, #5
524 sqrshrun v28.8b, v24.8h, #5
525 sqrshrun v29.8b, v0.8h, #5
532 sqrshrun v28.8b, v2.8h, #5
533 sqrshrun v29.8b, v26.8h, #5
540 sqrshrun v28.8b, v24.8h, #5
541 sqrshrun v29.8b, v0.8h, #5
[all …]
ih264_intra_pred_luma_8x8_av8.s
342 sqrshrun v31.8b, v12.8h, #4
486 sqrshrun v4.8b, v24.8h, #2
487 sqrshrun v5.8b, v26.8h, #2
586 sqrshrun v4.8b, v24.8h, #2
587 sqrshrun v5.8b, v26.8h, #2
686 sqrshrun v4.8b, v20.8h, #1
687 sqrshrun v5.8b, v22.8h, #1
689 sqrshrun v6.8b, v24.8h, #2
690 sqrshrun v7.8b, v26.8h, #2
811 sqrshrun v4.8b, v20.8h, #1
[all …]
ih264_inter_pred_luma_horz_qpel_av8.s
180 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
182 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
187 sqrshrun v18.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
190 sqrshrun v19.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
241 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
243 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
248 sqrshrun v18.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
251 sqrshrun v19.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
299 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
301 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
[all …]
ih264_inter_pred_luma_horz_hpel_vert_qpel_av8.s
259 sqrshrun v26.8b, v26.8h, #5
268 sqrshrun v18.4h, v18.4s, #10
270 sqrshrun v19.4h, v6.4s, #10
303 sqrshrun v28.8b, v8.8h, #5
307 sqrshrun v18.4h, v18.4s, #10
309 sqrshrun v19.4h, v6.4s, #10
344 sqrshrun v26.8b, v10.8h, #5
349 sqrshrun v18.4h, v18.4s, #10
351 sqrshrun v19.4h, v6.4s, #10
374 sqrshrun v18.4h, v18.4s, #10
[all …]
ih264_inter_pred_luma_horz_hpel_vert_hpel_av8.s
143 sqrshrun v18.4h, v26.4s, #10
144 sqrshrun v19.4h, v23.4s, #10
168 sqrshrun v19.4h, v26.4s, #10
169 sqrshrun v25.4h, v22.4s, #10
224 sqrshrun v18.4h, v26.4s, #10
225 sqrshrun v19.4h, v23.4s, #10
250 sqrshrun v19.4h, v26.4s, #10
251 sqrshrun v25.4h, v22.4s, #10
303 sqrshrun v18.4h, v26.4s, #10
304 sqrshrun v19.4h, v23.4s, #10
[all …]
ih264_intra_pred_luma_4x4_av8.s
427 sqrshrun v3.8b, v24.8h, #2
511 sqrshrun v3.8b, v24.8h, #2
594 sqrshrun v4.8b, v20.8h, #1
595 sqrshrun v3.8b, v24.8h, #2
678 sqrshrun v4.8b, v20.8h, #1
679 sqrshrun v5.8b, v24.8h, #2
765 sqrshrun v4.8b, v20.8h, #1
766 sqrshrun v5.8b, v24.8h, #2
849 sqrshrun v4.8b, v20.8h, #1
850 sqrshrun v5.8b, v24.8h, #2
/external/libhevc/common/arm64/
ihevc_inter_pred_chroma_vert_w16inp.s
160 sqrshrun v0.8b, v0.8h,#6 //rounding shift
161 sqrshrun v30.8b, v30.8h,#6 //rounding shift
220 sqrshrun v30.8b, v30.8h,#6 //rounding shift
236 sqrshrun v28.8b, v28.8h,#6 //rounding shift
249 sqrshrun v26.8b, v26.8h,#6 //rounding shift
263 sqrshrun v24.8b, v24.8h,#6 //rounding shift
276 sqrshrun v30.8b, v30.8h,#6 //rounding shift
290 sqrshrun v28.8b, v28.8h,#6 //rounding shift
306 sqrshrun v26.8b, v26.8h,#6 //rounding shift
316 sqrshrun v24.8b, v24.8h,#6 //rounding shift
[all …]
ihevc_inter_pred_chroma_vert.s
158 sqrshrun v6.8b, v6.8h,#6 //shifts right
161 sqrshrun v4.8b, v4.8h,#6 //shifts right
202 sqrshrun v4.8b, v4.8h,#6 //vrshrq_n_s16(vreinterpretq_s16_u16(mul_res1),6)
254 sqrshrun v30.8b, v30.8h,#6
264 sqrshrun v28.8b, v28.8h,#6
279 sqrshrun v26.8b, v26.8h,#6
300 sqrshrun v24.8b, v24.8h,#6
313 sqrshrun v30.8b, v30.8h,#6
335 sqrshrun v28.8b, v28.8h,#6
356 sqrshrun v26.8b, v26.8h,#6
[all …]
ihevc_inter_pred_chroma_horz.s
253 sqrshrun v30.8b, v30.8h,#6
259 sqrshrun v31.8b, v28.8h,#6
308 sqrshrun v22.8b, v22.8h,#6
309 sqrshrun v23.8b, v20.8h,#6
344 sqrshrun v30.8b, v30.8h,#6
345 sqrshrun v31.8b, v28.8h,#6
406 sqrshrun v22.8b, v22.8h,#6
407 sqrshrun v23.8b, v20.8h,#6
421 sqrshrun v30.8b, v30.8h,#6
422 sqrshrun v31.8b, v28.8h,#6
[all …]
ihevc_inter_pred_filters_luma_vert_w16inp.s
200 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
218 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
243 sqrshrun v21.8b, v21.8h,#6
260 sqrshrun v30.8b, v30.8h,#6
286 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
309 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
327 sqrshrun v21.8b, v21.8h,#6
341 sqrshrun v30.8b, v30.8h,#6
354 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
366 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
[all …]
ihevc_inter_pred_filters_luma_vert.s
209 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
224 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
244 sqrshrun v21.8b, v21.8h,#6
278 sqrshrun v30.8b, v30.8h,#6
304 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
333 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
356 sqrshrun v21.8b, v21.8h,#6
375 sqrshrun v30.8b, v30.8h,#6
388 sqrshrun v19.8b, v19.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
401 sqrshrun v20.8b, v20.8h,#6 //sto_res = vqmovun_s16(sto_res_tmp)//
[all …]
ihevc_inter_pred_filters_luma_horz.s
253 sqrshrun v20.8b, v8.8h,#6 //right shift and saturating narrow result 1
262 sqrshrun v8.8b, v10.8h,#6 //right shift and saturating narrow result 2
372 sqrshrun v8.8b, v8.8h,#6 //right shift and saturating narrow result 1
391 sqrshrun v9.8b, v20.8h,#6
412 sqrshrun v10.8b, v10.8h,#6 //right shift and saturating narrow result 2
442 sqrshrun v11.8b, v22.8h,#6
467 sqrshrun v11.8b, v22.8h,#6
581 sqrshrun v8.8b, v8.8h,#6 //narrow right shift and saturating the result
/external/libavc/encoder/armv8/
ih264e_half_pel_av8.s
172 sqrshrun v20.8b, v8.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
173 sqrshrun v21.8b, v10.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
174 sqrshrun v22.8b, v12.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3…
175 sqrshrun v23.8b, v14.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
176 sqrshrun v24.8b, v16.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
177 sqrshrun v25.8b, v18.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3…
328 sqrshrun v2.8b, v20.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column1…
330 sqrshrun v3.8b, v22.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
341 sqrshrun v4.8b, v24.8h, #5 //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column3…
397 sqrshrun v26.8b, v20.8h, #2 //// half,half gird set1,2
[all …]
/external/llvm/test/MC/AArch64/
neon-scalar-shift-imm.s
180 sqrshrun b17, h10, #6
181 sqrshrun h10, s13, #15
182 sqrshrun s22, d16, #31
neon-simd-shift.s
314 sqrshrun v0.8b, v1.8h, #3
315 sqrshrun v0.4h, v1.4s, #3
316 sqrshrun v0.2s, v1.2d, #3
arm64-advsimd.s
1206 sqrshrun b0, h0, #1
1207 sqrshrun h0, s0, #2
1208 sqrshrun s0, d0, #3
1255 ; CHECK: sqrshrun b0, h0, #1 ; encoding: [0x00,0x8c,0x0f,0x7f]
1256 ; CHECK: sqrshrun h0, s0, #2 ; encoding: [0x00,0x8c,0x1e,0x7f]
1257 ; CHECK: sqrshrun s0, d0, #3 ; encoding: [0x00,0x8c,0x3d,0x7f]
1341 sqrshrun.8b v0, v0, #1
1343 sqrshrun.4h v0, v0, #3
1345 sqrshrun.2s v0, v0, #5
1513 ; CHECK: sqrshrun.8b v0, v0, #1 ; encoding: [0x00,0x8c,0x0f,0x2f]
[all …]
/external/llvm/test/CodeGen/AArch64/
arm64-neon-simd-shift.ll
402 %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %b, i32 3)
413 %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %b, i32 9)
425 %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %b, i32 19)
578 declare <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16>, i32)
580 declare <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32>, i32)
582 declare <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64>, i32)
arm64-vshift.ll
942 ; CHECK: sqrshrun {{s[0-9]+}}, d0, #1
943 %tmp = call i32 @llvm.aarch64.neon.sqrshrun.i32(i64 %A, i32 1)
949 ;CHECK: sqrshrun.8b v0, {{v[0-9]+}}, #1
951 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
957 ;CHECK: sqrshrun.4h v0, {{v[0-9]+}}, #1
959 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
965 ;CHECK: sqrshrun.2s v0, {{v[0-9]+}}, #1
967 %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
976 %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
986 %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
[all …]
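
The MC and CodeGen tests above also exercise the scalar encodings (sqrshrun b/h/s destinations). Below is a hedged sketch of the widest scalar form, again assuming arm_neon.h on an AArch64 toolchain; vqrshrund_n_s64 is the ACLE scalar intrinsic that typically maps to "sqrshrun sD, dN, #imm", and the input value is illustrative only.

/*
 * Sketch of the scalar form (assumption: arm_neon.h scalar intrinsics on an
 * AArch64 toolchain; the input value is made up, not taken from the tests).
 * vqrshrund_n_s64 rounds and right-shifts a signed 64-bit value, then
 * saturates it to the unsigned 32-bit range.
 */
#include <arm_neon.h>
#include <inttypes.h>
#include <stdio.h>

int main(void) {
    int64_t wide = 0x1FFFFFFFFLL;                 /* 8589934591 */
    uint32_t narrow = vqrshrund_n_s64(wide, 1);   /* (wide + 1) >> 1 = 2^32, clamps */
    printf("%" PRIu32 "\n", narrow);              /* prints 4294967295 (saturated) */
    return 0;
}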
