/external/libhevc/common/arm64/
ihevc_intra_pred_chroma_ver.s
    117  ld2 {v20.8b, v21.8b}, [x6],#16      //16 loads (col 0:15)
    127  st2 {v20.8b, v21.8b}, [x2],#16
    128  st2 {v20.8b, v21.8b}, [x5],#16
    129  st2 {v20.8b, v21.8b}, [x8],#16
    130  st2 {v20.8b, v21.8b}, [x10],#16
    140  st2 {v20.8b, v21.8b}, [x2],#16
    141  st2 {v20.8b, v21.8b}, [x5],#16
    142  st2 {v20.8b, v21.8b}, [x8],#16
    143  st2 {v20.8b, v21.8b}, [x10],#16
    153  st2 {v20.8b, v21.8b}, [x2],#16
    [all …]

ihevc_sao_edge_offset_class0.s
    149  …mov v21.b[15], w11                  //vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 1…
    153  …EXT v21.16b, v21.16b , v17.16b,#15  //pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur…
    157  cmhi v16.16b, v17.16b , v21.16b      //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    162  cmhi v18.16b, v21.16b , v17.16b      //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    173  …mov v21.b[0], w11                   //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur…
    177  …EXT v21.16b, v17.16b , v21.16b,#1   //pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_…
    185  cmhi v16.16b, v17.16b , v21.16b      //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    188  cmhi v18.16b, v21.16b , v17.16b      //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    195  ADD v21.16b, v2.16b , v20.16b        //edge_idx = vaddq_s8(const_2, sign_left)
    198  ADD v21.16b, v21.16b , v22.16b       //edge_idx = vaddq_s8(edge_idx, sign_right)
    [all …]

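The class-0 SAO matches above all follow one pattern: build the left/right neighbour vectors with vsetq_lane/vextq, compare them against the current row with cmhi in both directions, and form edge_idx = 2 + sign_left + sign_right. A minimal C sketch of that pattern using NEON intrinsics; function and variable names are illustrative, not libhevc's:

    #include <arm_neon.h>

    /* edge_idx = 2 + sign(cur - left) + sign(cur - right), one byte per pixel */
    static inline int8x16_t sao_class0_edge_idx(uint8x16_t cur,
                                                uint8x16_t left,
                                                uint8x16_t right)
    {
        uint8x16_t gt_l = vcgtq_u8(cur, left);    /* 0xFF where cur > left */
        uint8x16_t lt_l = vcltq_u8(cur, left);    /* 0xFF where cur < left */
        uint8x16_t gt_r = vcgtq_u8(cur, right);
        uint8x16_t lt_r = vcltq_u8(cur, right);

        /* 0x00 - 0xFF wraps to 0x01, so each lane becomes +1, 0 or -1 */
        int8x16_t sign_left  = vreinterpretq_s8_u8(vsubq_u8(lt_l, gt_l));
        int8x16_t sign_right = vreinterpretq_s8_u8(vsubq_u8(lt_r, gt_r));

        return vaddq_s8(vaddq_s8(vdupq_n_s8(2), sign_left), sign_right);
    }
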
ihevc_sao_edge_offset_class0_chroma.s
    171  …mov v21.h[7], w11                   //vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, …
    177  …EXT v21.16b, v21.16b , v19.16b,#14  //pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur…
    181  cmhi v16.16b, v19.16b , v21.16b      //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    185  cmhi v18.16b, v21.16b , v19.16b      //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    194  …mov v21.b[0], w11                   //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur…
    202  …mov v21.b[1], w11                   //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur…
    205  …EXT v21.16b, v19.16b , v21.16b,#2   //pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_…
    209  cmhi v16.16b, v19.16b , v21.16b      //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
    212  cmhi v18.16b, v21.16b , v19.16b      //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
    218  ADD v21.16b, v2.16b , v20.16b        //edge_idx = vaddq_s8(const_2, sign_left)
    [all …]

ihevc_inter_pred_luma_vert_w16inp_w16out.s
    199  smull v21.4s,v3.4h,v23.4h
    201  smlal v21.4s,v2.4h,v22.4h
    203  smlal v21.4s,v4.4h,v24.4h
    204  smlal v21.4s,v5.4h,v25.4h
    205  smlal v21.4s,v6.4h,v26.4h
    206  smlal v21.4s,v7.4h,v27.4h
    207  smlal v21.4s,v16.4h,v28.4h
    208  smlal v21.4s,v17.4h,v29.4h
    229  sub v21.4s, v21.4s, v30.4s
    257  shrn v21.4h, v21.4s, #6
    [all …]

ihevc_inter_pred_filters_luma_vert_w16inp.s
    188  smull v21.4s, v3.4h, v23.4h
    190  smlal v21.4s, v2.4h, v22.4h
    192  smlal v21.4s, v4.4h, v24.4h
    193  smlal v21.4s, v5.4h, v25.4h
    194  smlal v21.4s, v6.4h, v26.4h
    195  smlal v21.4s, v7.4h, v27.4h
    196  smlal v21.4s, v16.4h, v28.4h
    197  smlal v21.4s, v17.4h, v29.4h
    217  sqshrn v21.4h, v21.4s,#6
    243  sqrshrun v21.8b, v21.8h,#6
    [all …]

ihevc_inter_pred_filters_luma_vert.s
    212  umull v21.8h, v3.8b, v23.8b
    214  umlsl v21.8h, v2.8b, v22.8b
    216  umlsl v21.8h, v4.8b, v24.8b
    217  umlal v21.8h, v5.8b, v25.8b
    218  umlal v21.8h, v6.8b, v26.8b
    219  umlsl v21.8h, v7.8b, v27.8b
    220  umlal v21.8h, v16.8b, v28.8b
    221  umlsl v21.8h, v17.8b, v29.8b
    244  sqrshrun v21.8b, v21.8h,#6
    275  st1 {v21.8b},[x14],x6
    [all …]

ihevc_inter_pred_filters_luma_vert_w16out.s
    165  umull v21.8h, v3.8b, v23.8b
    167  umlsl v21.8h, v2.8b, v22.8b
    169  umlsl v21.8h, v4.8b, v24.8b
    171  umlal v21.8h, v5.8b, v25.8b
    172  umlal v21.8h, v6.8b, v26.8b
    173  umlsl v21.8h, v7.8b, v27.8b
    174  umlal v21.8h, v16.8b, v28.8b
    175  umlsl v21.8h, v17.8b, v29.8b
    226  st1 {v21.16b},[x14],x6
    259  umull v21.8h, v3.8b, v23.8b
    [all …]

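The four luma vertical-filter files above share one core: eight source rows are multiplied by the eight filter taps with umull/umlal/umlsl (or smull/smlal for the 16-bit-input variants), accumulated in a wider register, then rounded and narrowed (sqrshrun/sqshrn/shrn #6). Below is a rough intrinsics equivalent of the 8-bit-input path, using generic taps with vmla instead of the hand-scheduled per-tap umlal/umlsl; an illustrative sketch under those assumptions, not the library's code:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* One output row of 8 pixels: sum of 8 vertical taps, rounded by 32,
     * shifted right by 6 and saturated back to 8 bits. */
    static inline uint8x8_t vert_8tap_row(const uint8_t *src, ptrdiff_t stride,
                                          const int16_t taps[8])
    {
        int16x8_t acc = vdupq_n_s16(0);
        for (int k = 0; k < 8; k++) {
            /* widen one row of 8 pixels to 16 bits, then acc += taps[k] * row */
            int16x8_t row =
                vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + k * stride)));
            acc = vmlaq_n_s16(acc, row, taps[k]);
        }
        return vqrshrun_n_s16(acc, 6);   /* sqrshrun ..., #6 */
    }
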
ihevc_intra_pred_luma_vert.s
    118  ld1 {v20.8b, v21.8b}, [x6],#16      //16 loads (col 0:15)
    126  st1 {v20.8b, v21.8b}, [x2],#16
    127  st1 {v20.8b, v21.8b}, [x5],#16
    128  st1 {v20.8b, v21.8b}, [x8],#16
    129  st1 {v20.8b, v21.8b}, [x10],#16
    139  st1 {v20.8b, v21.8b}, [x2],#16
    140  st1 {v20.8b, v21.8b}, [x5],#16
    141  st1 {v20.8b, v21.8b}, [x8],#16
    142  st1 {v20.8b, v21.8b}, [x10],#16
    151  st1 {v20.8b, v21.8b}, [x2],#16
    [all …]

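ihevc_intra_pred_luma_vert.s (like the chroma version at the top of this group) is essentially a replicated store: the reference row above the block is loaded once into v20/v21 and written to every destination row, four rows at a time through four pointers. A compact sketch of the same idea with intrinsics; names are illustrative:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Vertical intra prediction, 16 columns wide: copy the reference row
     * above the block into every row of the block. */
    static inline void intra_pred_vert_w16(uint8_t *dst, ptrdiff_t stride,
                                           const uint8_t *ref_top, int rows)
    {
        uint8x16_t top = vld1q_u8(ref_top);        /* one 16-byte load  */
        for (int r = 0; r < rows; r++)
            vst1q_u8(dst + r * stride, top);       /* same data per row */
    }
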
ihevc_intra_pred_filters_luma_mode_11_to_17.s
    301  add v21.8b, v19.8b , v2.8b           //ref_main_idx + 1 (row 0)
    305  tbl v13.8b, {v0.16b},v21.8b          //load from ref_main_idx + 1 (row 0)
    307  add v5.8b, v21.8b , v2.8b            //ref_main_idx + 1 (row 1)
    315  add v21.8b, v21.8b , v3.8b           //ref_main_idx + 1 (row 2)
    323  tbl v15.8b, {v0.16b},v21.8b          //load from ref_main_idx + 1 (row 2)
    336  add v21.8b, v21.8b , v3.8b           //ref_main_idx + 1 (row 4)
    345  tbl v13.8b, {v0.16b},v21.8b          //load from ref_main_idx + 1 (row 4)
    358  add v21.8b, v21.8b , v3.8b           //ref_main_idx + 1 (row 6)
    367  tbl v15.8b, {v0.16b},v21.8b          //load from ref_main_idx + 1 (row 6)
    436  add v21.8b, v2.8b , v19.8b           //ref_main_idx + 1
    [all …]

ihevc_intra_pred_filters_chroma_mode_11_to_17.s
    299  add v21.8b, v19.8b , v29.8b                  //ref_main_idx + 1 (row 0)
    303  tbl v13.8b, { v0.16b, v1.16b}, v21.8b        //load from ref_main_idx + 1 (row 0)
    305  add v5.8b, v21.8b , v29.8b                   //ref_main_idx + 1 (row 1)
    316  add v21.8b, v21.8b , v29.8b                  //ref_main_idx + 1 (row 2)
    324  tbl v15.8b, { v0.16b, v1.16b}, v21.8b        //load from ref_main_idx + 1 (row 2)
    337  add v21.8b, v21.8b , v29.8b                  //ref_main_idx + 1 (row 4)
    346  tbl v13.8b, { v0.16b, v1.16b}, v21.8b        //load from ref_main_idx + 1 (row 4)
    359  add v21.8b, v21.8b , v29.8b                  //ref_main_idx + 1 (row 6)
    370  tbl v15.8b, { v0.16b, v1.16b}, v21.8b        //load from ref_main_idx + 1 (row 6)
    446  add v21.8b, v29.8b , v19.8b                  //ref_main_idx + 1
    [all …]

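In both mode-11-to-17 kernels the reference row is held in registers and tbl is used as a gather: v21 carries per-pixel indices (ref_main_idx + 1) and tbl pulls the corresponding reference samples for blending with the fractional weight. A small intrinsics sketch of just that gather step; the chroma kernel indexes a two-register table, here only a single 16-byte table is shown, and the names are illustrative:

    #include <arm_neon.h>

    /* Gather ref[idx] and ref[idx + 1] so the caller can blend them with the
     * per-row fractional weight. */
    static inline void gather_ref_pair(uint8x16_t ref_main, uint8x8_t idx,
                                       uint8x8_t *s0, uint8x8_t *s1)
    {
        *s0 = vqtbl1_u8(ref_main, idx);                          /* tbl, idx     */
        *s1 = vqtbl1_u8(ref_main, vadd_u8(idx, vdup_n_u8(1)));   /* tbl, idx + 1 */
    }
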
/external/libavc/common/armv8/
ih264_deblk_luma_av8.s
    472  trn1 v21.8b, v0.8b, v2.8b
    474  mov v0.8b, v21.8b
    475  trn1 v21.8b, v4.8b, v6.8b
    477  mov v4.8b, v21.8b
    478  trn1 v21.8b, v8.8b, v10.8b
    480  mov v8.8b, v21.8b
    481  trn1 v21.8b, v12.8b, v14.8b
    483  mov v12.8b, v21.8b
    484  trn1 v21.8b, v1.8b, v3.8b
    486  mov v1.8b, v21.8b
    [all …]

ih264_resi_trans_quant_av8.s
    143  sub v21.4h, v14.4h , v17.4h          //x3 = x4-x7
    146  shl v23.4h, v21.4h, #1               //u_shift(x3,1,shft)
    153  sub v27.4h, v21.4h , v22.4h          //x8 = x3 - u_shift(x2,1,shft);
    177  add v21.4s, v1.4s, v23.4s
    184  sshl v21.4s, v21.4s, v24.4s          //shift row 2
    189  xtn v21.4h, v21.4s                   //narrow row 2
    194  neg v25.8h, v21.8h                   //get negative
    200  cmeq v1.4h, v21.4h, #0
    205  bsl v5.8b, v21.8b, v25.8b            //restore sign of row 3 and 4
    345  sub v21.4h, v14.4h , v17.4h          //x3 = x4-x7
    [all …]

ih264_intra_pred_luma_16x16_av8.s
    221  dup v21.16b, v0.b[4]
    228  st1 {v21.16b}, [x1], x3
    509  sqrshrun v21.8b, v28.8h, #5
    513  st1 {v20.2s, v21.2s}, [x2], x3
    519  sqrshrun v21.8b, v28.8h, #5
    523  st1 {v20.2s, v21.2s}, [x2], x3
    529  sqrshrun v21.8b, v28.8h, #5
    533  st1 {v20.2s, v21.2s}, [x2], x3
    539  sqrshrun v21.8b, v28.8h, #5
    543  st1 {v20.2s, v21.2s}, [x2], x3
    [all …]

/external/libxaac/decoder/armv8/
ixheaacd_post_twiddle_overlap.s
    418  UZP2 v21.8h, v24.8h, v24.8h
    420  MOV v25.d[0], v21.d[0]
    426  UZP2 v21.8h, v26.8h, v26.8h
    428  MOV v27.d[0], v21.d[0]
    484  UZP2 v21.8h, v24.8h, v24.8h
    486  MOV v25.d[0], v21.d[0]
    492  UZP2 v21.8h, v22.8h, v22.8h
    494  MOV v23.d[0], v21.d[0]
    503  UZP2 v21.8h, v30.8h, v30.8h
    505  MOV v30.d[1], v21.d[0]
    [all …]

ixheaacd_pre_twiddle.s
    263  MOV v21.16B, v22.16B
    264  ST2 { v20.4s, v21.4s}, [x7], x8
    346  MOV v21.16B, v22.16B
    348  ST2 { v20.4s, v21.4s}, [x7], x8
    392  MOV v21.16B, v22.16B
    393  ST2 { v20.4s, v21.4s}, [x7], x8
    401  LD2 {v21.2s, v22.2s}, [x0], #16
    402  MOV v0.8B, v21.8B
    408  MOV v21.8B, v0.8B
    410  UZP1 v0.4h, v21.4h, v1.4h
    [all …]

/external/libavc/encoder/armv8/
ih264e_evaluate_intra16x16_modes_av8.s
    169  dup v21.8b, v9.b[15]                 ///HORIZONTAL VALUE ROW=0//
    179  uabdl v28.8h, v1.8b, v21.8b
    190  dup v21.8b, v9.b[14]
    200  uabal v28.8h, v3.8b, v21.8b
    207  dup v21.8b, v9.b[13]
    216  uabal v28.8h, v5.8b, v21.8b
    223  dup v21.8b, v9.b[12]
    232  uabal v28.8h, v7.8b, v21.8b
    240  dup v21.8b, v9.b[11]
    249  uabal v28.8h, v1.8b, v21.8b
    [all …]

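The intra-mode evaluation above scores a candidate by a sum of absolute differences: one left-column pixel is broadcast per row (dup) as the horizontal prediction and the row's SAD is accumulated with uabdl/uabal. A minimal SAD helper in the same style, written against an explicit prediction row; function and parameter names are illustrative:

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    static inline uint32_t sad_8wide(const uint8_t *src, ptrdiff_t src_stride,
                                     const uint8_t *pred, ptrdiff_t pred_stride,
                                     int rows)
    {
        /* uabdl starts the accumulator, uabal keeps adding |src - pred| */
        uint16x8_t acc = vabdl_u8(vld1_u8(src), vld1_u8(pred));
        for (int r = 1; r < rows; r++)
            acc = vabal_u8(acc, vld1_u8(src + r * src_stride),
                           vld1_u8(pred + r * pred_stride));
        return vaddlvq_u16(acc);   /* horizontal sum of the 8 lanes */
    }
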
ih264e_half_pel_av8.s
    175  …sqrshrun v21.8b, v10.8h, #5         //// (a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 + 16) >> 5 (column2…
    181  st1 {v20.8b, v21.8b}, [x1], #16      ////Store dest row0
    313  mov v21.d[0], v20.d[1]
    320  ext v30.8b, v20.8b , v21.8b , #4
    325  ext v29.8b, v20.8b , v21.8b , #6
    333  ext v31.8b, v21.8b , v22.8b , #2
    335  ext v28.8b, v20.8b , v21.8b , #2
    342  smlsl v26.4s, v21.4h, v0.h[0]        //// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (set1)
    343  ext v30.8b, v21.8b , v22.8b , #4
    346  ext v29.8b, v21.8b , v22.8b , #6
    [all …]

ih264e_evaluate_intra_chroma_modes_av8.s
    130  dup v21.8h, v5.h[0]
    143  mov v30.d[0], v21.d[0]
    196  dup v21.8h, v27.h[7]
    207  uabdl v14.8h, v1.8b, v21.8b
    219  dup v21.8h, v27.h[6]                 ///HORIZONTAL VALUE ROW=1//
    229  uabal v14.8h, v3.8b, v21.8b
    236  dup v21.8h, v27.h[5]                 ///HORIZONTAL VALUE ROW=2//
    245  uabal v14.8h, v5.8b, v21.8b
    252  dup v21.8h, v27.h[4]                 ///HORIZONTAL VALUE ROW=3//
    260  uabal v14.8h, v7.8b, v21.8b
    [all …]

/external/capstone/suite/MC/AArch64/
neon-simd-misc.s.cs
    6    0xad,0x0a,0x60,0x0e = rev64 v13.4h, v21.4h
    10   0x35,0x08,0x20,0x2e = rev32 v21.8b, v1.8b
    13   0x35,0x18,0x20,0x0e = rev16 v21.8b, v1.8b
    14   0xa3,0x2a,0x20,0x4e = saddlp v3.8h, v21.16b
    20   0xa3,0x2a,0x20,0x6e = uaddlp v3.8h, v21.16b
    26   0xa3,0x6a,0x20,0x4e = sadalp v3.8h, v21.16b
    32   0xa3,0x6a,0x20,0x6e = uadalp v3.8h, v21.16b
    43   0xad,0x3a,0x60,0x0e = suqadd v13.4h, v21.4h
    50   0xad,0x3a,0x60,0x2e = usqadd v13.4h, v21.4h
    57   0xad,0x7a,0x60,0x0e = sqabs v13.4h, v21.4h
    [all …]

/external/libmpeg2/common/armv8/
ideint_cac_av8.s
    148  mov v21.d[0], v20.d[1]
    149  add v20.4h, v20.4h, v21.4h
    160  add v21.4s, v0.4s, v1.4s
    205  add v21.2s, v0.2s, v21.2s
    211  ushr v0.2s, v21.2s, #3
    212  add v21.2s, v21.2s, v0.2s
    216  add v21.2s, v21.2s, v0.2s
    218  cmhi v0.2s, v20.2s, v21.2s

/external/llvm/test/MC/Hexagon/
v60-vmpy-acc.s
    17   #CHECK: 1900f5ac { v12.w += vrmpy(v21.ub,r0.b) }
    18   v12.w += vrmpy(v21.ub,r0.b)
    53   #CHECK: 193cfc94 { v21:20.w += vdmpy(v29:28.h,r28.b) }
    54   v21:20.w += vdmpy(v29:28.h,r28.b)
    80   #CHECK: 1c3fead5 { v21.w += vmpyo(v10.w,v31.h):<<1:sat:shift }
    81   v21.w += vmpyo(v10.w,v31.h):<<1:sat:shift
    89   #CHECK: 1c34f4b5 { v21.w += vmpyie(v20.w,v20.uh) }
    90   v21.w += vmpyie(v20.w,v20.uh)
    95   #CHECK: 1c1ff6f4 { v21:20.w += vmpy(v22.h,v31.h) }
    96   v21:20.w += vmpy(v22.h,v31.h)
    [all …]

v60-permute.s
    5    #CHECK: 1fd2d5cf { v15.b = vpack(v21.h{{ *}},{{ *}}v18.h):sat }
    6    v15.b=vpack(v21.h,v18.h):sat
    23   #CHECK: 1fe6c435 { v21.b = vpacko(v4.h{{ *}},{{ *}}v6.h) }
    24   v21.b=vpacko(v4.h,v6.h)
    44   #CHECK: 1e00d5c9 { v9.h = vdeal(v21.h) }
    45   v9.h=vdeal(v21.h)

/external/llvm/test/MC/AArch64/
neon-simd-misc.s
    13   rev64 v13.4h, v21.4h
    25   rev32 v21.8b, v1.8b
    34   rev16 v21.8b, v1.8b
    43   saddlp v3.8h, v21.16b
    61   uaddlp v3.8h, v21.16b
    79   sadalp v3.8h, v21.16b
    97   uadalp v3.8h, v21.16b
    120  suqadd v13.4h, v21.4h
    140  usqadd v13.4h, v21.4h
    160  sqabs v13.4h, v21.4h
    [all …]

/external/vixl/test/test-trace-reference/
log-vregs
    22   # v21: 0x403914a7419a3034403c41f241cf6561
    109  # v21: 0x00000000000000000000000040380000 (s21: 2.87500)
    144  # v21: 0x00000000000000007ff0000000000000
    173  # v21: 0x00000000000000000000000046160000 (s21: 9600.00)
    180  # v21: 0x00000000000000000000000000000001
    226  # v21: 0x00000000000000000000001f0000001f
    247  # v21: 0xffffffff0000000000000000ffffffff
    250  # v21: 0x000000000000000000000000ffffffff
    260  # v21: 0x0000000000000000ffffffffffffffff
    266  # v21: 0x0000000000000000ffffffffffffffff
    [all …]

/external/libhevc/decoder/arm64/
ihevcd_fmt_conv_420sp_to_rgba8888.s
    240  sqxtun v21.8b, v24.8h
    251  ZIP1 v27.8b, v20.8b, v21.8b
    252  ZIP2 v21.8b, v20.8b, v21.8b
    259  mov v20.d[1], v21.d[0]
    306  sqxtun v21.8b, v24.8h
    317  ZIP1 v27.8b, v20.8b, v21.8b
    318  ZIP2 v21.8b, v20.8b, v21.8b
    325  mov v20.d[1], v21.d[0]
    403  sqxtun v21.8b, v24.8h
    414  ZIP1 v27.8b, v20.8b, v21.8b
    [all …]

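The 420sp-to-RGBA conversion above narrows each reconstructed 16-bit colour channel back to 8 bits with saturation (sqxtun) and then interleaves channels with zip1/zip2 before the wide stores. One stage of that packing as an intrinsics sketch; the real kernel repeats the interleave to build full 32-bit RGBA words, and the names here are illustrative:

    #include <arm_neon.h>

    /* Saturate two 16-bit channels to 8 bits and interleave them:
     * result = a0,b0,a1,b1, ... ,a7,b7 */
    static inline uint8x16_t narrow_and_interleave(int16x8_t chan_a, int16x8_t chan_b)
    {
        uint8x8_t a = vqmovun_s16(chan_a);      /* sqxtun            */
        uint8x8_t b = vqmovun_s16(chan_b);
        return vcombine_u8(vzip1_u8(a, b),      /* zip1: low halves  */
                           vzip2_u8(a, b));     /* zip2: high halves */
    }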