/external/libxaac/decoder/armv7/ |
D | ixheaacd_post_twiddle_overlap.s | 299 VMULL.U16 Q4, D24, D18 310 VSHR.U32 Q4, Q4, #16 313 VMLAL.S16 Q4, D25, D18 322 VNEG.S32 Q4, Q4 324 VADD.I32 Q0, Q0, Q4 327 VDUP.S32 Q4, R11 328 VQADD.S32 Q0, Q0, Q4 362 VQSUB.S32 Q2, Q2, Q4 377 VMULL.S32 Q4, D30, D0 378 VQMOVN.S64 D22, Q4 [all …]
|
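A hedged note on the VMULL.U16 / VSHR #16 / VMLAL.S16 trio in the post-twiddle-overlap entry above: one common reading of that sequence is a 32-bit x 16-bit fixed-point product assembled from a low-half and a high-half multiply, so the kernel never needs a 64-bit intermediate. The sketch below is an interpretation under that assumption, not code taken from the file; the function name is made up.

    #include <stdint.h>

    /* Interpretation of the VMULL.U16 / VSHR #16 / VMLAL.S16 sequence:
     * (a * b) >> 16 for 32-bit a and 16-bit b, built from two 16-bit
     * multiplies. lo16(a) is treated as unsigned, hi16(a) as signed. */
    static int32_t mul32x16_q16(int32_t a, int16_t b)
    {
        uint32_t lo = (uint32_t)a & 0xFFFFu;                   /* VMULL.U16 operand */
        int32_t  hi = a >> 16;                                 /* VMLAL.S16 operand */
        int32_t  lo_part = (int32_t)(((int64_t)lo * b) >> 16); /* VSHR #16          */
        return hi * b + lo_part;                               /* VMLAL accumulate  */
    }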
D | ia_xheaacd_mps_reoder_mulshift_acc.s | 61 VLD1.32 {Q4, Q5}, [R2]! @LOADING values from R2 N.real_fix 70 VMULL.S32 Q4, D5, D13 76 VSHR.S64 Q4, Q4, #31 86 VSUB.I64 Q10, Q10, Q4 105 VLD1.32 {Q4, Q5}, [R2]! @LOADING values from R2 N.real_fix 114 VMULL.S32 Q4, D5, D13 125 VSHR.S64 Q4, Q4, #31 130 VSUB.I64 Q10, Q10, Q4 149 VLD1.32 {Q4, Q5}, [R2]! @LOADING values from R2 N.real_fix 158 VMULL.S32 Q4, D5, D13 [all …]
|
D | ixheaacd_dct3_32.s | 71 VLD1.32 {Q4}, [R5], R12 74 VREV64.32 Q4, Q4 79 VSHR.S32 Q4, Q4, #7 81 VSUB.I32 Q5, Q3, Q4 116 VLD1.32 {Q4}, [R5], R12 119 VREV64.32 Q4, Q4 123 VSHR.S32 Q4, Q4, #7 125 VSUB.I32 Q5, Q3, Q4 149 VLD1.32 {Q4}, [R5], R11 152 VREV64.32 Q4, Q4 [all …]
|
D | ia_xheaacd_mps_mulshift.s | 33 VQDMULL.S32 Q4, D0, D4 37 VUZP.32 Q4, Q6
|
D | ixheaacd_mps_synt_pre_twiddle.s | 40 VMULL.S32 Q4, D1, D2 45 VSHRN.I64 D8, Q4, #31
|
D | ixheaacd_mps_synt_post_twiddle.s | 40 VMULL.S32 Q4, D15, D2 45 VSHRN.I64 D8, Q4, #31
|
D | ixheaacd_overlap_add2.s | 187 VREV64.32 Q4, Q7 188 VQNEG.S32 Q4, Q4 231 VREV64.32 Q4, Q7 233 VQNEG.S32 Q4, Q4
|
D | ixheaacd_mps_synt_post_fft_twiddle.s | 43 VMULL.S32 Q4, D0, D4 48 VSHRN.S64 D8, Q4, #31
|
D | ixheaacd_mps_synt_out_calc.s | 30 VMULL.S32 Q4, D0, D4 34 VSHRN.S64 D8, Q4, #31
|
D | ixheaacd_calc_post_twid.s | 43 VMULL.S32 Q4, D4, D0 52 VSHRN.S64 D6, Q4, #32
|
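Several of the xaac MPS kernels above (mulshift, synt pre/post twiddle, post-FFT twiddle, out_calc, calc_post_twid) share one fixed-point idiom: a 32x32-bit multiply widened to 64 bits and then narrowed back with a >>31 (or >>32) shift. A minimal scalar sketch of that idiom, plus the subtract-accumulate variant used by the reorder/mulshift-accumulate kernel, assuming Q31-style operands; names are illustrative.

    #include <stdint.h>

    /* Q31-style product: VMULL.S32 (or VQDMULL.S32) followed by a >>31
     * narrowing shift (the VSHRN / VSHR.S64 #31 lines above). */
    static int32_t mul_q31(int32_t a, int32_t b)
    {
        return (int32_t)(((int64_t)a * b) >> 31);
    }

    /* Accumulating variant seen in ia_xheaacd_mps_reoder_mulshift_acc.s:
     * the shifted product is subtracted from a 64-bit accumulator. */
    static int64_t mulshift_acc(int64_t acc, int32_t a, int32_t b)
    {
        return acc - (((int64_t)a * b) >> 31);
    }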
D | ixheaacd_esbr_fwd_modulation.s | 51 VQSUB.S32 Q4, Q0, Q2 91 VQSUB.S64 Q1, Q3, Q4
|
D | ixheaacd_pre_twiddle_compute.s | 111 VREV64.16 Q5, Q4 168 VREV64.16 Q5, Q4 237 VREV64.16 Q5, Q4 334 VREV64.16 Q5, Q4
|
/external/libhevc/common/arm/ |
D | ihevc_resi_trans_32x32_a9q.s | 210 VADD.S16 Q4, Q8,Q11 @ e[k] = resi_tmp_1 + resi_tmp_2 k -> 1-8 row 1 -- dual issue 224 VADD.S16 Q0, Q4, Q5 @ ee[k] = e[k] + e[16-k] k->1-8 row 1 226 VSUB.S16 Q1, Q4, Q5 @ eo[k] = e[k] - e[16-k] k->1-8 row 1 -- dual issue 236 @ Q4 A8 A7 A6 A5 B8 B7 B6 B5 243 VADD.S16 Q13, Q0, Q4 @ eee[k] = ee[k] + ee[7 - k] row 1 & 2 245 VSUB.S16 Q0, Q0 ,Q4 @ eeo[k] = ee[k] - ee[7 - k] row 1 & 2 -- dual issue 285 VMULL.S16 Q4,D24,D1 @g_ai2_ihevc_trans_32[4][0-4] * eeo[0-4] R2 315 VTRN.32 Q4,Q12 @R2 transpose1 327 VADD.S32 Q4,Q4,Q12 @R2 add -- dual issue 1st cycle 336 VADD.S32 Q8,Q4,Q8 @R2 add -- dual issue [all …]
|
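The comments in the 32x32 residue-transform entry above name the standard even/odd butterfly decomposition (e[k], ee[k], eo[k], eee[k], eeo[k]). A scalar sketch of those first stages, under the assumption that resi[] holds one 32-sample residue row; indices are 0-based here and the function name is made up.

    #include <stdint.h>

    static void butterfly32_first_stages(const int16_t resi[32],
                                         int16_t e[16], int16_t o[16],
                                         int16_t ee[8], int16_t eo[8],
                                         int16_t eee[4], int16_t eeo[4])
    {
        for (int k = 0; k < 16; k++) {
            e[k] = resi[k] + resi[31 - k];   /* e[k]  = resi_tmp_1 + resi_tmp_2 */
            o[k] = resi[k] - resi[31 - k];
        }
        for (int k = 0; k < 8; k++) {
            ee[k] = e[k] + e[15 - k];        /* ee[k] = e[k] + e[16-k]  */
            eo[k] = e[k] - e[15 - k];        /* eo[k] = e[k] - e[16-k]  */
        }
        for (int k = 0; k < 4; k++) {
            eee[k] = ee[k] + ee[7 - k];      /* eee[k] = ee[k] + ee[7-k] */
            eeo[k] = ee[k] - ee[7 - k];      /* eeo[k] = ee[k] - ee[7-k] */
        }
    }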
D | ihevc_sao_edge_offset_class1.s | 142 VCGT.U8 Q6,Q5,Q4 @vcgtq_u8(pu1_cur_row, pu1_top_row) 145 VCLT.U8 Q7,Q5,Q4 @vcltq_u8(pu1_cur_row, pu1_top_row) 178 …VSUB.U8 Q4,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_g… 183 VNEG.S8 Q8,Q4 @II sign_up = vnegq_s8(sign_down) 185 VADD.I8 Q11,Q11,Q4 @II edge_idx = vaddq_s8(edge_idx, sign_down) 197 …VMOVL.U8 Q4,D11 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(v… 201 …VADDW.S8 Q4,Q4,D13 @pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1],… 203 …VMAX.S16 Q4,Q4,Q1 @pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1]… 205 …VMIN.U16 Q4,Q4,Q2 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(… 211 VMOVN.I16 D21,Q4 @vmovn_s16(pi2_tmp_cur_row.val[1]) [all …]
|
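The intrinsics-style comments in the SAO edge-offset entry above spell out the usual edge classification: per-pixel signs against the rows above and below, combined into an edge index before the offset is added and the result clamped (the VMAX.S16/VMIN.U16 pair). A scalar sketch of that classification; the function and variable names are assumptions.

    /* sign(a - b), which the vcgt/vclt pair above computes lane-wise. */
    static int sign3(int a, int b) { return (a > b) - (a < b); }

    /* edge_idx = 2 + sign(cur - top) + sign(cur - bottom); the offset looked
     * up for that index (table not shown here) is then added to the pixel. */
    static int sao_class1_edge_idx(int cur, int top, int bottom)
    {
        return 2 + sign3(cur, top) + sign3(cur, bottom);
    }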
D | ihevc_resi_trans.s | 1007 VLD2.U8 {Q3,Q4},[R1],R5 @LOAD 1-16 pred row 2 1010 VSUBL.U8 Q4,D0,D2 @Get residue 1-8 row 1 1028 VADD.S16 Q8 ,Q4,Q5 @e[k] = resi_tmp_1 + resi_tmp_2 k -> 1-8 row 1 1029 VSUB.S16 Q9 ,Q4,Q5 @o[k] = resi_tmp_1 - resi_tmp_2 k ->9-16 row 1 1098 VMULL.S16 Q4,D27,D1 @g_ai2_ihevc_trans_16[6][0-4] * eo[0-4] R2 1123 VTRN.32 Q2,Q4 @R2 transpose1 -- 2 cycles 1138 VADD.S32 Q2,Q2,Q4 @R2 add 1143 VADD.S32 Q4,Q6,Q2 @R2 add 1153 VZIP.S32 Q5,Q4 @ 3 cycle instruction 1229 VMULL.S16 Q4,D22,D4 @o[0][0-3]* R2 [all …]
|
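The 16x16 residue-transform kernel above starts from the widening residue step its comments name: subtract the prediction from the source and keep 16-bit results (VSUBL.U8 does eight lanes at once). A scalar sketch under that reading:

    #include <stdint.h>

    /* resi[i] = src[i] - pred[i], widened from 8-bit pixels to int16_t. */
    static void get_residue_row(const uint8_t *src, const uint8_t *pred,
                                int16_t *resi, int n)
    {
        for (int i = 0; i < n; i++)
            resi[i] = (int16_t)((int)src[i] - (int)pred[i]);
    }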
/external/libhevc/decoder/arm/ |
D | ihevcd_fmt_conv_420sp_to_rgba8888.s | 193 VMULL.S16 Q4,D4,D0[3] @//(U-128)*C4 FOR B 205 VQSHRN.S32 D8,Q4,#13 @//D8 = (U-128)*C4>>13 4 16-BIT VALUES 219 VADDW.U8 Q7,Q4,D30 @//Q7 - HAS Y + B 223 VADDW.U8 Q10,Q4,D31 @//Q10 - HAS Y + B 260 VADDW.U8 Q7,Q4,D28 @//Q7 - HAS Y + B 264 VADDW.U8 Q10,Q4,D29 @//Q10 - HAS Y + B 324 VMULL.S16 Q4,D4,D0[3] @//(U-128)*C4 FOR B 336 VQSHRN.S32 D8,Q4,#13 @//D8 = (U-128)*C4>>13 4 16-BIT VALUES 350 VADDW.U8 Q7,Q4,D30 @//Q7 - HAS Y + B 354 VADDW.U8 Q10,Q4,D31 @//Q10 - HAS Y + B [all …]
|
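The 420sp-to-RGBA entry above documents its own fixed-point math in the comments: the blue contribution is (U - 128) * C4 in Q13, narrowed with a >>13 shift and added to luma. A sketch of one pixel of that path; the coefficient value below is a placeholder for illustration, not the decoder's actual table entry.

    #include <stdint.h>

    #define C4_Q13 14516   /* placeholder Q13 coefficient for the U -> B term */

    static uint8_t clip_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

    /* B = clip(Y + (((U - 128) * C4) >> 13)), mirroring the
     * VMULL.S16 / VQSHRN #13 / VADDW.U8 sequence above. */
    static uint8_t blue_from_yuv(uint8_t y, uint8_t u)
    {
        int b_off = (((int)u - 128) * C4_Q13) >> 13;
        return clip_u8((int)y + b_off);
    }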
/external/llvm/lib/Target/AArch64/ |
D | AArch64CallingConvention.td | 68 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 70 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 72 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 75 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 77 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 106 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 108 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 110 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 113 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 115 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>> [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AArch64/ |
D | AArch64CallingConvention.td | 72 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 74 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 76 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 79 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 81 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 112 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 114 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 116 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 119 [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, 121 CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>> [all …]
|
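Both AArch64CallingConvention.td copies above (and the generated AArch64GenCallingConv.inc further down) repeat the same register list: under AAPCS64 the first eight floating-point/SIMD arguments are assigned to Q0-Q7, and any further ones go to the stack. A small illustration using ACLE vector types; the function itself is made up.

    #include <arm_neon.h>

    /* a0..a7 land in Q0..Q7 per the CCAssignToReg lists above; a8 is the
     * ninth FP/SIMD argument and is therefore passed on the stack. */
    float64x2_t sum_nine(float64x2_t a0, float64x2_t a1, float64x2_t a2,
                         float64x2_t a3, float64x2_t a4, float64x2_t a5,
                         float64x2_t a6, float64x2_t a7, float64x2_t a8)
    {
        float64x2_t s = vaddq_f64(a0, a1);
        s = vaddq_f64(s, vaddq_f64(a2, a3));
        s = vaddq_f64(s, vaddq_f64(a4, a5));
        s = vaddq_f64(s, vaddq_f64(a6, a7));
        return vaddq_f64(s, a8);
    }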
/external/icu/icu4c/source/data/locales/ |
D | sv_FI.txt | 43 "Q4", 51 "Q4",
|
D | lrc.txt | 254 "Q4", 266 "Q4", 274 "Q4",
|
D | es_DO.txt | 107 "Q4", 115 "Q4",
|
D | haw.txt | 427 "Q4", 439 "Q4", 447 "Q4", 459 "Q4",
|
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/AArch64/ |
D | AArch64GenCallingConv.inc | 192 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 205 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 218 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 237 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 254 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 436 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 449 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 462 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 481 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… 497 …AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64… [all …]
|
/external/fdlibm/ |
D | s_expm1.c | 126 Q4 = 4.00821782732936239552e-06, /* 3ED0CFCA 86E65239 */ variable 187 r1 = one+hxs*(Q1+hxs*(Q2+hxs*(Q3+hxs*(Q4+hxs*Q5))));
|
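The two s_expm1.c hits above show what Q4 is here: one coefficient of the degree-5 correction polynomial that expm1() evaluates in Horner form over hxs = 0.5*x*x. A sketch of just that evaluation, with the coefficients passed in rather than hard-coded, since only Q4's value appears in the excerpt.

    /* r1 = 1 + hxs*(Q1 + hxs*(Q2 + hxs*(Q3 + hxs*(Q4 + hxs*Q5)))) */
    static double expm1_correction(double hxs, const double Q[5])
    {
        double r = Q[4];               /* Q5 */
        for (int i = 3; i >= 0; i--)   /* Q4, Q3, Q2, Q1 */
            r = Q[i] + hxs * r;
        return 1.0 + hxs * r;
    }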
/external/libavc/common/arm/ |
D | ih264_deblk_chroma_a9.s | 102 vaddl.u8 q5, d7, d1 @Q4,Q5 = q0 + p1 107 vmlal.u8 q5, d3, d31 @Q5,Q4 = (X2(q1U) + q0U + p1U) 119 vrshrn.u16 d9, q5, #2 @Q4 = (X2(q1U) + q0U + p1U + 2) >> 2 184 vdup.8 q11, r2 @Q4 = alpha 288 vsubl.u8 q4, d0, d4 @Q5,Q4 = (q0 - p0) 292 vshl.i16 q4, q4, #2 @Q4 = (q0 - p0)<<2 300 vadd.i16 q5, q5, q3 @Q5,Q4 = [ (q0 - p0)<<2 ] + (p1 - q1) 304 vqrshrn.s16 d9, q5, #3 @Q4 = i_macro = (((q0 - p0)<<2) + (p1 - q1) + 4)>>3 308 vabs.s8 q3, q4 @Q4 = ABS (i_macro) 314 vcge.s8 q4, q4, #0 @Q4 = (i_macro >= 0) [all …]
|
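The chroma deblocking entry above carries its formula in its own comments: i_macro = (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, the normal-filter delta of H.264, later clamped and applied with opposite signs to p0 and q0. A scalar sketch of just that term; clipping against tc is left out.

    /* Delta for the normal (bS < 4) H.264 filter on a p1 p0 | q0 q1 edge. */
    static int deblk_delta(int p1, int p0, int q0, int q1)
    {
        return (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3;   /* i_macro */
    }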