/external/XNNPACK/src/q8-vadd/
  neon.c
     58  vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     59  vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     60  vacc2_lo = vmlaq_s32(vacc2_lo, vmovl_s16(vget_low_s16(vxb2)), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     61  vacc3_lo = vmlaq_s32(vacc3_lo, vmovl_s16(vget_low_s16(vxb3)), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     62  vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_high_s16(vxb0), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     63  vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_high_s16(vxb1), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     64  vacc2_hi = vmlaq_s32(vacc2_hi, vmovl_high_s16(vxb2), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
     65  vacc3_hi = vmlaq_s32(vacc3_hi, vmovl_high_s16(vxb3), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
    123  vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
    124  vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);  in xnn_q8_vadd_ukernel__neon()
    [all …]
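All of these hits share one widening multiply-accumulate idiom: each 16-bit vector is widened to 32 bits half by half, scaled by a per-operand multiplier, and added into a 32-bit accumulator. A minimal sketch of that idiom with a hypothetical helper name (note that vmovl_high_s16 is AArch64-only; vmovl_s16(vget_high_s16(x)) is the portable spelling used below):

    #include <arm_neon.h>

    /* Hypothetical helper mirroring the kernel's pattern: widen both halves
       of a 16-bit vector and multiply-accumulate against a multiplier. */
    static inline void widen_mla(int32x4_t *acc_lo, int32x4_t *acc_hi,
                                 int16x8_t vxb, int32x4_t vb_multiplier)
    {
        *acc_lo = vmlaq_s32(*acc_lo, vmovl_s16(vget_low_s16(vxb)), vb_multiplier);
        *acc_hi = vmlaq_s32(*acc_hi, vmovl_s16(vget_high_s16(vxb)), vb_multiplier);
    }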
/external/libhevc/common/arm/
  ihevc_resi_trans_neon_32x32.c
    1204  a[0] = vmlaq_s32(a[0], g_ai4_ihevc_trans_32_1_8, vrev64q_s32(eeee));  in ihevc_resi_trans_32x32_neon()
    1250  a[14] = vmlaq_s32(a[14], g_ai4_ihevc_trans_32_14_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1251  a[10] = vmlaq_s32(a[10], g_ai4_ihevc_trans_32_10_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1252  a[6] = vmlaq_s32(a[6], g_ai4_ihevc_trans_32_6_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1253  a[2] = vmlaq_s32(a[2], g_ai4_ihevc_trans_32_2_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1280  a[30] = vmlaq_s32(a[30], g_ai4_ihevc_trans_32_30_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1281  a[26] = vmlaq_s32(a[26], g_ai4_ihevc_trans_32_26_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1282  a[22] = vmlaq_s32(a[22], g_ai4_ihevc_trans_32_22_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1283  a[18] = vmlaq_s32(a[18], g_ai4_ihevc_trans_32_18_47, eo1);  in ihevc_resi_trans_32x32_neon()
    1310  a[1] = vmlaq_s32(a[1], g_ai4_ihevc_trans_32_1_47, o1);  in ihevc_resi_trans_32x32_neon()
    [all …]
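Each hit folds the same odd/even intermediate vector (eo1 or o1) into one transform output row, weighted by a row-specific coefficient vector. The step generalizes to a loop like this sketch (array names are illustrative, not from the kernel):

    #include <arm_neon.h>

    /* Accumulate one source vector into several transform rows, each row
       weighted by its own coefficient vector: a[k] += coeff[k] * src. */
    static void mla_rows(int32x4_t a[], const int32x4_t coeff[],
                         int rows, int32x4_t src)
    {
        for (int k = 0; k < rows; ++k)
            a[k] = vmlaq_s32(a[k], coeff[k], src);
    }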
  ihevc_resi_trans_neon.c
    285  src0_4x32b = vmlaq_s32(c3_4x32b, c0_4x32b, coeff0_4x32b); /* 29*c0 + c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    287  src3_4x32b = vmlaq_s32(c3_4x32b, c2_4x32b, coeff1_4x32b); /* 55*c2 + c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    290  src0_4x32b = vmlaq_s32(src0_4x32b, c1_4x32b, coeff1_4x32b); /* 29*c0 + 55*c1 + c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    291  src2_4x32b = vmlaq_s32(src2_4x32b, c0_4x32b, coeff1_4x32b); /* 29*c2 + 55*c0 - c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    323  src0_4x32b = vmlaq_s32(c3_4x32b, c0_4x32b, coeff0_4x32b); /* 29*c0 + c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    325  src3_4x32b = vmlaq_s32(c3_4x32b, c2_4x32b, coeff1_4x32b); /* 55*c2 + c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    328  src0_4x32b = vmlaq_s32(src0_4x32b, c1_4x32b, coeff1_4x32b); /* 29*c0 + 55*c1 + c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
    329  src2_4x32b = vmlaq_s32(src2_4x32b, c0_4x32b, coeff1_4x32b); /* 29*c2 + 55*c0 - c3 */  in ihevc_resi_trans_4x4_ttype1_neon()
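The inline comments show how the 4x4 transform row 29*c0 + 55*c1 + c3 is built by chaining two multiply-accumulates onto the c3 term. Restated in isolation as a sketch (the constants 29 and 55 are the coefficients quoted above):

    #include <arm_neon.h>

    /* Build 29*c0 + 55*c1 + c3 with two chained vmlaq_s32 steps. */
    static int32x4_t dst_row0(int32x4_t c0, int32x4_t c1, int32x4_t c3)
    {
        const int32x4_t k29 = vdupq_n_s32(29);
        const int32x4_t k55 = vdupq_n_s32(55);
        int32x4_t r = vmlaq_s32(c3, c0, k29);  /* 29*c0 + c3 */
        return vmlaq_s32(r, c1, k55);          /* 29*c0 + 55*c1 + c3 */
    }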
/external/libhevc/encoder/arm/
  ihevce_hme_utils_neon.c
    192  dst0_4x32b = vmlaq_s32(add_4x32b, dst0_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    193  dst1_4x32b = vmlaq_s32(add_4x32b, dst1_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    194  dst2_4x32b = vmlaq_s32(add_4x32b, dst2_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    195  dst3_4x32b = vmlaq_s32(add_4x32b, dst3_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    198  dst4_4x32b = vmlaq_s32(add_4x32b, dst4_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    199  dst5_4x32b = vmlaq_s32(add_4x32b, dst5_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    200  dst6_4x32b = vmlaq_s32(add_4x32b, dst6_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    201  dst7_4x32b = vmlaq_s32(add_4x32b, dst7_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    615  dst0_4x32b = vmlaq_s32(add_4x32b, dst0_4x32b, inv_wt_4x32b);  in hme_get_wt_inp_ctb_neon()
    616  dst1_4x32b = vmlaq_s32(add_4x32b, dst1_4x32b, inv_wt_4x32b);  in hme_get_wt_inp_ctb_neon()
    [all …]
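Here the accumulator operand carries the rounding offset: each lane computes add + dst*inv_wt in one instruction, the usual offset-then-shift shape of inverse weighting. A sketch under that assumption (the downshift step and its count are illustrative, not quoted from the encoder):

    #include <arm_neon.h>

    /* Assumed shape: scale by the inverse weight, fold in the rounding
       offset via the accumulator, then shift back down. */
    static inline int32x4_t inv_weight(int32x4_t dst, int32x4_t inv_wt,
                                       int32x4_t round_add, int shift)
    {
        int32x4_t t = vmlaq_s32(round_add, dst, inv_wt); /* round + dst*inv_wt */
        return vshlq_s32(t, vdupq_n_s32(-shift));        /* arithmetic >> shift */
    }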
/external/libaom/libaom/av1/common/arm/
  selfguided_neon.c
    1542  v0 = vmlaq_s32(v0, xq0_vec, f00);  in av1_apply_selfguided_restoration_neon()
    1543  v4 = vmlaq_s32(v4, xq0_vec, f10);  in av1_apply_selfguided_restoration_neon()
    1553  v0 = vmlaq_s32(v0, xq1_vec, f00);  in av1_apply_selfguided_restoration_neon()
    1554  v4 = vmlaq_s32(v4, xq1_vec, f10);  in av1_apply_selfguided_restoration_neon()
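Self-guided restoration adds two filter corrections to each base value, each scaled by its own xq weight. The generic shape of that step, as a sketch (parameter names assumed; in the real kernel the correction vectors are recomputed between the two passes):

    #include <arm_neon.h>

    /* v += xq0*f0 + xq1*f1, one vmlaq_s32 per weighted correction term. */
    static inline int32x4_t add_corrections(int32x4_t v,
                                            int32x4_t xq0, int32x4_t f0,
                                            int32x4_t xq1, int32x4_t f1)
    {
        v = vmlaq_s32(v, xq0, f0);
        return vmlaq_s32(v, xq1, f1);
    }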
/external/gemmlowp/internal/
  simd_wrappers_neon.h
    130  *acc = vmlaq_s32(*acc, lhs, rhs);
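gemmlowp hides the intrinsic behind a generic multiply-add wrapper so the same templated GEMM code targets every SIMD backend. A typical use of such an accumulate step, shown as an illustrative reduction loop (not code from gemmlowp):

    #include <arm_neon.h>

    /* Four parallel partial dot products; the caller reduces the lanes. */
    static int32x4_t dot4(const int32_t *x, const int32_t *y, int n)
    {
        int32x4_t acc = vdupq_n_s32(0);
        for (int i = 0; i + 4 <= n; i += 4)
            acc = vmlaq_s32(acc, vld1q_s32(x + i), vld1q_s32(y + i));
        return acc;
    }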
/external/eigen/Eigen/src/Core/arch/NEON/
  PacketMath.h
    213  …acket4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
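Note the operand swap: Eigen's pmadd(a, b, c) means a*b + c, while vmlaq_s32 takes the accumulator first, so c moves to the front. Restated as a plain C helper (illustrative; Eigen's actual definition is a template specialization):

    #include <arm_neon.h>

    /* pmadd contract: returns a*b + c per 32-bit lane. */
    static inline int32x4_t pmadd_s32(int32x4_t a, int32x4_t b, int32x4_t c)
    {
        return vmlaq_s32(c, a, b);  /* NEON puts the accumulator first */
    }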
/external/neon_2_sse/
  NEON_2_SSE.h
    573    _NEON2SSESTORAGE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c); // VMLA.I32 q0,q0,q0
    3908   _NEON2SSESTORAGE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c); // VMLA.I32 q0,q0,q0
    3909   _NEON2SSE_INLINE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c) // VMLA.I32 q0,q0,q0  in vmlaq_s32()
    3953   #define vmlaq_u32 vmlaq_s32
    13351  return vmlaq_s32(a,b,c);
    14070  return vmlaq_s32(a,b,scalar);  in vmlaq_n_s32()
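This shim lets NEON source build on x86 by mapping each intrinsic onto SSE. When SSE4.1 is available the mapping for vmlaq_s32 is direct, as in this sketch (the real header also carries fallbacks for older instruction sets):

    #include <smmintrin.h>  /* SSE4.1: _mm_mullo_epi32 */

    /* Same contract as vmlaq_s32: a + b*c in each 32-bit lane. */
    static inline __m128i vmlaq_s32_sse(__m128i a, __m128i b, __m128i c)
    {
        return _mm_add_epi32(a, _mm_mullo_epi32(b, c));
    }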
/external/clang/test/CodeGen/
  aarch64-neon-intrinsics.c
    477  return vmlaq_s32(v1, v2, v3);  in test_vmlaq_s32()
  arm_neon_intrinsics.c
    8885  return vmlaq_s32(a, b, c);  in test_vmlaq_s32()
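Both codegen tests pin down the same contract: vmlaq_s32(a, b, c) returns a + b*c in each lane. A self-contained check of that semantics (illustrative; compiles for an ARM target):

    #include <arm_neon.h>
    #include <assert.h>

    int main(void)
    {
        int32x4_t a = vdupq_n_s32(1), b = vdupq_n_s32(2), c = vdupq_n_s32(3);
        int32x4_t r = vmlaq_s32(a, b, c);  /* each lane: 1 + 2*3 = 7 */
        assert(vgetq_lane_s32(r, 0) == 7);
        return 0;
    }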