
Searched refs:vmlaq_s32 (Results 1 – 25 of 25) sorted by relevance
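
For reference while reading the hits below: vmlaq_s32(a, b, c) is the ARM NEON multiply-accumulate intrinsic on four packed signed 32-bit lanes, returning a + (b * c) per lane. A minimal, self-contained sketch of that behaviour (names and values here are illustrative, not taken from any file listed):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    const int32_t a_in[4] = {1, 2, 3, 4};      /* accumulator lanes   */
    const int32_t b_in[4] = {10, 10, 10, 10};  /* first multiplicand  */
    const int32_t c_in[4] = {1, 2, 3, 4};      /* second multiplicand */

    int32x4_t a = vld1q_s32(a_in);
    int32x4_t b = vld1q_s32(b_in);
    int32x4_t c = vld1q_s32(c_in);

    /* Per-lane a[i] = a[i] + b[i] * c[i] */
    int32x4_t acc = vmlaq_s32(a, b, c);

    int32_t out[4];
    vst1q_s32(out, acc);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* prints: 11 22 33 44 */
    return 0;
}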

/external/XNNPACK/src/qu8-vadd/
minmax-neon.c
58 vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
59 vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
60 vacc2_lo = vmlaq_s32(vacc2_lo, vmovl_s16(vget_low_s16(vxb2)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
61 vacc3_lo = vmlaq_s32(vacc3_lo, vmovl_s16(vget_low_s16(vxb3)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
62 vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_high_s16(vxb0), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
63 vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_high_s16(vxb1), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
64 vacc2_hi = vmlaq_s32(vacc2_hi, vmovl_high_s16(vxb2), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
65 vacc3_hi = vmlaq_s32(vacc3_hi, vmovl_high_s16(vxb3), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
123 vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
124 vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon()
[all …]
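
The qu8-vadd hits above all follow one pattern: widen a 16-bit vector to 32 bits, then multiply-accumulate it against a broadcast multiplier. A simplified sketch of that pattern (not the actual XNNPACK kernel; names are illustrative, and vmovl_high_s16(x) in the AArch64 lines is equivalent to vmovl_s16(vget_high_s16(x))):

#include <arm_neon.h>

/* Widen the low/high halves of a 16-bit vector and multiply-accumulate them
 * into 32-bit accumulators, mirroring the vmovl_s16 + vmlaq_s32 lines above.
 * Names (vacc_lo, vacc_hi, vxb, vb_multiplier) are illustrative. */
static inline void accumulate_scaled_s16(int32x4_t *vacc_lo, int32x4_t *vacc_hi,
                                         int16x8_t vxb, int32x4_t vb_multiplier) {
    const int32x4_t vxb_lo = vmovl_s16(vget_low_s16(vxb));
    const int32x4_t vxb_hi = vmovl_s16(vget_high_s16(vxb));
    *vacc_lo = vmlaq_s32(*vacc_lo, vxb_lo, vb_multiplier);
    *vacc_hi = vmlaq_s32(*vacc_hi, vxb_hi, vb_multiplier);
}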
/external/XNNPACK/src/qs8-vaddc/gen/
minmax-neon-ld64-x32.c
50 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
51 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
52 int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
53 int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
54 int32x4_t vaccGHIJ = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexGHIJKLMN)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
55 int32x4_t vaccKLMN = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexGHIJKLMN)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
56 int32x4_t vaccOPQR = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexOPQRSTUV)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
57 int32x4_t vaccSTUV = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexOPQRSTUV)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
100 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
101 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
minmax-neon-ld64-x24.c
48 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
49 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
50 int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
51 int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
52 int32x4_t vaccGHIJ = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexGHIJKLMN)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
53 int32x4_t vaccKLMN = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexGHIJKLMN)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
91 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
92 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
minmax-neon-ld64-x16.c
46 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
47 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
48 int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
49 int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
78 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
79 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
minmax-neon-ld64-x8.c
44 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
45 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
69 int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
70 int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
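
The qs8-vaddc ("add constant") variants above differ from the plain vadd kernels in that every accumulator is seeded from a shared bias vector (vy_bias) rather than from a previously accumulated value. A hedged sketch of one 8-element step, with illustrative names:

#include <arm_neon.h>

/* One 8-element step of the add-with-constant pattern shown above: load eight
 * 16-bit values, widen them, and start each accumulator from vy_bias.
 * All names are illustrative; this is not the generated XNNPACK code. */
static inline void vaddc_step_x8(const int16_t *vex, int32x4_t vy_bias,
                                 int32x4_t vx_multiplier,
                                 int32x4_t *vacc0123, int32x4_t *vacc4567) {
    const int16x8_t vex01234567 = vld1q_s16(vex);
    *vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
    *vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);
}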
/external/libhevc/common/arm/
ihevc_resi_trans_neon_32x32.c
1204 a[0] = vmlaq_s32(a[0], g_ai4_ihevc_trans_32_1_8, vrev64q_s32(eeee)); in ihevc_resi_trans_32x32_neon()
1250 a[14] = vmlaq_s32(a[14], g_ai4_ihevc_trans_32_14_47, eo1); in ihevc_resi_trans_32x32_neon()
1251 a[10] = vmlaq_s32(a[10], g_ai4_ihevc_trans_32_10_47, eo1); in ihevc_resi_trans_32x32_neon()
1252 a[6] = vmlaq_s32(a[6], g_ai4_ihevc_trans_32_6_47, eo1); in ihevc_resi_trans_32x32_neon()
1253 a[2] = vmlaq_s32(a[2], g_ai4_ihevc_trans_32_2_47, eo1); in ihevc_resi_trans_32x32_neon()
1280 a[30] = vmlaq_s32(a[30], g_ai4_ihevc_trans_32_30_47, eo1); in ihevc_resi_trans_32x32_neon()
1281 a[26] = vmlaq_s32(a[26], g_ai4_ihevc_trans_32_26_47, eo1); in ihevc_resi_trans_32x32_neon()
1282 a[22] = vmlaq_s32(a[22], g_ai4_ihevc_trans_32_22_47, eo1); in ihevc_resi_trans_32x32_neon()
1283 a[18] = vmlaq_s32(a[18], g_ai4_ihevc_trans_32_18_47, eo1); in ihevc_resi_trans_32x32_neon()
1310 a[1] = vmlaq_s32(a[1], g_ai4_ihevc_trans_32_1_47, o1); in ihevc_resi_trans_32x32_neon()
[all …]
ihevc_resi_trans_neon.c
285 src0_4x32b = vmlaq_s32(c3_4x32b, c0_4x32b, coeff0_4x32b); /* 29*c0 + c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
287 src3_4x32b = vmlaq_s32(c3_4x32b, c2_4x32b, coeff1_4x32b); /* 55*c2 + c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
290 src0_4x32b = vmlaq_s32(src0_4x32b, c1_4x32b, coeff1_4x32b); /* 29*c0 + 55*c1 + c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
291 src2_4x32b = vmlaq_s32(src2_4x32b, c0_4x32b, coeff1_4x32b); /* 29*c2 + 55*c0 - c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
323 src0_4x32b = vmlaq_s32(c3_4x32b, c0_4x32b, coeff0_4x32b); /* 29*c0 + c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
325 src3_4x32b = vmlaq_s32(c3_4x32b, c2_4x32b, coeff1_4x32b); /* 55*c2 + c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
328 src0_4x32b = vmlaq_s32(src0_4x32b, c1_4x32b, coeff1_4x32b); /* 29*c0 + 55*c1 + c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
329 src2_4x32b = vmlaq_s32(src2_4x32b, c0_4x32b, coeff1_4x32b); /* 29*c2 + 55*c0 - c3 */ in ihevc_resi_trans_4x4_ttype1_neon()
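
The inline comments in ihevc_resi_trans_neon.c spell out the arithmetic being built up lane by lane. A standalone sketch of how chained vmlaq_s32 calls realize an expression like 29*c0 + 55*c1 + c3 (coefficients taken from those comments; variable names illustrative, not the libhevc code):

#include <arm_neon.h>

/* Build 29*c0 + 55*c1 + c3 per lane by chaining multiply-accumulates,
 * following the commented expressions above. Names are illustrative. */
static inline int32x4_t dst_row_sketch(int32x4_t c0, int32x4_t c1, int32x4_t c3) {
    const int32x4_t coeff0 = vdupq_n_s32(29);
    const int32x4_t coeff1 = vdupq_n_s32(55);
    int32x4_t acc = vmlaq_s32(c3, c0, coeff0);   /* 29*c0 + c3         */
    acc = vmlaq_s32(acc, c1, coeff1);            /* 29*c0 + 55*c1 + c3 */
    return acc;
}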
/external/XNNPACK/src/qs8-vadd/gen/
minmax-neon-ld64-x32.c
62 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
63 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
64 vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vey89ABCDEF)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
65 vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vey89ABCDEF)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
66 vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(veyGHIJKLMN)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
67 vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(veyGHIJKLMN)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
68 vaccOPQR = vmlaq_s32(vaccOPQR, vmovl_s16(vget_low_s16(veyOPQRSTUV)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
69 vaccSTUV = vmlaq_s32(vaccSTUV, vmovl_s16(vget_high_s16(veyOPQRSTUV)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
117 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
118 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
minmax-neon-ld64-x24.c
56 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
57 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
58 vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vey89ABCDEF)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
59 vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vey89ABCDEF)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
60 vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(veyGHIJKLMN)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
61 vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(veyGHIJKLMN)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
104 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
105 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
minmax-neon-ld64-x16.c
50 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
51 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
52 vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vey89ABCDEF)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
53 vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vey89ABCDEF)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
87 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
88 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
minmax-neon-ld64-x8.c
44 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
45 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
74 vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
75 vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
/external/libhevc/encoder/arm/
ihevce_hme_utils_neon.c
192 dst0_4x32b = vmlaq_s32(add_4x32b, dst0_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
193 dst1_4x32b = vmlaq_s32(add_4x32b, dst1_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
194 dst2_4x32b = vmlaq_s32(add_4x32b, dst2_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
195 dst3_4x32b = vmlaq_s32(add_4x32b, dst3_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
198 dst4_4x32b = vmlaq_s32(add_4x32b, dst4_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
199 dst5_4x32b = vmlaq_s32(add_4x32b, dst5_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
200 dst6_4x32b = vmlaq_s32(add_4x32b, dst6_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
201 dst7_4x32b = vmlaq_s32(add_4x32b, dst7_4x32b, inv_wt_4x32b); in ihevce_get_wt_inp_4x8_neon()
615 dst0_4x32b = vmlaq_s32(add_4x32b, dst0_4x32b, inv_wt_4x32b); in hme_get_wt_inp_ctb_neon()
616 dst1_4x32b = vmlaq_s32(add_4x32b, dst1_4x32b, inv_wt_4x32b); in hme_get_wt_inp_ctb_neon()
[all …]
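
In the two libhevc encoder functions above, vmlaq_s32 folds an offset and an inverse weight into each 32-bit lane in one step: result = add + dst * inv_wt. A minimal hedged sketch of just that step (names illustrative; the rounding shift that presumably follows in the real functions is omitted):

#include <arm_neon.h>

/* result = add + dst * inv_wt per 32-bit lane, mirroring the
 * ihevce_get_wt_inp_4x8_neon / hme_get_wt_inp_ctb_neon lines above.
 * Names are illustrative; any later rounding shift is not shown. */
static inline int32x4_t inverse_weight_sketch(int32x4_t dst, int32x4_t inv_wt,
                                              int32x4_t add) {
    return vmlaq_s32(add, dst, inv_wt);
}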
/external/XNNPACK/src/qs8-vaddc/
neon-ld64.c.in
46 …int32x4_t vacc${ABC[N:N+4]} = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex${ABC[N:N+8]})), vx_mul…
47 …int32x4_t vacc${ABC[N+4:N+8]} = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex${ABC[N:N+8]})), vx_…
91 …int32x4_t vacc${ABC[0:4]} = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex${ABC[0:8]})), vx_multipl…
92 …int32x4_t vacc${ABC[4:8]} = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex${ABC[0:8]})), vx_multip…
/external/XNNPACK/src/qs8-vadd/
neon-ld64.c.in
47 …vacc${ABC[N:N+4]} = vmlaq_s32(vacc${ABC[N:N+4]}, vmovl_s16(vget_low_s16(vey${ABC[N:N+8]})), vy_mul…
48 …vacc${ABC[N+4:N+8]} = vmlaq_s32(vacc${ABC[N+4:N+8]}, vmovl_s16(vget_high_s16(vey${ABC[N:N+8]})), v…
98 …vacc${ABC[0:4]} = vmlaq_s32(vacc${ABC[0:4]}, vmovl_s16(vget_low_s16(vey${ABC[0:8]})), vy_multiplie…
99 …vacc${ABC[4:8]} = vmlaq_s32(vacc${ABC[4:8]}, vmovl_s16(vget_high_s16(vey${ABC[0:8]})), vy_multipli…
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
batch_matmul.h
247 vmlaq_s32(scratch_val0, row_sum0, input_offset0); in BatchMatMul()
249 vmlaq_s32(scratch_val1, row_sum1, input_offset1); in BatchMatMul()
neon_tensor_utils.cc
1433 vmlaq_s32(scratch_val0, row_sum0, input_offset0); in NeonMatrixBatchVectorMultiplyAccumulate()
1435 vmlaq_s32(scratch_val1, row_sum1, input_offset1); in NeonMatrixBatchVectorMultiplyAccumulate()
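
The two TensorFlow Lite hits above use the same one-instruction idiom to fold a per-row sum scaled by an input offset into the int32 accumulators of a quantized matmul. A hedged sketch of that correction step (names illustrative; whether the offset is stored negated depends on the surrounding kernel):

#include <arm_neon.h>

/* acc += row_sum * input_offset for four lanes at a time, as in the
 * batch_matmul.h / neon_tensor_utils.cc lines above. Names and the sign
 * convention of input_offset are illustrative. */
static inline int32x4_t apply_offset_correction(int32x4_t scratch_val,
                                                int32x4_t row_sum,
                                                int32x4_t input_offset) {
    return vmlaq_s32(scratch_val, row_sum, input_offset);
}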
/external/libaom/libaom/av1/common/arm/
selfguided_neon.c
1542 v0 = vmlaq_s32(v0, xq0_vec, f00); in av1_apply_selfguided_restoration_neon()
1543 v4 = vmlaq_s32(v4, xq0_vec, f10); in av1_apply_selfguided_restoration_neon()
1553 v0 = vmlaq_s32(v0, xq1_vec, f00); in av1_apply_selfguided_restoration_neon()
1554 v4 = vmlaq_s32(v4, xq1_vec, f10); in av1_apply_selfguided_restoration_neon()
/external/gemmlowp/internal/
simd_wrappers_neon.h
130 *acc = vmlaq_s32(*acc, lhs, rhs);
/external/eigen/Eigen/src/Core/arch/NEON/
PacketMath.h
213 …acket4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
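
Note the argument order in the Eigen mapping above: pmadd(a, b, c) computes a*b + c, while vmlaq_s32 takes the accumulator first, so the wrapper passes (c, a, b). A small hedged check of that equivalence in plain NEON C (no Eigen dependency; names illustrative):

#include <arm_neon.h>
#include <assert.h>

/* Check that a*b + c spelled out with mul/add matches vmlaq_s32(c, a, b). */
static void check_pmadd_argument_order(void) {
    const int32_t av[4] = {1, -2, 3, -4};
    const int32_t bv[4] = {5, 6, -7, 8};
    const int32_t cv[4] = {9, 10, 11, 12};
    int32x4_t a = vld1q_s32(av), b = vld1q_s32(bv), c = vld1q_s32(cv);

    int32x4_t ref = vaddq_s32(vmulq_s32(a, b), c);  /* a*b + c, spelled out   */
    int32x4_t mla = vmlaq_s32(c, a, b);             /* accumulator-first form */

    int32_t r[4], m[4];
    vst1q_s32(r, ref);
    vst1q_s32(m, mla);
    for (int i = 0; i < 4; ++i) assert(r[i] == m[i]);
}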
/external/libgav1/libgav1/src/dsp/arm/
inverse_transform_10bit_neon.cc
1491 vmlaq_s32(v_dual_round, v_src, v_multiplier); in Identity4_NEON()
1511 const int32x4_t v_src_mult_lo = vmlaq_s32(v_dual_round, v_src, v_multiplier); in Identity4DcOnly()
/external/neon_2_sse/
NEON_2_SSE.h
573 _NEON2SSESTORAGE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c); // VMLA.I32 q0,q0,q0
3908 _NEON2SSESTORAGE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c); // VMLA.I32 q0,q0,q0
3909 _NEON2SSE_INLINE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c) // VMLA.I32 q0,q0,q0 in vmlaq_s32() function
3953 #define vmlaq_u32 vmlaq_s32
13351 return vmlaq_s32(a,b,c);
14070 return vmlaq_s32(a,b,scalar); in vmlaq_n_s32()
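
NEON_2_SSE.h maps vmlaq_s32 onto SSE. A plausible SSE4.1 equivalent of the same per-lane a + b*c operation, offered only as a sketch of the idea and not as a copy of the header's actual implementation:

#include <smmintrin.h>  /* SSE4.1: _mm_mullo_epi32 */

/* Per-lane a + b*c on four packed 32-bit integers, the operation that
 * vmlaq_s32 provides on NEON. Illustrative only. */
static inline __m128i vmlaq_s32_sse_sketch(__m128i a, __m128i b, __m128i c) {
    return _mm_add_epi32(a, _mm_mullo_epi32(b, c));
}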
/external/llvm-project/clang/test/CodeGen/
aarch64-neon-intrinsics.c
474 return vmlaq_s32(v1, v2, v3); in test_vmlaq_s32()
arm_neon_intrinsics.c
7020 return vmlaq_s32(a, b, c); in test_vmlaq_s32()
/external/clang/test/CodeGen/
arm_neon_intrinsics.c
8885 return vmlaq_s32(a, b, c); in test_vmlaq_s32()
aarch64-neon-intrinsics.c
477 return vmlaq_s32(v1, v2, v3); in test_vmlaq_s32()