/external/XNNPACK/src/qu8-vadd/

minmax-neon.c  (all matches in xnn_qu8_vadd_minmax_ukernel__neon())
     58  vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);
     59  vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);
     60  vacc2_lo = vmlaq_s32(vacc2_lo, vmovl_s16(vget_low_s16(vxb2)), vb_multiplier);
     61  vacc3_lo = vmlaq_s32(vacc3_lo, vmovl_s16(vget_low_s16(vxb3)), vb_multiplier);
     62  vacc0_hi = vmlaq_s32(vacc0_hi, vmovl_high_s16(vxb0), vb_multiplier);
     63  vacc1_hi = vmlaq_s32(vacc1_hi, vmovl_high_s16(vxb1), vb_multiplier);
     64  vacc2_hi = vmlaq_s32(vacc2_hi, vmovl_high_s16(vxb2), vb_multiplier);
     65  vacc3_hi = vmlaq_s32(vacc3_hi, vmovl_high_s16(vxb3), vb_multiplier);
    123  vacc0_lo = vmlaq_s32(vacc0_lo, vmovl_s16(vget_low_s16(vxb0)), vb_multiplier);
    124  vacc1_lo = vmlaq_s32(vacc1_lo, vmovl_s16(vget_low_s16(vxb1)), vb_multiplier);
    [all …]

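Note: the QU8/QS8 add kernels indexed above and below all share one shape: 8-bit inputs are widened to 16 bits, each half is widened again to 32 bits with vmovl_s16/vget_low_s16 (or vmovl_high_s16), and a per-operand multiplier is folded into the int32 accumulators with vmlaq_s32. The following is a minimal standalone sketch of only that widening multiply-accumulate step; the bias and multiplier values are invented for illustration and are not taken from the XNNPACK sources.

// Sketch (not XNNPACK code): widen eight int16 lanes to two int32x4_t halves
// and fold them into int32 accumulators with vmlaq_s32. Requires an ARM
// target with <arm_neon.h>.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int16_t x_vals[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  const int32_t bias[4] = {100, 100, 100, 100};   // illustrative bias

  int16x8_t vx = vld1q_s16(x_vals);
  int32x4_t vmultiplier = vdupq_n_s32(3);         // illustrative multiplier
  int32x4_t vacc_lo = vld1q_s32(bias);
  int32x4_t vacc_hi = vld1q_s32(bias);

  // acc += widen(x) * multiplier, low and high halves separately.
  vacc_lo = vmlaq_s32(vacc_lo, vmovl_s16(vget_low_s16(vx)), vmultiplier);
  vacc_hi = vmlaq_s32(vacc_hi, vmovl_s16(vget_high_s16(vx)), vmultiplier);

  int32_t out[8];
  vst1q_s32(out, vacc_lo);
  vst1q_s32(out + 4, vacc_hi);
  for (int i = 0; i < 8; i++) printf("%d ", out[i]);  // 103 106 ... 124
  printf("\n");
  return 0;
}
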
/external/XNNPACK/src/qs8-vaddc/gen/

minmax-neon-ld64-x32.c  (all matches in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32())
     50  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     51  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);
     52  int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier);
     53  int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier);
     54  int32x4_t vaccGHIJ = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexGHIJKLMN)), vx_multiplier);
     55  int32x4_t vaccKLMN = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexGHIJKLMN)), vx_multiplier);
     56  int32x4_t vaccOPQR = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexOPQRSTUV)), vx_multiplier);
     57  int32x4_t vaccSTUV = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexOPQRSTUV)), vx_multiplier);
    100  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
    101  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);

minmax-neon-ld64-x24.c  (all matches in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24())
     48  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     49  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);
     50  int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier);
     51  int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier);
     52  int32x4_t vaccGHIJ = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vexGHIJKLMN)), vx_multiplier);
     53  int32x4_t vaccKLMN = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vexGHIJKLMN)), vx_multiplier);
     91  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     92  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);

minmax-neon-ld64-x16.c  (all matches in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16())
     46  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     47  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);
     48  int32x4_t vacc89AB = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex89ABCDEF)), vx_multiplier);
     49  int32x4_t vaccCDEF = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex89ABCDEF)), vx_multiplier);
     78  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     79  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);

minmax-neon-ld64-x8.c  (all matches in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8())
     44  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     45  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);
     69  int32x4_t vacc0123 = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex01234567)), vx_multiplier);
     70  int32x4_t vacc4567 = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex01234567)), vx_multiplier);

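Note: in these vaddc ("add a constant") kernels the accumulator operand of vmlaq_s32 is not a running sum but a precomputed vy_bias that already folds in the scalar operand, so each vector block needs only one widen-and-MLA. A short sketch of that shape follows; the bias and multiplier values are hypothetical and not taken from the XNNPACK sources.

// Sketch (not XNNPACK code): vmlaq_s32 with a precomputed bias vector as the
// accumulator argument, the shape used by the vaddc kernels listed above.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int32_t y_bias = 4096;       // assumed bias with the constant folded in
  const int32_t x_multiplier = 7;    // assumed multiplier

  const int16_t x_vals[8] = {-4, -3, -2, -1, 1, 2, 3, 4};
  int16x8_t vex = vld1q_s16(x_vals);

  int32x4_t vy_bias = vdupq_n_s32(y_bias);
  int32x4_t vx_multiplier = vdupq_n_s32(x_multiplier);

  // acc = y_bias + widen(x) * x_multiplier; the same bias is reused per block.
  int32x4_t vacc_lo = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex)), vx_multiplier);
  int32x4_t vacc_hi = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex)), vx_multiplier);

  int32_t out[8];
  vst1q_s32(out, vacc_lo);
  vst1q_s32(out + 4, vacc_hi);
  for (int i = 0; i < 8; i++) printf("%d ", out[i]);  // 4068 4075 ... 4124
  printf("\n");
  return 0;
}
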
/external/libhevc/common/arm/

ihevc_resi_trans_neon_32x32.c  (all matches in ihevc_resi_trans_32x32_neon())
   1204  a[0] = vmlaq_s32(a[0], g_ai4_ihevc_trans_32_1_8, vrev64q_s32(eeee));
   1250  a[14] = vmlaq_s32(a[14], g_ai4_ihevc_trans_32_14_47, eo1);
   1251  a[10] = vmlaq_s32(a[10], g_ai4_ihevc_trans_32_10_47, eo1);
   1252  a[6] = vmlaq_s32(a[6], g_ai4_ihevc_trans_32_6_47, eo1);
   1253  a[2] = vmlaq_s32(a[2], g_ai4_ihevc_trans_32_2_47, eo1);
   1280  a[30] = vmlaq_s32(a[30], g_ai4_ihevc_trans_32_30_47, eo1);
   1281  a[26] = vmlaq_s32(a[26], g_ai4_ihevc_trans_32_26_47, eo1);
   1282  a[22] = vmlaq_s32(a[22], g_ai4_ihevc_trans_32_22_47, eo1);
   1283  a[18] = vmlaq_s32(a[18], g_ai4_ihevc_trans_32_18_47, eo1);
   1310  a[1] = vmlaq_s32(a[1], g_ai4_ihevc_trans_32_1_47, o1);
   [all …]

ihevc_resi_trans_neon.c  (all matches in ihevc_resi_trans_4x4_ttype1_neon())
    285  src0_4x32b = vmlaq_s32(c3_4x32b, c0_4x32b, coeff0_4x32b); /* 29*c0 + c3 */
    287  src3_4x32b = vmlaq_s32(c3_4x32b, c2_4x32b, coeff1_4x32b); /* 55*c2 + c3 */
    290  src0_4x32b = vmlaq_s32(src0_4x32b, c1_4x32b, coeff1_4x32b); /* 29*c0 + 55*c1 + c3 */
    291  src2_4x32b = vmlaq_s32(src2_4x32b, c0_4x32b, coeff1_4x32b); /* 29*c2 + 55*c0 - c3 */
    323  src0_4x32b = vmlaq_s32(c3_4x32b, c0_4x32b, coeff0_4x32b); /* 29*c0 + c3 */
    325  src3_4x32b = vmlaq_s32(c3_4x32b, c2_4x32b, coeff1_4x32b); /* 55*c2 + c3 */
    328  src0_4x32b = vmlaq_s32(src0_4x32b, c1_4x32b, coeff1_4x32b); /* 29*c0 + 55*c1 + c3 */
    329  src2_4x32b = vmlaq_s32(src2_4x32b, c0_4x32b, coeff1_4x32b); /* 29*c2 + 55*c0 - c3 */

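Note: the comments in ihevc_resi_trans_neon.c above ("29*c0 + c3", then "29*c0 + 55*c1 + c3") show vmlaq_s32 being chained to build multi-term weighted sums, one term per MLA. Below is a small self-contained illustration of that chaining; it reuses the weights 29 and 55 from those comments but the input data is invented, and it is not libhevc code.

// Sketch: chaining vmlaq_s32 to accumulate c3 + 29*c0 + 55*c1 per lane,
// mirroring the comments in the libhevc matches above. Data is invented.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int32_t c0_vals[4] = {1, 2, 3, 4};
  const int32_t c1_vals[4] = {5, 6, 7, 8};
  const int32_t c3_vals[4] = {9, 10, 11, 12};

  int32x4_t c0 = vld1q_s32(c0_vals);
  int32x4_t c1 = vld1q_s32(c1_vals);
  int32x4_t c3 = vld1q_s32(c3_vals);
  int32x4_t w29 = vdupq_n_s32(29);
  int32x4_t w55 = vdupq_n_s32(55);

  int32x4_t acc = vmlaq_s32(c3, c0, w29);   // c3 + 29*c0
  acc = vmlaq_s32(acc, c1, w55);            // c3 + 29*c0 + 55*c1

  int32_t out[4];
  vst1q_s32(out, acc);
  for (int i = 0; i < 4; i++) printf("%d ", out[i]);  // 313 398 483 568
  printf("\n");
  return 0;
}
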
/external/XNNPACK/src/qs8-vadd/gen/

minmax-neon-ld64-x32.c  (all matches in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32())
     62  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
     63  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);
     64  vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vey89ABCDEF)), vy_multiplier);
     65  vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vey89ABCDEF)), vy_multiplier);
     66  vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(veyGHIJKLMN)), vy_multiplier);
     67  vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(veyGHIJKLMN)), vy_multiplier);
     68  vaccOPQR = vmlaq_s32(vaccOPQR, vmovl_s16(vget_low_s16(veyOPQRSTUV)), vy_multiplier);
     69  vaccSTUV = vmlaq_s32(vaccSTUV, vmovl_s16(vget_high_s16(veyOPQRSTUV)), vy_multiplier);
    117  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
    118  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);

minmax-neon-ld64-x24.c  (all matches in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24())
     56  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
     57  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);
     58  vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vey89ABCDEF)), vy_multiplier);
     59  vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vey89ABCDEF)), vy_multiplier);
     60  vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(veyGHIJKLMN)), vy_multiplier);
     61  vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(veyGHIJKLMN)), vy_multiplier);
    104  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
    105  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);

minmax-neon-ld64-x16.c  (all matches in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16())
     50  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
     51  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);
     52  vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vey89ABCDEF)), vy_multiplier);
     53  vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vey89ABCDEF)), vy_multiplier);
     87  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
     88  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);

minmax-neon-ld64-x8.c  (all matches in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8())
     44  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
     45  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);
     74  vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vey01234567)), vy_multiplier);
     75  vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vey01234567)), vy_multiplier);

/external/libhevc/encoder/arm/

ihevce_hme_utils_neon.c
    192  dst0_4x32b = vmlaq_s32(add_4x32b, dst0_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    193  dst1_4x32b = vmlaq_s32(add_4x32b, dst1_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    194  dst2_4x32b = vmlaq_s32(add_4x32b, dst2_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    195  dst3_4x32b = vmlaq_s32(add_4x32b, dst3_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    198  dst4_4x32b = vmlaq_s32(add_4x32b, dst4_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    199  dst5_4x32b = vmlaq_s32(add_4x32b, dst5_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    200  dst6_4x32b = vmlaq_s32(add_4x32b, dst6_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    201  dst7_4x32b = vmlaq_s32(add_4x32b, dst7_4x32b, inv_wt_4x32b);  in ihevce_get_wt_inp_4x8_neon()
    615  dst0_4x32b = vmlaq_s32(add_4x32b, dst0_4x32b, inv_wt_4x32b);  in hme_get_wt_inp_ctb_neon()
    616  dst1_4x32b = vmlaq_s32(add_4x32b, dst1_4x32b, inv_wt_4x32b);  in hme_get_wt_inp_ctb_neon()
    [all …]

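Note: in the HME matches above the accumulator operand of vmlaq_s32 is a rounding/offset vector (add_4x32b) rather than a running sum, so dst*inv_wt + add is computed in a single instruction ahead of a later shift. The sketch below shows that scale-and-round shape in isolation; the weight, offset, and shift values are assumptions, not taken from the libhevc encoder.

// Sketch: (src * inv_wt + round_add) >> shift using vmlaq_s32 with the
// rounding constant as the accumulator. Weight/shift values are assumed.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int shift = 6;                                // assumed shift
  int32x4_t inv_wt = vdupq_n_s32(77);                 // assumed inverse weight
  int32x4_t add_v  = vdupq_n_s32(1 << (shift - 1));   // rounding offset

  const int32_t src_vals[4] = {10, 20, 30, 40};
  int32x4_t src = vld1q_s32(src_vals);

  int32x4_t dst = vmlaq_s32(add_v, src, inv_wt);      // src*inv_wt + round
  dst = vshrq_n_s32(dst, 6);                          // >> shift

  int32_t out[4];
  vst1q_s32(out, dst);
  for (int i = 0; i < 4; i++) printf("%d ", out[i]);  // 12 24 36 48
  printf("\n");
  return 0;
}
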
/external/XNNPACK/src/qs8-vaddc/

neon-ld64.c.in
     46  …int32x4_t vacc${ABC[N:N+4]} = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex${ABC[N:N+8]})), vx_mul…
     47  …int32x4_t vacc${ABC[N+4:N+8]} = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex${ABC[N:N+8]})), vx_…
     91  …int32x4_t vacc${ABC[0:4]} = vmlaq_s32(vy_bias, vmovl_s16(vget_low_s16(vex${ABC[0:8]})), vx_multipl…
     92  …int32x4_t vacc${ABC[4:8]} = vmlaq_s32(vy_bias, vmovl_s16(vget_high_s16(vex${ABC[0:8]})), vx_multip…

/external/XNNPACK/src/qs8-vadd/

neon-ld64.c.in
     47  …vacc${ABC[N:N+4]} = vmlaq_s32(vacc${ABC[N:N+4]}, vmovl_s16(vget_low_s16(vey${ABC[N:N+8]})), vy_mul…
     48  …vacc${ABC[N+4:N+8]} = vmlaq_s32(vacc${ABC[N+4:N+8]}, vmovl_s16(vget_high_s16(vey${ABC[N:N+8]})), v…
     98  …vacc${ABC[0:4]} = vmlaq_s32(vacc${ABC[0:4]}, vmovl_s16(vget_low_s16(vey${ABC[0:8]})), vy_multiplie…
     99  …vacc${ABC[4:8]} = vmlaq_s32(vacc${ABC[4:8]}, vmovl_s16(vget_high_s16(vey${ABC[0:8]})), vy_multipli…

/external/tensorflow/tensorflow/lite/kernels/internal/optimized/

batch_matmul.h  (all matches in BatchMatMul())
    247  vmlaq_s32(scratch_val0, row_sum0, input_offset0);
    249  vmlaq_s32(scratch_val1, row_sum1, input_offset1);

neon_tensor_utils.cc  (all matches in NeonMatrixBatchVectorMultiplyAccumulate())
   1433  vmlaq_s32(scratch_val0, row_sum0, input_offset0);
   1435  vmlaq_s32(scratch_val1, row_sum1, input_offset1);

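Note: both TensorFlow Lite matches fold a per-row sum multiplied by an input-offset vector into int32 matmul accumulators with vmlaq_s32, the usual zero-point correction for quantized matrix multiplication. The sketch below shows only that accumulator update; the row sums, the offset value and its sign convention, and the surrounding matmul are illustrative assumptions, not TFLite code.

// Sketch: adding row_sum * input_offset to existing int32 accumulators with
// vmlaq_s32, as in the TFLite matches above. All values are illustrative.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int32_t acc_vals[4] = {1000, 2000, 3000, 4000};  // raw dot products
  const int32_t row_sum_vals[4] = {50, 60, 70, 80};      // per-row weight sums

  int32x4_t scratch_val0 = vld1q_s32(acc_vals);
  int32x4_t row_sum0 = vld1q_s32(row_sum_vals);
  int32x4_t input_offset0 = vdupq_n_s32(-128);           // assumed offset

  // accumulator += row_sum * input_offset
  scratch_val0 = vmlaq_s32(scratch_val0, row_sum0, input_offset0);

  int32_t out[4];
  vst1q_s32(out, scratch_val0);
  for (int i = 0; i < 4; i++) printf("%d ", out[i]);  // -5400 -5680 -5960 -6240
  printf("\n");
  return 0;
}
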
/external/libaom/libaom/av1/common/arm/

selfguided_neon.c  (all matches in av1_apply_selfguided_restoration_neon())
   1542  v0 = vmlaq_s32(v0, xq0_vec, f00);
   1543  v4 = vmlaq_s32(v4, xq0_vec, f10);
   1553  v0 = vmlaq_s32(v0, xq1_vec, f00);
   1554  v4 = vmlaq_s32(v4, xq1_vec, f10);

/external/gemmlowp/internal/

simd_wrappers_neon.h
    130  *acc = vmlaq_s32(*acc, lhs, rhs);

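Note: the gemmlowp wrapper at line 130 is simply *acc = vmlaq_s32(*acc, lhs, rhs), a running multiply-accumulate. The loop below shows the role such an update typically plays in a dot-product-style accumulation; it is an illustration, not gemmlowp code, and the data is invented.

// Sketch: vmlaq_s32 as a running multiply-accumulate across a loop, the role
// the gemmlowp MulAdd wrapper above plays. Not gemmlowp code.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  const int32_t lhs[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  const int32_t rhs[8] = {8, 7, 6, 5, 4, 3, 2, 1};

  int32x4_t acc = vdupq_n_s32(0);
  for (int i = 0; i < 8; i += 4) {
    acc = vmlaq_s32(acc, vld1q_s32(lhs + i), vld1q_s32(rhs + i));  // acc += lhs*rhs
  }

  // Horizontal sum of the four lanes gives the full dot product.
  int32_t dot = vgetq_lane_s32(acc, 0) + vgetq_lane_s32(acc, 1) +
                vgetq_lane_s32(acc, 2) + vgetq_lane_s32(acc, 3);
  printf("%d\n", dot);  // 1*8 + 2*7 + ... + 8*1 = 120
  return 0;
}
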
/external/eigen/Eigen/src/Core/arch/NEON/

PacketMath.h
    213  …acket4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }

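Note the operand order in the Eigen match: pmadd(a, b, c) computes a*b + c, while vmlaq_s32 takes the addend first, so the wrapper is written vmlaq_s32(c, a, b). A tiny check of that equivalence in plain NEON (not Eigen code):

// Sketch: vmlaq_s32(c, a, b) returns c + a*b per lane, matching Eigen's
// pmadd(a, b, c) convention. Plain NEON, not Eigen code.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  int32x4_t a = vdupq_n_s32(3);
  int32x4_t b = vdupq_n_s32(4);
  int32x4_t c = vdupq_n_s32(5);

  int32x4_t r = vmlaq_s32(c, a, b);      // 5 + 3*4 = 17 in every lane
  printf("%d\n", vgetq_lane_s32(r, 0));  // 17
  return 0;
}
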
/external/libgav1/libgav1/src/dsp/arm/

inverse_transform_10bit_neon.cc
   1491  vmlaq_s32(v_dual_round, v_src, v_multiplier);  in Identity4_NEON()
   1511  const int32x4_t v_src_mult_lo = vmlaq_s32(v_dual_round, v_src, v_multiplier);  in Identity4DcOnly()

/external/neon_2_sse/

NEON_2_SSE.h
    573  _NEON2SSESTORAGE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c); // VMLA.I32 q0,q0,q0
   3908  _NEON2SSESTORAGE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c); // VMLA.I32 q0,q0,q0
   3909  _NEON2SSE_INLINE int32x4_t vmlaq_s32(int32x4_t a, int32x4_t b, int32x4_t c) // VMLA.I32 q0,q0,q0  in vmlaq_s32() function
   3953  #define vmlaq_u32 vmlaq_s32
  13351  return vmlaq_s32(a,b,c);
  14070  return vmlaq_s32(a,b,scalar);  in vmlaq_n_s32()

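Note: NEON_2_SSE.h declares and implements vmlaq_s32 (and maps vmlaq_u32 onto it) so that NEON intrinsic code can compile to SSE on x86. The basic contract is r[i] = a[i] + b[i]*c[i]. A minimal sanity check follows; the include switch is a common usage pattern shown as an assumption, not a requirement of the header.

// Sketch: the basic vmlaq_s32 contract, r[i] = a[i] + b[i]*c[i].
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
#include <arm_neon.h>
#else
#include "NEON_2_SSE.h"  // assumes the header is on the include path
#endif
#include <stdio.h>

int main(void) {
  const int32_t a_vals[4] = {1, 2, 3, 4};
  const int32_t b_vals[4] = {10, 20, 30, 40};
  const int32_t c_vals[4] = {2, 2, 2, 2};

  int32x4_t r = vmlaq_s32(vld1q_s32(a_vals), vld1q_s32(b_vals), vld1q_s32(c_vals));

  int32_t out[4];
  vst1q_s32(out, r);
  for (int i = 0; i < 4; i++) printf("%d ", out[i]);  // 21 42 63 84
  printf("\n");
  return 0;
}
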
/external/llvm-project/clang/test/CodeGen/

aarch64-neon-intrinsics.c
    474  return vmlaq_s32(v1, v2, v3);  in test_vmlaq_s32()

arm_neon_intrinsics.c
   7020  return vmlaq_s32(a, b, c);  in test_vmlaq_s32()

/external/clang/test/CodeGen/

arm_neon_intrinsics.c
   8885  return vmlaq_s32(a, b, c);  in test_vmlaq_s32()

aarch64-neon-intrinsics.c
    477  return vmlaq_s32(v1, v2, v3);  in test_vmlaq_s32()