/external/XNNPACK/src/qs8-vadd/gen/ |
D | minmax-neon-ld64-x8.c | 26 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8() 27 const int32x4_t vy_multiplier = vld1q_dup_s32(&params->neon.y_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8() 28 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x8()
|
D | minmax-neon-ld64-x16.c | 26 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16() 27 const int32x4_t vy_multiplier = vld1q_dup_s32(&params->neon.y_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16() 28 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x16()
|
D | minmax-neon-ld64-x24.c | 26 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24() 27 const int32x4_t vy_multiplier = vld1q_dup_s32(&params->neon.y_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24() 28 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x24()
|
D | minmax-neon-ld64-x32.c | 26 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32() 27 const int32x4_t vy_multiplier = vld1q_dup_s32(&params->neon.y_multiplier); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32() 28 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vadd_minmax_ukernel__neon_ld64_x32()
|
/external/XNNPACK/src/qs8-vaddc/gen/ |
D | minmax-neon-ld64-x8.c | 28 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8() 29 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8()
|
D | minmax-neon-ld64-x16.c | 28 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16() 29 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x16()
|
D | minmax-neon-ld64-x24.c | 28 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24() 29 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x24()
|
D | minmax-neon-ld64-x32.c | 28 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32() 29 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x32()
|
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 1x8c4-minmax-neondot.c | 95 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x8c4__neondot() 99 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x8c4__neondot()
|
D | 1x16c4-minmax-neondot.c | 109 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x16c4__neondot() 115 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x16c4__neondot()
|
D | 1x8c8-minmax-neon-mull-padal.c | 125 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal() 129 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mull_padal()
|
D | 1x8c2-minmax-neon-mull-padal-dup.c | 127 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x8c2__neon_mull_padal_dup() 131 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x8c2__neon_mull_padal_dup()
|
D | 1x8-minmax-neon-mlal-lane.c | 167 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mlal_lane() 171 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mlal_lane()
|
D | 1x8-minmax-neon-mull-addw-dup.c | 164 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mull_addw_dup() 168 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x8__neon_mull_addw_dup()
|
D | 1x8c16-minmax-neon-mlal-padal.c | 133 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_igemm_minmax_ukernel_1x8c16__neon_mlal_padal() 137 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_igemm_minmax_ukernel_1x8c16__neon_mlal_padal()
|
/external/XNNPACK/src/qs8-vadd/ |
D | neon-ld64.c.in | 25 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); 26 const int32x4_t vy_multiplier = vld1q_dup_s32(&params->neon.y_multiplier); 27 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
|
/external/XNNPACK/src/qu8-vadd/ |
D | minmax-neon.c | 25 const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon() 26 const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier); in xnn_qu8_vadd_minmax_ukernel__neon() 27 const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift); in xnn_qu8_vadd_minmax_ukernel__neon()
|
/external/XNNPACK/src/qs8-gemm/gen/ |
D | 1x8c8-minmax-neon-mull-padal.c | 111 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal() 115 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mull_padal()
|
D | 1x8c2-minmax-neon-mull-padal-dup.c | 113 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_gemm_minmax_ukernel_1x8c2__neon_mull_padal_dup() 117 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_gemm_minmax_ukernel_1x8c2__neon_mull_padal_dup()
|
D | 1x8-minmax-neon-mlal-lane.c | 154 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mlal_lane() 158 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mlal_lane()
|
D | 1x8c16-minmax-neon-mlal-padal.c | 119 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal() 123 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_gemm_minmax_ukernel_1x8c16__neon_mlal_padal()
|
D | 1x8-minmax-neon-mull-addw-dup.c | 150 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mull_addw_dup() 154 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift); in xnn_qs8_gemm_minmax_ukernel_1x8__neon_mull_addw_dup()
|
/external/XNNPACK/src/qs8-vaddc/ |
D | neon-ld64.c.in | 27 const int32x4_t vx_multiplier = vld1q_dup_s32(&params->neon.x_multiplier); 28 const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
|
/external/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
D | filterbanks_neon.c | 42 statev = vld1q_dup_s32(filter_state_ch1); in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
|
/external/XNNPACK/src/qu8-gavgpool/ |
D | 7x-minmax-neon-c8.c | 56 const int32x4_t vbias = vld1q_dup_s32(&params->neon.bias); in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8() 58 const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier); in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
|