/external/XNNPACK/src/qu8-vcvt/gen/ |
D | vcvt-armsimd32-x8.c |
  40: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qu8_vcvt_ukernel__armsimd32_x8() local
  75: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qu8_vcvt_ukernel__armsimd32_x8() local
  97: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qu8_vcvt_ukernel__armsimd32_x8() local
|
D | vcvt-armsimd32-x4.c |
  37: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qu8_vcvt_ukernel__armsimd32_x4() local
  59: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qu8_vcvt_ukernel__armsimd32_x4() local
|
/external/XNNPACK/src/qs8-vcvt/gen/ |
D | vcvt-armsimd32-x8.c |
  40: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qs8_vcvt_ukernel__armsimd32_x8() local
  75: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qs8_vcvt_ukernel__armsimd32_x8() local
  97: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qs8_vcvt_ukernel__armsimd32_x8() local
|
D | vcvt-armsimd32-x4.c |
  37: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qs8_vcvt_ukernel__armsimd32_x4() local
  59: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  in xnn_qs8_vcvt_ukernel__armsimd32_x4() local
|
/external/XNNPACK/src/qs8-vcvt/ |
D | armsimd32.c.in |
  65: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  variable
  87: int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);  variable
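
All of the qu8/qs8 vcvt armsimd32 kernels above build their accumulator with the ACLE __smlawb intrinsic: a 32-bit multiplier times the bottom 16-bit halfword of the packed input, keeping the top 32 bits of the 48-bit product and adding the bias. Below is a minimal, hedged C model of that single step; smlawb_model and its scalar arguments are illustrative names, not XNNPACK code.

    #include <stdint.h>
    #if defined(__ARM_FEATURE_DSP)
      #include <arm_acle.h>   /* provides __smlawb on Arm DSP-capable targets */
    #endif

    /* Model of the vcvt accumulation step: multiply a 32-bit scale by the
     * bottom 16-bit halfword of the packed input, keep the top 32 bits of the
     * 48-bit product, and add the bias.  The real kernels keep two elements
     * packed per word (vx13); a single element is shown here, and the Q-flag
     * saturation behaviour of SMLAWB is ignored. */
    static int32_t smlawb_model(int32_t vmultiplier, int32_t vx, int32_t vbias) {
    #if defined(__ARM_FEATURE_DSP)
      return __smlawb(vmultiplier, vx, vbias);
    #else
      const int64_t product = (int64_t) vmultiplier * (int16_t) (vx & 0xFFFF);
      return (int32_t) (product >> 16) + vbias;
    #endif
    }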
|
/external/XNNPACK/src/qs8-vlrelu/gen/ |
D | vlrelu-armsimd32-x8.c |
  50: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qs8_vlrelu_ukernel__armsimd32_x8() local
  90: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qs8_vlrelu_ukernel__armsimd32_x8() local
  117: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qs8_vlrelu_ukernel__armsimd32_x8() local
|
D | vlrelu-armsimd32-x4.c |
  43: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qs8_vlrelu_ukernel__armsimd32_x4() local
  70: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qs8_vlrelu_ukernel__armsimd32_x4() local
|
D | vlrelu-sse2-x32.c |
  90: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qs8_vlrelu_ukernel__sse2_x32() local
  139: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qs8_vlrelu_ukernel__sse2_x32() local
  183: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qs8_vlrelu_ukernel__sse2_x32() local
|
D | vlrelu-sse2-x16.c |
  68: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qs8_vlrelu_ukernel__sse2_x16() local
  112: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qs8_vlrelu_ukernel__sse2_x16() local
|
/external/XNNPACK/src/qu8-vlrelu/gen/ |
D | vlrelu-armsimd32-x8.c |
  50: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qu8_vlrelu_ukernel__armsimd32_x8() local
  90: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qu8_vlrelu_ukernel__armsimd32_x8() local
  117: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qu8_vlrelu_ukernel__armsimd32_x8() local
|
D | vlrelu-armsimd32-x4.c |
  43: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qu8_vlrelu_ukernel__armsimd32_x4() local
  70: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  in xnn_qu8_vlrelu_ukernel__armsimd32_x4() local
|
D | vlrelu-sse2-x32.c |
  88: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qu8_vlrelu_ukernel__sse2_x32() local
  136: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qu8_vlrelu_ukernel__sse2_x32() local
  179: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qu8_vlrelu_ukernel__sse2_x32() local
|
D | vlrelu-sse2-x16.c |
  67: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qu8_vlrelu_ukernel__sse2_x16() local
  110: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  in xnn_qu8_vlrelu_ukernel__sse2_x16() local
|
/external/XNNPACK/src/qs8-vlrelu/ |
D | armsimd32.c.in |
  78: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  variable
  105: int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);  variable
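
The vlrelu armsimd32 kernels above (both the qs8 and qu8 variants) select a per-element multiplier by sign and then accumulate with __smlabb, a 16x16-bit multiply of the bottom halfwords plus a 32-bit accumulator. A hedged scalar model of that step, with illustrative names and a portable fallback:

    #include <stdint.h>
    #if defined(__ARM_FEATURE_DSP)
      #include <arm_acle.h>   /* provides __smlabb */
    #endif

    /* Model of one vlrelu accumulation: multiply the bottom 16-bit halfwords
     * of the packed per-element multiplier and the packed input, then add the
     * bias.  The generated kernels run this on lanes 1/3 (vx13) and 0/2 in
     * parallel; one lane pair is shown here. */
    static int32_t smlabb_model(int32_t vmultiplier_packed, int32_t vx_packed, int32_t vbias) {
    #if defined(__ARM_FEATURE_DSP)
      return __smlabb(vmultiplier_packed, vx_packed, vbias);
    #else
      return (int32_t) (int16_t) (vmultiplier_packed & 0xFFFF)
           * (int32_t) (int16_t) (vx_packed & 0xFFFF) + vbias;
    #endif
    }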
|
D | sse2.c.in |
  125: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  variable
  173: __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);  variable
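
The SSE2 vlrelu kernels above form their 16-bit accumulator by adding two partial-product vectors with _mm_add_epi16. The fixed-point split that produces vprodlo1/vprodhi1 is not visible in this listing, so the sketch below only demonstrates the lane-wise addition itself, on made-up operand values:

    #include <emmintrin.h>   /* SSE2 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* Hypothetical partial products standing in for vprodlo1/vprodhi1. */
      const __m128i vprodlo = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1);
      const __m128i vprodhi = _mm_set_epi16(80, 70, 60, 50, 40, 30, 20, 10);
      const __m128i vacc = _mm_add_epi16(vprodlo, vprodhi);   /* eight independent 16-bit adds */

      int16_t out[8];
      _mm_storeu_si128((__m128i*) out, vacc);
      for (int i = 0; i < 8; i++) {
        printf("%d ", out[i]);   /* prints 11 22 33 44 55 66 77 88 */
      }
      printf("\n");
      return 0;
    }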
|
/external/XNNPACK/src/f16-vmulcaddc/gen/ |
D | c8-minmax-fma3-2x.c |
  56: __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));  in xnn_f16_vmulcaddc_minmax_ukernel_c8__fma3_2x() local
  81: __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));  in xnn_f16_vmulcaddc_minmax_ukernel_c8__fma3_2x() local
|
D | c16-minmax-fma3-2x.c |
  93: __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));  in xnn_f16_vmulcaddc_minmax_ukernel_c16__fma3_2x() local
  118: __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));  in xnn_f16_vmulcaddc_minmax_ukernel_c16__fma3_2x() local
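
Both f16 vmulcaddc kernels above initialize their accumulators by loading eight fp16 values and widening them to fp32 with the F16C _mm256_cvtph_ps intrinsic. A small hedged helper isolating just that load/convert step; load8_f16_as_f32 and src are illustrative names, not part of XNNPACK:

    #include <immintrin.h>   /* F16C: _mm256_cvtph_ps; build with -mf16c (the kernels also need -mfma) */
    #include <stdint.h>

    /* Load eight IEEE fp16 values from `src` and widen them to eight fp32
     * lanes, mirroring the accumulator initialization shown above.  `src` is
     * a hypothetical pointer into one row of half-precision input data. */
    static __m256 load8_f16_as_f32(const uint16_t* src) {
      return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) src));
    }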
|
/external/XNNPACK/src/f32-vmulcaddc/gen/ |
D | c8-minmax-wasmsimd-arm-2x.c |
  93: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_arm_2x() local
  119: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_arm_2x() local
|
D | c8-minmax-wasmrelaxedsimd-2x.c |
  93: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_2x() local
  119: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_2x() local
|
D | c8-minmax-wasmsimd-x86-2x.c |
  93: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_x86_2x() local
  119: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_x86_2x() local
|
D | c8-minmax-wasmrelaxedsimd-fma-2x.c |
  93: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_fma_2x() local
  119: v128_t vacc1 = wasm_v128_load(i1);  in xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmrelaxedsimd_fma_2x() local
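
All four c8 wasm variants above seed each accumulator with a plain wasm_v128_load of the input row; the multiply by the per-channel scale and add of the per-channel bias (plus the min/max clamp that gives these kernels their name) follows. A hedged sketch of that step, with illustrative parameter names and the clamp omitted:

    #include <wasm_simd128.h>   /* clang targeting wasm32 with -msimd128 */

    /* Model of one accumulator update in the c8 2x kernels: load four fp32
     * lanes from the input row, then multiply by the per-channel scale and
     * add the per-channel bias.  vscale/vbias are illustrative names for the
     * packed weights. */
    static v128_t vmulcaddc_step(const float* i1, v128_t vscale, v128_t vbias) {
      v128_t vacc1 = wasm_v128_load(i1);                              /* vacc1 = x */
      vacc1 = wasm_f32x4_add(wasm_f32x4_mul(vacc1, vscale), vbias);   /* x * scale + bias */
      return vacc1;
    }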
|
/external/XNNPACK/src/f32-spmm/gen/ |
D | 8x1-minmax-scalar-pipelined.c |
  52: float vacc1 = vw;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
  130: float vacc1 = vw;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
  180: float vacc1 = vw;  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
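
In the pipelined scalar spmm kernel the accumulators start from a preloaded weight value (the float vacc1 = vw; lines above) rather than from zero. The sketch below models the overall accumulate-then-clamp pattern for a single output element; the flat arrays and names are hypothetical, not XNNPACK's packed weight/index format:

    #include <stddef.h>
    #include <stdint.h>
    #include <math.h>

    /* Illustrative model of the spmm accumulation pattern: the accumulator is
     * seeded from a preloaded value, multiply-accumulates one input per
     * nonzero weight, and is clamped to [vmin, vmax]. */
    static float spmm_one_output(const float* input, const float* nonzero_weights,
                                 const int32_t* input_offsets, size_t nnz,
                                 float bias, float vmin, float vmax) {
      float vacc = bias;                     /* accumulator starts from the preloaded weight/bias */
      for (size_t k = 0; k < nnz; k++) {
        vacc += input[input_offsets[k]] * nonzero_weights[k];
      }
      vacc = fmaxf(vacc, vmin);              /* minmax clamping */
      vacc = fminf(vacc, vmax);
      return vacc;
    }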
|
/external/XNNPACK/src/f32-prelu/gen/ |
D | avx512f-2x32.c |
  89: const __m512 vacc1 = _mm512_mask_mul_ps(vi1, vsign1, vi1, vw);  in xnn_f32_prelu_ukernel__avx512f_2x32() local
  112: const __m512 vacc1 = _mm512_mask_mul_ps(vi1, vsign1, vi1, vw);  in xnn_f32_prelu_ukernel__avx512f_2x32() local
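
The AVX-512 PReLU kernel applies the slope only to negative lanes through a write-masked multiply: _mm512_mask_mul_ps keeps vi1 in lanes where the mask is clear and writes vi1*vw where it is set. A hedged sketch of that step; deriving vsign from a compare against zero is one plausible option, the generated kernel may obtain it from the sign bits instead:

    #include <immintrin.h>   /* AVX-512F; build with -mavx512f */

    /* Sketch of the predicated PReLU step: lanes where the mask is set
     * (negative inputs) become vi * vw, all other lanes keep vi unchanged. */
    static __m512 prelu_step(__m512 vi, __m512 vw) {
      const __mmask16 vsign = _mm512_cmp_ps_mask(vi, _mm512_setzero_ps(), _CMP_LT_OQ);
      return _mm512_mask_mul_ps(vi, vsign, vi, vw);   /* vi where >= 0, vi * vw where < 0 */
    }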
|
/external/XNNPACK/src/qu8-gavgpool/gen/ |
D | 7p7x-minmax-fp32-scalar-imagic-c2.c |
  47: int32_t vacc1 = vi0x1 + vinit_bias;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2() local
  98: int32_t vacc1 = b[1];  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2() local
  176: int32_t vacc1 = buffer[1];  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2() local
|
/external/XNNPACK/src/qs8-gavgpool/gen/ |
D | 7p7x-minmax-fp32-scalar-fmagic-c2.c |
  47: int32_t vacc1 = vi0x1 + vinit_bias;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2() local
  98: int32_t vacc1 = b[1];  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2() local
  176: int32_t vacc1 = buffer[1];  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2() local
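
Both gavgpool 7p7x scalar kernels above accumulate in phases that the three hits make visible: the first pass seeds each channel with the input plus vinit_bias, intermediate passes resume from a scratch buffer (b[1]), and the final pass reads the buffer again before requantizing. A hedged scalar sketch of one accumulation pass; uint8_t inputs match the qu8 variant (the qs8 kernel uses int8_t), the fp32 requantization is omitted, and the layout and names are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of one pass of the 7p7x multipass pattern: seed from the bias on
     * the first pass or from the scratch buffer on later passes, add up to
     * seven input rows per channel, and store the running sum back. */
    static void gavgpool_accumulate_pass(const uint8_t* rows[7], size_t channels,
                                         int32_t vinit_bias, int first_pass,
                                         int32_t* buffer) {
      for (size_t c = 0; c < channels; c++) {
        int32_t vacc = first_pass ? (int32_t) rows[0][c] + vinit_bias   /* seed with the bias */
                                  : buffer[c] + (int32_t) rows[0][c];   /* resume the partial sum */
        for (size_t r = 1; r < 7; r++) {
          vacc += (int32_t) rows[r][c];
        }
        buffer[c] = vacc;   /* running sum for the next pass / final scaling */
      }
    }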
|