/external/XNNPACK/src/f32-gavgpool-cw/ |
D | sse-x4.c |
      42  const __m128 vi0 = _mm_loadu_ps(i0);  in xnn_f32_gavgpool_cw_ukernel__sse_x4() local
      59  const __m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);  in xnn_f32_gavgpool_cw_ukernel__sse_x4() local
      97  const __m128 vi0 = _mm_loadu_ps(i0);  in xnn_f32_gavgpool_cw_ukernel__sse_x4() local
     104  __m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);  in xnn_f32_gavgpool_cw_ukernel__sse_x4() local
|
D | wasmsimd-x86-x4.c |
      42  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4() local
      59  const v128_t vi0 = wasm_v128_and(wasm_v128_load(i0), vmask);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4() local
      97  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4() local
     104  v128_t vi0 = wasm_v128_and(vmask, wasm_v128_load(i0));  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4() local
|
D | wasmsimd-arm-x4.c |
      42  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4() local
      59  const v128_t vi0 = wasm_v128_and(wasm_v128_load(i0), vmask);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4() local
      97  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4() local
     104  v128_t vi0 = wasm_v128_and(vmask, wasm_v128_load(i0));  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4() local
|
D | neon-x4.c |
      42  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local
      55  float32x4_t vi0 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + n);  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local
     103  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local
     109  float32x4_t vi0 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + n);  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local
|
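All four f32 channel-wise global-average-pool kernels listed above share one shape: full-width SIMD loads of vi0 in the main loop, plus either a masked load (SSE/wasmsimd) or remainder pointer arithmetic (NEON) for the final partial vector of a channel. Below is a minimal SSE sketch of that pattern, not the XNNPACK kernel itself; the helper name, the mask table, and the assumption that the input row may safely be over-read by up to three floats at the tail are all illustrative.

    /* Sketch (not the XNNPACK kernel): channel-wise global average of one row of
     * `elements` floats with SSE, using a tail mask like the vi0 loads above.
     * Assumes elements > 0 and that the tail over-read (up to 3 floats) is safe,
     * e.g. because the buffer is padded. */
    #include <stddef.h>
    #include <stdint.h>
    #include <xmmintrin.h>

    static float gavgpool_cw_channel_sse(const float* i0, size_t elements) {
      /* Mask keeping only the first (elements % 4) lanes of the tail vector. */
      static const uint32_t mask_table[8] = {
        UINT32_C(0xFFFFFFFF), UINT32_C(0xFFFFFFFF), UINT32_C(0xFFFFFFFF), UINT32_C(0xFFFFFFFF),
        0, 0, 0, 0,
      };
      const __m128 vmask =
          _mm_loadu_ps((const float*) &mask_table[4 - (elements & 3)]);

      __m128 vsum = _mm_setzero_ps();
      size_t n = elements;
      for (; n >= 4; n -= 4) {
        const __m128 vi0 = _mm_loadu_ps(i0);  /* full-width load, cf. line 42 */
        i0 += 4;
        vsum = _mm_add_ps(vsum, vi0);
      }
      if (n != 0) {
        /* Masked load: zero the lanes past the end of the channel, cf. line 59. */
        const __m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);
        vsum = _mm_add_ps(vsum, vi0);
      }
      /* Horizontal reduction of the 4 partial sums, then divide by the count. */
      __m128 vtmp = _mm_add_ps(vsum, _mm_movehl_ps(vsum, vsum));
      vtmp = _mm_add_ss(vtmp, _mm_shuffle_ps(vtmp, vtmp, 1));
      return _mm_cvtss_f32(vtmp) / (float) elements;
    }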
/external/XNNPACK/src/s8-maxpool/ |
D | 2p2x-minmax-neon-c16.c |
      43  const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;  in xnn_s8_maxpool_minmax_ukernel_2p2x__neon_c16() local
      53  const int8x16_t vi0 = vld1q_s8(i0);  in xnn_s8_maxpool_minmax_ukernel_2p2x__neon_c16() local
      91  const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;  in xnn_s8_maxpool_minmax_ukernel_2p2x__neon_c16() local
     103  const int8x16_t vi0 = vld1q_s8(i0);  in xnn_s8_maxpool_minmax_ukernel_2p2x__neon_c16() local
|
D | 4p3x-minmax-neon-c16.c |
      53  const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;  in xnn_s8_maxpool_minmax_ukernel_4p3x__neon_c16() local
      67  const int8x16_t vi0 = vld1q_s8(i0);  in xnn_s8_maxpool_minmax_ukernel_4p3x__neon_c16() local
     114  const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;  in xnn_s8_maxpool_minmax_ukernel_4p3x__neon_c16() local
     128  const int8x16_t vi0 = vld1q_s8(i0);  in xnn_s8_maxpool_minmax_ukernel_4p3x__neon_c16() local
|
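The s8 max-pool kernels above load 16 signed bytes per input row with vld1q_s8 and reduce them with a vector max before clamping the result. A stripped-down two-row sketch of that inner loop follows; it is illustrative only (the real 2p2x/4p3x kernels also handle channel remainders, pointer rewinding, and the multi-pass pooling dimension), and it assumes the channel count is a multiple of 16.

    /* Sketch (not the XNNPACK kernel): max-pool two s8 rows over `c` channels,
     * 16 lanes at a time, with min/max output clamping as in the c16 kernels. */
    #include <stddef.h>
    #include <arm_neon.h>

    static void s8_maxpool_2rows_neon(const int8_t* i0, const int8_t* i1,
                                      int8_t* output, size_t c,
                                      int8_t output_min, int8_t output_max) {
      const int8x16_t voutput_min = vdupq_n_s8(output_min);
      const int8x16_t voutput_max = vdupq_n_s8(output_max);
      for (; c >= 16; c -= 16) {
        const int8x16_t vi0 = vld1q_s8(i0); i0 += 16;  /* cf. line 43 */
        const int8x16_t vi1 = vld1q_s8(i1); i1 += 16;
        int8x16_t vout = vmaxq_s8(vi0, vi1);           /* pooling = elementwise max */
        vout = vmaxq_s8(vout, voutput_min);            /* clamp to [min, max] */
        vout = vminq_s8(vout, voutput_max);
        vst1q_s8(output, vout); output += 16;
      }
    }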
/external/XNNPACK/src/f16-gavgpool-cw/ |
D | neonfp16arith-x4.c |
      43  const float16x4_t vi0 = vld1_f16(i0); i0 += 4;  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x4() local
      56  float16x4_t vi0 = vld1_f16(i0); i0 = (const __fp16*) ((uintptr_t) i0 + n);  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x4() local
      96  const float16x4_t vi0 = vld1_f16(i0); i0 += 4;  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x4() local
     103  float16x4_t vi0 = vld1_f16(i0); i0 = (const __fp16*) ((uintptr_t) i0 + n);  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x4() local
|
D | neonfp16arith-x8.c |
      44  const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8() local
      57  float16x8_t vi0 = vld1q_f16(i0); i0 = (const __fp16*) ((uintptr_t) i0 + n);  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8() local
     108  const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8() local
     115  float16x8_t vi0 = vld1q_f16(i0); i0 = (const __fp16*) ((uintptr_t) i0 + n);  in xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8() local
|
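The f16 channel-wise kernels are the same pooling pattern in half precision, loading __fp16 data with vld1_f16/vld1q_f16. A rough sketch is below, assuming the neonfp16arith extension (e.g. -march=armv8.2-a+fp16) and a channel length that is a multiple of 8; the real kernels handle the tail by re-reading a shifted pointer, as the line-56/57 snippets show, and they reduce differently.

    /* Sketch (not the XNNPACK kernel): channel-wise global average of one row of
     * `elements` half-precision values, accumulating 8 FP16 lanes at a time and
     * finishing the reduction in FP32 to limit rounding error. */
    #include <stddef.h>
    #include <arm_neon.h>

    static __fp16 f16_gavgpool_cw_channel(const __fp16* i0, size_t elements) {
      float16x8_t vsum = vdupq_n_f16(0.0f);
      for (size_t n = elements; n != 0; n -= 8) {
        const float16x8_t vi0 = vld1q_f16(i0); i0 += 8;  /* cf. line 44 */
        vsum = vaddq_f16(vsum, vi0);
      }
      /* Widen the two FP16 halves to FP32 and sum them horizontally. */
      float32x4_t vacc = vaddq_f32(vcvt_f32_f16(vget_low_f16(vsum)),
                                   vcvt_f32_f16(vget_high_f16(vsum)));
      float32x2_t vred = vadd_f32(vget_low_f32(vacc), vget_high_f32(vacc));
      vred = vpadd_f32(vred, vred);
      return (__fp16) (vget_lane_f32(vred, 0) / (float) elements);
    }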
/external/XNNPACK/src/f32-spmm/gen/ |
D | 4x1-minmax-scalar.c |
      48  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar() local
      89  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar() local
     135  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar() local
     160  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar() local
     193  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar() local
     212  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar() local
|
D | 2x1-minmax-scalar.c |
      46  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_2x1__scalar() local
      73  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_2x1__scalar() local
     108  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_2x1__scalar() local
     127  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_2x1__scalar() local
|
D | 8x1-minmax-scalar.c |
      52  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     121  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     189  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     226  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     270  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     295  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     328  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
     347  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar() local
|
D | 8x2-minmax-scalar.c |
      60  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     163  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     235  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     290  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     336  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     371  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     405  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
     430  const float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x2__scalar() local
|
D | 4x1-minmax-scalar-pipelined.c |
      40  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined() local
      94  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined() local
     132  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_4x1__scalar_pipelined() local
|
D | 8x1-minmax-scalar-pipelined.c |
      40  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
     122  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
     174  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
     212  float vi0 = input[0];  in xnn_f32_spmm_minmax_ukernel_8x1__scalar_pipelined() local
|
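In the sparse f32 kernels above, vi0 is simply input[0]: the first of the MR dense input values gathered for the current nonzero weight. A scalar sketch of that gather-multiply-accumulate loop for MR = 2 follows; the compressed layout used here (per-output-channel nonzero counts plus explicit column indices) is a simplification of XNNPACK's pointer-difference encoding, the data layout is illustrative, and the bias term is omitted.

    /* Sketch (not the XNNPACK kernel): scalar sparse-matrix * dense-matrix
     * product for a batch of 2 pixels, with min/max clamping as in the
     * minmax kernels listed above. */
    #include <stddef.h>
    #include <stdint.h>

    static void f32_spmm_2x1_scalar(
        size_t output_channels,
        const float* input,          /* [input_channels][2] interleaved pixels */
        const float* weights,        /* nonzero weights, channel by channel    */
        const uint32_t* col_index,   /* input channel of each nonzero weight   */
        const uint32_t* nnz_per_oc,  /* nonzeros per output channel            */
        float* output,               /* [output_channels][2]                   */
        float vmin, float vmax) {
      for (size_t oc = 0; oc < output_channels; oc++) {
        float vacc0 = 0.0f;
        float vacc1 = 0.0f;
        for (uint32_t nnz = nnz_per_oc[oc]; nnz != 0; nnz--) {
          const float* in = input + (size_t) (*col_index++) * 2;
          const float vi0 = in[0];   /* cf. line 46: first pixel's value */
          const float vi1 = in[1];
          const float vw = *weights++;
          vacc0 += vi0 * vw;
          vacc1 += vi1 * vw;
        }
        /* Clamp to [vmin, vmax] as the minmax kernels do. */
        vacc0 = vacc0 < vmin ? vmin : (vacc0 > vmax ? vmax : vacc0);
        vacc1 = vacc1 < vmin ? vmin : (vacc1 > vmax ? vmax : vacc1);
        output[oc * 2 + 0] = vacc0;
        output[oc * 2 + 1] = vacc1;
      }
    }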
/external/XNNPACK/src/qu8-gavgpool/gen/ |
D | 7p7x-minmax-fp32-scalar-imagic-c1.c |
      43  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1() local
      76  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1() local
     130  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1() local
|
D | 7p7x-minmax-fp32-scalar-lrintf-c1.c |
      44  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1() local
      77  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1() local
     130  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1() local
|
D | 7p7x-minmax-fp32-scalar-fmagic-c1.c |
      43  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1() local
      76  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1() local
     130  const int32_t vi0 = (int32_t) *i0++;  in xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1() local
|
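The qu8 c1 kernels widen each input byte to int32 (the vi0 lines above) and sum seven rows per channel into a 32-bit buffer before the final requantization pass. A sketch of that first pass is below; the init_bias parameter is an assumed stand-in for the kernel's params and the helper signature is illustrative.

    /* Sketch (not the XNNPACK kernel): first pass of a 7-row u8 global average
     * pool, widening each byte to int32 and summing the 7 rows per channel. */
    #include <stddef.h>
    #include <stdint.h>

    static void qu8_gavgpool_7x_first_pass(
        const uint8_t* i0, const uint8_t* i1, const uint8_t* i2, const uint8_t* i3,
        const uint8_t* i4, const uint8_t* i5, const uint8_t* i6,
        int32_t init_bias, int32_t* buffer, size_t channels) {
      for (size_t c = channels; c != 0; c--) {
        const int32_t vi0 = (int32_t) *i0++;   /* cf. line 43 */
        const int32_t vi1 = (int32_t) *i1++;
        const int32_t vi2 = (int32_t) *i2++;
        const int32_t vi3 = (int32_t) *i3++;
        const int32_t vi4 = (int32_t) *i4++;
        const int32_t vi5 = (int32_t) *i5++;
        const int32_t vi6 = (int32_t) *i6++;
        /* Pairwise sums keep the dependency chains short. */
        const int32_t vsum01 = vi0 + vi1;
        const int32_t vsum23 = vi2 + vi3;
        const int32_t vsum45 = vi4 + vi5;
        *buffer++ = init_bias + vsum01 + vsum23 + vsum45 + vi6;
      }
    }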
/external/XNNPACK/src/qs8-gavgpool/gen/ |
D | 7p7x-minmax-fp32-scalar-fmagic-c1.c |
      43  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1() local
      76  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1() local
     130  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1() local
|
D | 7p7x-minmax-fp32-scalar-lrintf-c1.c |
      44  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1() local
      77  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1() local
     130  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1() local
|
D | 7p7x-minmax-fp32-scalar-imagic-c1.c |
      43  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1() local
      76  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1() local
     130  const int32_t vi0 = (int32_t) *i0++;  in xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1() local
|
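The qs8 variants differ from the qu8 ones above only in signedness and in how the accumulated int32 sum is converted back to int8 (the fmagic, imagic, and lrintf suffixes name the fp32 output-conversion strategy). As a companion to the listing, here is a hedged sketch of the lrintf-style conversion applied to one accumulated sum; the parameter names stand in for fields of the kernel's params struct and are not the actual API.

    /* Sketch (not the XNNPACK kernel): fp32 "lrintf" requantization of one
     * accumulated sum: scale, clamp, round, then add the output zero point. */
    #include <math.h>
    #include <stdint.h>

    static int8_t qs8_requantize_fp32_lrintf(
        int32_t acc, float scale,
        float output_min_less_zero_point, float output_max_less_zero_point,
        int32_t output_zero_point) {
      float vfpacc = (float) acc * scale;                 /* scale in fp32 */
      /* Clamp before rounding so the conversion never leaves the int8 range. */
      if (vfpacc < output_min_less_zero_point) vfpacc = output_min_less_zero_point;
      if (vfpacc > output_max_less_zero_point) vfpacc = output_max_less_zero_point;
      /* lrintf rounds using the current FP mode (to-nearest-even by default). */
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      return (int8_t) (vrndacc + output_zero_point);
    }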
/external/XNNPACK/src/f32-gavgpool/ |
D | 7p7x-minmax-wasmsimd-arm-c4.c |
      39  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
      78  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
     139  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
     176  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
|
D | 7p7x-minmax-wasmsimd-x86-c4.c |
      39  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
      78  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
     139  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
     176  const v128_t vi0 = wasm_v128_load(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
|
D | 7p7x-minmax-sse-c4.c |
      39  const __m128 vi0 = _mm_loadu_ps(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
      77  const __m128 vi0 = _mm_loadu_ps(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
     138  const __m128 vi0 = _mm_loadu_ps(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
     175  const __m128 vi0 = _mm_loadu_ps(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
|
D | 7p7x-minmax-neon-c4.c |
      39  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
      70  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
     124  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
     152  const float32x4_t vi0 = vld1q_f32(i0);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
|
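The dense f32 7p7x kernels sum seven input rows four channels at a time, multiply by a precomputed 1/rows scale, and clamp to the output range. A NEON sketch of one such step follows, assuming the channel count is a multiple of 4 (the real c4 kernels handle the remainder with a shortened final load, as the line-152 snippet shows) and a single pass over exactly seven rows.

    /* Sketch (not the XNNPACK kernel): one 7-row f32 average-pool step,
     * 4 channels per iteration, scaled and clamped like the minmax c4 kernels. */
    #include <stddef.h>
    #include <arm_neon.h>

    static void f32_gavgpool_7x_neon(
        const float* i0, const float* i1, const float* i2, const float* i3,
        const float* i4, const float* i5, const float* i6,
        float* output, size_t channels,   /* assumed to be a multiple of 4 */
        float scale, float output_min, float output_max) {
      const float32x4_t vscale = vdupq_n_f32(scale);
      const float32x4_t vmin = vdupq_n_f32(output_min);
      const float32x4_t vmax = vdupq_n_f32(output_max);
      for (size_t c = channels; c != 0; c -= 4) {
        const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   /* cf. line 39 */
        const float32x4_t vi1 = vld1q_f32(i1); i1 += 4;
        const float32x4_t vi2 = vld1q_f32(i2); i2 += 4;
        const float32x4_t vi3 = vld1q_f32(i3); i3 += 4;
        const float32x4_t vi4 = vld1q_f32(i4); i4 += 4;
        const float32x4_t vi5 = vld1q_f32(i5); i5 += 4;
        const float32x4_t vi6 = vld1q_f32(i6); i6 += 4;
        float32x4_t vsum = vaddq_f32(vaddq_f32(vi0, vi1), vaddq_f32(vi2, vi3));
        vsum = vaddq_f32(vsum, vaddq_f32(vaddq_f32(vi4, vi5), vi6));
        float32x4_t vout = vmulq_f32(vsum, vscale);   /* average = sum * 1/rows */
        vout = vmaxq_f32(vout, vmin);                 /* clamp to [min, max] */
        vout = vminq_f32(vout, vmax);
        vst1q_f32(output, vout); output += 4;
      }
    }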
/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gavgpool/ |
D | mp8x7p7q-neon.c |
      42  const uint8x8_t vi0 = vld1_u8(i0);  in pytorch_q8gavgpool_ukernel_mp8x7p7q__neon() local
      85  const uint8x8_t vi0 = vld1_u8(i0);  in pytorch_q8gavgpool_ukernel_mp8x7p7q__neon() local
     164  const uint8x8_t vi0 = vld1_u8(i0);  in pytorch_q8gavgpool_ukernel_mp8x7p7q__neon() local
     238  const uint8x8_t vi0 =  in pytorch_q8gavgpool_ukernel_mp8x7p7q__neon() local
|
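The QNNPACK multi-pass kernel accumulates seven uint8 rows eight channels at a time, widening u8 to u16 while summing and then to int32 accumulators. A sketch of just that widening-accumulate step is below; bias setup and requantization are omitted, and the helper signature is illustrative rather than the library's API.

    /* Sketch (not the QNNPACK kernel): 7-row u8 sum for 8 channels, producing
     * two int32x4_t partial accumulators. Reinterpreting the u16 sums as s16 is
     * safe because 7 * 255 fits well within the signed 16-bit range. */
    #include <arm_neon.h>

    static void q8gavgpool_7x8_acc(
        const uint8_t* i0, const uint8_t* i1, const uint8_t* i2, const uint8_t* i3,
        const uint8_t* i4, const uint8_t* i5, const uint8_t* i6,
        int32x4_t* acc_lo, int32x4_t* acc_hi) {
      const uint8x8_t vi0 = vld1_u8(i0);   /* cf. line 42 */
      const uint8x8_t vi1 = vld1_u8(i1);
      const uint8x8_t vi2 = vld1_u8(i2);
      const uint8x8_t vi3 = vld1_u8(i3);
      const uint8x8_t vi4 = vld1_u8(i4);
      const uint8x8_t vi5 = vld1_u8(i5);
      const uint8x8_t vi6 = vld1_u8(i6);
      /* Widen to u16 while summing pairs of rows, then add the seventh row. */
      uint16x8_t vsum = vaddl_u8(vi0, vi1);
      vsum = vaddq_u16(vsum, vaddl_u8(vi2, vi3));
      vsum = vaddq_u16(vsum, vaddl_u8(vi4, vi5));
      vsum = vaddw_u8(vsum, vi6);
      /* Widen the 16-bit sums into the 32-bit accumulators (low/high halves). */
      *acc_lo = vaddw_s16(*acc_lo, vreinterpret_s16_u16(vget_low_u16(vsum)));
      *acc_hi = vaddw_s16(*acc_hi, vreinterpret_s16_u16(vget_high_u16(vsum)));
    }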