/external/XNNPACK/src/f32-gavgpool-spchw/

  neon-x4.c  (all hits in xnn_f32_gavgpool_spchw_ukernel__neon_x4)
     42  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     47  vsum0 = vaddq_f32(vsum0, vi0);
     55  float32x4_t vi0 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + n);   (local)
     60  vi0 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0)));
     65  vsum0 = vaddq_f32(vsum0, vi0);
    103  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
    104  vsum0 = vaddq_f32(vsum0, vi0);
    109  float32x4_t vi0 = vld1q_f32(i0); i0 = (const float*) ((uintptr_t) i0 + n);   (local)
    110  vi0 = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi0)));
    111  vsum0 = vaddq_f32(vsum0, vi0);

  sse-x4.c  (all hits in xnn_f32_gavgpool_spchw_ukernel__sse_x4)
     42  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     51  vsum0 = _mm_add_ps(vsum0, vi0);
     59  const __m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);   (local)
     68  vsum0 = _mm_add_ps(vsum0, vi0);
     97  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     99  vsum = _mm_add_ps(vsum, vi0);
    104  __m128 vi0 = _mm_and_ps(_mm_loadu_ps(i0), vmask);   (local)
    106  vsum = _mm_add_ps(vsum, vi0);
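In these gavgpool-spchw kernels, vi0 is the vector currently being accumulated: full four-float vectors are added directly, and the partial tail is loaded once and AND-ed with a lane mask so out-of-range lanes contribute zero. A minimal SSE sketch of that accumulate-and-mask idea follows; the function name is illustrative, not the XNNPACK API, and it assumes the buffer stays readable up to the next multiple of four floats, mirroring the over-read-and-mask tail above.

    #include <stddef.h>
    #include <stdint.h>
    #include <emmintrin.h>

    /* Sum n floats with SSE, masking the partial tail vector so that lanes
     * past the end contribute zero. Assumes x[] is readable up to the next
     * multiple of four floats. Illustrative sketch only. */
    static float sum_f32_sse(const float* x, size_t n) {
      __m128 vsum = _mm_setzero_ps();
      size_t i = 0;
      for (; i + 4 <= n; i += 4) {
        vsum = _mm_add_ps(vsum, _mm_loadu_ps(x + i));
      }
      if (i < n) {
        /* All-ones bits for valid lanes, zero bits for lanes past the end. */
        const uint32_t bits[4] = {
          n - i > 0 ? UINT32_MAX : 0,
          n - i > 1 ? UINT32_MAX : 0,
          n - i > 2 ? UINT32_MAX : 0,
          0,
        };
        const __m128 vmask = _mm_castsi128_ps(_mm_loadu_si128((const __m128i*) bits));
        vsum = _mm_add_ps(vsum, _mm_and_ps(_mm_loadu_ps(x + i), vmask));
      }
      /* Horizontal reduction of the four partial sums. */
      __m128 vred = _mm_add_ps(vsum, _mm_movehl_ps(vsum, vsum));
      vred = _mm_add_ss(vred, _mm_shuffle_ps(vred, vred, 1));
      return _mm_cvtss_f32(vred);
    }

Global average pooling then scales this sum by the reciprocal of the number of pooled elements to produce the average.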
/external/XNNPACK/src/f32-gavgpool/

  mp7p7q-scalar.c  (all hits in xnn_f32_gavgpool_ukernel_mp7p7q__scalar)
     37  const float vi0 = *i0++;   (local)
     45  const float vsum01 = vi0 + vi1;
     69  const float vi0 = *i0++;   (local)
     78  const float vsum01 = vi0 + vi1;
    123  const float vi0 = *i0++;   (local)
    132  const float vsum01 = vi0 + vi1;

  mp7p7q-wasm.c  (all hits in xnn_f32_gavgpool_ukernel_mp7p7q__wasm)
     37  const float vi0 = *i0++;   (local)
     45  const float vsum01 = vi0 + vi1;
     69  const float vi0 = *i0++;   (local)
     78  const float vsum01 = vi0 + vi1;
    123  const float vi0 = *i0++;   (local)
    132  const float vsum01 = vi0 + vi1;

  mp7p7q-neon.c  (all hits in xnn_f32_gavgpool_ukernel_mp7p7q__neon)
     39  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     47  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
     70  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     79  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
    124  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
    133  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
    152  const float32x4_t vi0 = vld1q_f32(i0);   (local)
    161  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);

  mp7p7q-sse.c  (all hits in xnn_f32_gavgpool_ukernel_mp7p7q__sse)
     39  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     54  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
     77  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     93  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    138  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    155  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    175  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    184  const __m128 vsum01 = _mm_add_ps(vi0, vi1);

  mp7p7q-psimd.c  (all hits in xnn_f32_gavgpool_ukernel_mp7p7q__psimd)
     39  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
     54  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
     77  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
     93  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
    138  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    155  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
    175  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    184  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
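In these multi-pass (mp7p7q) kernels, vi0 is the first of seven row inputs reduced per pass with a pairwise tree that starts at vsum01 = vi0 + vi1. A scalar sketch of one such pass follows; the function and buffer names are illustrative, not the XNNPACK entry point.

    #include <stddef.h>

    /* One seven-row pass of a multi-pass global average pool: reduce the
     * seven inputs with a pairwise tree (vsum01, vsum23, ...) and add the
     * result into a running accumulator. Illustrative sketch only. */
    static void gavgpool_pass7(const float* i0, const float* i1, const float* i2,
                               const float* i3, const float* i4, const float* i5,
                               const float* i6, float* acc, size_t channels) {
      for (size_t c = 0; c < channels; c++) {
        const float vi0 = *i0++;
        const float vi1 = *i1++;
        const float vi2 = *i2++;
        const float vi3 = *i3++;
        const float vi4 = *i4++;
        const float vi5 = *i5++;
        const float vi6 = *i6++;
        const float vsum01 = vi0 + vi1;
        const float vsum23 = vi2 + vi3;
        const float vsum45 = vi4 + vi5;
        const float vsum016 = vsum01 + vi6;
        acc[c] += vsum016 + vsum23 + vsum45;
      }
    }

A final pass then multiplies the accumulated sums by a precomputed 1/(pooled elements) scale.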
/external/XNNPACK/src/f32-pavgpool/

  mp9p8q-scalar.c  (all hits in xnn_f32_pavgpool_ukernel_mp9p8q__scalar)
     47  const float vi0 = *i0++;   (local)
     57  const float vsum01 = vi0 + vi1;
     84  const float vi0 = *i0++;   (local)
     94  const float vsum01 = vi0 + vi1;
    144  const float vi0 = *i0++;   (local)
    154  const float vsum01 = vi0 + vi1;

  mp9p8q-wasm.c  (all hits in xnn_f32_pavgpool_ukernel_mp9p8q__wasm)
     47  const float vi0 = *i0++;   (local)
     57  const float vsum01 = vi0 + vi1;
     84  const float vi0 = *i0++;   (local)
     94  const float vsum01 = vi0 + vi1;
    144  const float vi0 = *i0++;   (local)
    154  const float vsum01 = vi0 + vi1;

  mp9p8q-psimd.c  (all hits in xnn_f32_pavgpool_ukernel_mp9p8q__psimd)
     47  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
     66  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
     93  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    111  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
    163  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    182  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
    201  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    211  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);

  mp9p8q-sse.c  (all hits in xnn_f32_pavgpool_ukernel_mp9p8q__sse)
     47  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     66  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
     92  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    110  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    161  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    180  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    199  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    209  const __m128 vsum01 = _mm_add_ps(vi0, vi1);

  mp9p8q-neon.c  (all hits in xnn_f32_pavgpool_ukernel_mp9p8q__neon)
     47  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     57  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
     83  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     93  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
    143  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
    153  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
    171  const float32x4_t vi0 = vld1q_f32(i0);   (local)
    181  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
/external/XNNPACK/src/f32-avgpool/

  mp9p8q-scalar.c  (all hits in xnn_f32_avgpool_ukernel_mp9p8q__scalar)
     47  const float vi0 = *i0++;   (local)
     57  const float vsum01 = vi0 + vi1;
     84  const float vi0 = *i0++;   (local)
     94  const float vsum01 = vi0 + vi1;
    142  const float vi0 = *i0++;   (local)
    152  const float vsum01 = vi0 + vi1;

  mp9p8q-wasm.c  (all hits in xnn_f32_avgpool_ukernel_mp9p8q__wasm)
     47  const float vi0 = *i0++;   (local)
     57  const float vsum01 = vi0 + vi1;
     84  const float vi0 = *i0++;   (local)
     94  const float vsum01 = vi0 + vi1;
    142  const float vi0 = *i0++;   (local)
    152  const float vsum01 = vi0 + vi1;

  mp9p8q-sse.c  (all hits in xnn_f32_avgpool_ukernel_mp9p8q__sse)
     47  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     66  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
     92  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    110  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    158  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    177  const __m128 vsum01 = _mm_add_ps(vi0, vi1);
    196  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    206  const __m128 vsum01 = _mm_add_ps(vi0, vi1);

  mp9p8q-neon.c  (all hits in xnn_f32_avgpool_ukernel_mp9p8q__neon)
     47  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     57  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
     83  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
     93  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
    141  const float32x4_t vi0 = vld1q_f32(i0); i0 += 4;   (local)
    151  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);
    169  const float32x4_t vi0 = vld1q_f32(i0);   (local)
    179  const float32x4_t vsum01 = vaddq_f32(vi0, vi1);

  mp9p8q-psimd.c  (all hits in xnn_f32_avgpool_ukernel_mp9p8q__psimd)
     47  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
     66  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
     93  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    111  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
    160  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    179  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
    198  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    208  const psimd_f32 vsum01 = psimd_add_f32(vi0, vi1);
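The pavgpool and avgpool listings above share the mp9p8q shape: a first pass reduces nine pooling rows, later passes fold eight more rows into an accumulation buffer, and a final pass applies the averaging scale (in the pavgpool variant that scale can vary per output pixel, which is what distinguishes it from plain avgpool). A scalar sketch of the nine-row first pass follows; the helper name and signature are illustrative, not the XNNPACK API.

    #include <stddef.h>

    /* First pass of an mp9p8q-style average pool: reduce nine rows with a
     * pairwise tree and write the partial sums to a scratch buffer. Later
     * passes would add eight rows at a time; the last pass scales the sum.
     * Illustrative sketch only. */
    static void avgpool_first_pass9(const float* const rows[9], float* buf,
                                    size_t channels) {
      for (size_t c = 0; c < channels; c++) {
        const float vsum01 = rows[0][c] + rows[1][c];
        const float vsum23 = rows[2][c] + rows[3][c];
        const float vsum45 = rows[4][c] + rows[5][c];
        const float vsum67 = rows[6][c] + rows[7][c];
        const float vsum018 = vsum01 + rows[8][c];
        buf[c] = vsum018 + vsum23 + vsum45 + vsum67;
      }
    }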
/external/XNNPACK/src/f32-argmaxpool/

  9p8x-scalar-c1.c  (all hits in xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1)
     59  const float vi0 = *i0++;   (local)
     69  float vmax = vi0;
    141  const float vi0 = *i0++;   (local)
    153  if (vi0 > vmax) {
    154  vmax = vi0;
    245  const float vi0 = *i0++;   (local)
    257  if (vi0 > vmax) {
    258  vmax = vi0;
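Here vi0 seeds the running maximum, and later rows update both the maximum and the index of the row that produced it, which is the extra bookkeeping arg-max pooling needs. A scalar sketch of that pattern follows; the helper name is hypothetical.

    #include <stddef.h>
    #include <stdint.h>

    /* Argmax pooling at one element position: return the largest value
     * across the pooling rows and the index of the row it came from.
     * Illustrative sketch only. */
    static void argmax_window(const float* const rows[], size_t num_rows,
                              size_t c, float* out_value, uint32_t* out_index) {
      float vmax = rows[0][c];
      uint32_t vidx = 0;
      for (uint32_t r = 1; r < (uint32_t) num_rows; r++) {
        const float vi = rows[r][c];
        if (vi > vmax) {
          vmax = vi;
          vidx = r;
        }
      }
      *out_value = vmax;
      *out_index = vidx;
    }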
/external/XNNPACK/src/f32-prelu/gen/

  scalar-2x1.c  (all hits in xnn_f32_prelu_ukernel__scalar_2x1)
     52  const float vi0 = *i0++;   (local)
     55  float vacc0 = signbit(vi0) ? vi0 * vw : vi0;

  wasm-2x1.c  (all hits in xnn_f32_prelu_ukernel__wasm_2x1)
     52  const float vi0 = *i0++;   (local)
     55  float vacc0 = signbit(vi0) ? vi0 * vw : vi0;
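The PReLU kernels select on the sign bit of vi0: inputs with the sign bit set are scaled by the per-channel weight, everything else passes through unchanged. A scalar sketch of the same element-wise rule follows; the function name is illustrative, not the generated ukernel.

    #include <math.h>
    #include <stddef.h>

    /* Scalar PReLU over one row: scale sign-bit-set inputs by the
     * per-channel weight, pass non-negative inputs through. */
    static void prelu_row(const float* x, const float* w, float* y, size_t channels) {
      for (size_t c = 0; c < channels; c++) {
        const float vi = x[c];
        const float vw = w[c];
        y[c] = signbit(vi) ? vi * vw : vi;
      }
    }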
/external/XNNPACK/src/f32-maxpool/

  9p8x-sse-c4.c  (all hits in xnn_f32_maxpool_ukernel_9p8x__sse_c4)
     78  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
     97  const __m128 vmax018 = _mm_max_ps(_mm_max_ps(vi0, vi1), vi8);
    111  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    130  const __m128 vmax018 = _mm_max_ps(_mm_max_ps(vi0, vi1), vi8);
    194  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    212  const __m128 vmax01 = _mm_max_ps(_mm_max_ps(vi0, vi1), vo);
    226  const __m128 vi0 = _mm_loadu_ps(i0);   (local)
    236  const __m128 vmax01 = _mm_max_ps(_mm_max_ps(vi0, vi1), vo);

  9p8x-psimd-c4.c  (all hits in xnn_f32_maxpool_ukernel_9p8x__psimd_c4)
     78  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
     97  const psimd_f32 vmax018 = psimd_max_f32(psimd_max_f32(vi0, vi1), vi8);
    111  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    130  const psimd_f32 vmax018 = psimd_max_f32(psimd_max_f32(vi0, vi1), vi8);
    194  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    212  const psimd_f32 vmax01 = psimd_max_f32(psimd_max_f32(vi0, vi1), vo);
    226  const psimd_f32 vi0 = psimd_load_f32(i0);   (local)
    236  const psimd_f32 vmax01 = psimd_max_f32(psimd_max_f32(vi0, vi1), vo);
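In the 9p8x max-pooling kernels, nine row vectors are reduced with a tree of element-wise max operations (vmax018 combines rows 0, 1 and 8), and later passes fold further rows into the previous output vo. A compact SSE sketch of the nine-way reduction follows; the helper name is hypothetical.

    #include <xmmintrin.h>

    /* Element-wise maximum of nine 4-float vectors, using a pairwise max
     * tree shaped like the vmax018 reduction above. Illustrative sketch. */
    static __m128 max9_ps(const __m128 v[9]) {
      const __m128 vmax018 = _mm_max_ps(_mm_max_ps(v[0], v[1]), v[8]);
      const __m128 vmax23 = _mm_max_ps(v[2], v[3]);
      const __m128 vmax45 = _mm_max_ps(v[4], v[5]);
      const __m128 vmax67 = _mm_max_ps(v[6], v[7]);
      const __m128 vmax2345 = _mm_max_ps(vmax23, vmax45);
      const __m128 vmax01678 = _mm_max_ps(vmax018, vmax67);
      return _mm_max_ps(vmax2345, vmax01678);
    }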
/external/XNNPACK/src/q8-gavgpool/

  mp7p7q-scalar.c  (all hits in xnn_q8_gavgpool_ukernel_mp7p7q__scalar)
     41  const uint32_t vi0 = (uint32_t) *i0++;   (local)
     49  const uint32_t vsum01 = vi0 + vi1;
     75  const uint32_t vi0 = (uint32_t) *i0++;   (local)
     83  const uint32_t vsum01 = vi0 + vi1;
    135  const uint32_t vi0 = (uint32_t) *i0++;   (local)
    143  const uint32_t vsum01 = vi0 + vi1;
/external/XNNPACK/src/q8-avgpool/

  mp9p8q-scalar.c  (all hits in xnn_q8_avgpool_ukernel_mp9p8q__scalar)
     54  const uint32_t vi0 = (uint32_t) *i0++;   (local)
     64  const uint32_t vsum01 = vi0 + vi1;
     94  const uint32_t vi0 = (uint32_t) *i0++;   (local)
    103  const uint32_t vsum01 = vi0 + vi1;
    154  const uint32_t vi0 = (uint32_t) *i0++;   (local)
    163  const uint32_t vsum01 = vi0 + vi1;
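In the quantized (q8) pooling kernels above, each uint8 input is widened to uint32 before the pairwise sums, so the accumulation cannot overflow; requantizing the averaged result back to uint8 happens in a later step that these hits do not show. A scalar sketch of the widening accumulation follows; the helper name is illustrative.

    #include <stddef.h>
    #include <stdint.h>

    /* Sum uint8 inputs from several rows in 32-bit arithmetic, as the
     * (uint32_t) casts above do; requantization to uint8 is omitted.
     * Illustrative sketch only. */
    static uint32_t q8_sum_rows(const uint8_t* const rows[], size_t num_rows,
                                size_t c) {
      uint32_t vsum = 0;
      for (size_t r = 0; r < num_rows; r++) {
        vsum += (uint32_t) rows[r][c];
      }
      return vsum;
    }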
/external/XNNPACK/src/u8-maxpool/

  9p8x-scalar-c1.c  (all hits in xnn_u8_maxpool_ukernel_9p8x__scalar_c1)
     76  const uint8_t vi0 = *i0++;   (local)
     86  const uint8_t vmax01 = vi0 > vi1 ? vi0 : vi1;
    145  const uint8_t vi0 = *i0++;   (local)
    155  const uint8_t vmax01 = vi0 > vi1 ? vi0 : vi1;
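The uint8 max-pooling kernel reduces its window with plain ternary comparisons, starting from the vi0/vi1 pair above. A scalar sketch of the per-element reduction follows; the helper name is hypothetical.

    #include <stddef.h>
    #include <stdint.h>

    /* Maximum of one element position across the pooling rows, using the
     * same ternary max as the scalar kernel above. Illustrative sketch. */
    static uint8_t u8_max_window(const uint8_t* const rows[], size_t num_rows,
                                 size_t c) {
      uint8_t vmax = rows[0][c];
      for (size_t r = 1; r < num_rows; r++) {
        const uint8_t vi = rows[r][c];
        vmax = vi > vmax ? vi : vmax;
      }
      return vmax;
    }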