Searched refs:mask_even (Results 1 – 6 of 6) sorted by relevance
446 params.sse.mask_even[0] = UINT32_C(0xFFFFFFFF); in xnn_init_f32_spchw_params()
447 params.sse.mask_even[1] = -(uint32_t) (w8 >= 2); in xnn_init_f32_spchw_params()
448 params.sse.mask_even[2] = -(uint32_t) (w8 >= 4); in xnn_init_f32_spchw_params()
449 params.sse.mask_even[3] = -(uint32_t) (w8 >= 6); in xnn_init_f32_spchw_params()
465 params.neon.mask_even[0] = UINT32_C(0xFFFFFFFF); in xnn_init_f32_spchw_params()
466 params.neon.mask_even[1] = -(uint32_t) (w8 >= 2); in xnn_init_f32_spchw_params()
467 params.neon.mask_even[2] = -(uint32_t) (w8 >= 4); in xnn_init_f32_spchw_params()
468 params.neon.mask_even[3] = -(uint32_t) (w8 >= 6); in xnn_init_f32_spchw_params()
492 params->sse.mask_even[0] = UINT32_C(0xFFFFFFFF); in xnn_update_f32_spchw_params()
493 params->sse.mask_even[1] = -(uint32_t) (w8 >= 2); in xnn_update_f32_spchw_params()
[all …]
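The negation idiom in these hits is how the params builders turn a scalar remainder count into per-lane SIMD masks: in C, (w8 >= k) evaluates to 0 or 1, and negating that value as uint32_t yields 0x00000000 or 0xFFFFFFFF, i.e. an all-zeros or all-ones 32-bit lane mask. A minimal standalone sketch of the same construction follows; the function and variable names are illustrative, not the XNNPACK source, and the exact meaning of w8 (the leftover-column count fed to the thresholds) is taken from the surrounding code rather than stated here.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the mask_even construction above. Each lane guards one
 * even-indexed input column of a stride-2 remainder row; (w8 >= k) is
 * 0 or 1, and -(uint32_t)(w8 >= k) is 0x00000000 or 0xFFFFFFFF. */
static void build_mask_even(uint32_t mask[4], size_t w8) {
  mask[0] = UINT32_C(0xFFFFFFFF);   /* lane 0 is always live */
  mask[1] = -(uint32_t) (w8 >= 2);
  mask[2] = -(uint32_t) (w8 >= 4);
  mask[3] = -(uint32_t) (w8 >= 6);
}

int main(void) {
  uint32_t mask[4];
  build_mask_even(mask, 5);  /* w8 = 5 switches on lanes 0..2 */
  for (int i = 0; i < 4; i++) {
    printf("mask[%d] = 0x%08" PRIX32 "\n", i, mask[i]);
  }
  return 0;
}

Note that the init function fills identical values into both the sse and neon variants of the params, so both kernel families consume the same precomputed mask.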
46 XNN_ALIGN(16) uint32_t mask_even[4]; // used by stride 2 kernels
55 XNN_ALIGN(16) uint32_t mask_even[4]; // used by stride 2 kernels
186 const bool32x4_t mask_even = vec_cmplt(thres_even, lim); in mask_s16() local
187 return vec_perm((bool16x8_t)mask_even, (bool16x8_t)mask_odd, mask_merge); in mask_s16()
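Result 3 is from a different codebase (AltiVec intrinsics) but uses the same all-ones/all-zeros lane trick in 16-bit form: vec_cmplt produces homogeneous 32-bit comparison lanes, and vec_perm interleaves the even and odd masks into one 16-bit mask per element. A portable scalar sketch of that merge is below; the interleave pattern is an assumption, since the actual mask_merge permute vector does not appear in the hit.

#include <stdint.h>

/* Scalar sketch of the merge in mask_s16() above. Because each 32-bit
 * comparison lane is homogeneous (all ones or all zeros), any 16 bits of
 * it serve as the s16 mask; interleaving even and odd lanes (assumed
 * pattern) yields one 16-bit mask per output element. */
static void merge_mask_s16(uint16_t out[8],
                           const int32_t thres_even[4],
                           const int32_t thres_odd[4],
                           const int32_t lim[4]) {
  for (int i = 0; i < 4; i++) {
    out[2 * i + 0] = (thres_even[i] < lim[i]) ? 0xFFFF : 0x0000;
    out[2 * i + 1] = (thres_odd[i] < lim[i]) ? 0xFFFF : 0x0000;
  }
}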
28 const uint32x4_t vmask_even = vld1q_u32(params->neon.mask_even); in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__neonfma()
28 const __m128 vmask_even = _mm_load_ps((const float*) params->sse.mask_even); in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__sse()
28 const uint32x4_t vmask_even = vld1q_u32(params->neon.mask_even); in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
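Results 4–6 show the consumer side: each stride-2 kernel loads mask_even once before its main loop. The XNN_ALIGN(16) on the field (result 2) is what makes the SSE kernel's _mm_load_ps legal, since that intrinsic requires a 16-byte-aligned address; NEON's vld1q_u32 imposes no such requirement. The sketch below is assumed rather than taken from the kernel bodies: it shows how such a mask is typically applied to the last partial vector of a row, and it presumes the row buffer is padded so a full-width load past the logical end of the row is safe.

#include <arm_neon.h>

/* Sketch (assumed, not the actual XNNPACK kernel body): apply vmask_even
 * to the final partial vector of even-indexed inputs. ANDing with the
 * mask forces dead lanes to +0.0f, so they contribute nothing to the
 * multiply-accumulates that follow. */
static float32x4_t mask_row_even(const float* row_even, uint32x4_t vmask_even) {
  float32x4_t v = vld1q_f32(row_even);  /* full-width load; padding assumed */
  return vreinterpretq_f32_u32(
      vandq_u32(vmask_even, vreinterpretq_u32_f32(v)));
}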