Searched refs:mask_odd (Results 1 – 6 of 6) sorted by relevance
450   params.sse.mask_odd[0] = -(uint32_t) (w8 >= 1);   in xnn_init_f32_spchw_params()
451   params.sse.mask_odd[1] = -(uint32_t) (w8 >= 3);   in xnn_init_f32_spchw_params()
452   params.sse.mask_odd[2] = -(uint32_t) (w8 >= 5);   in xnn_init_f32_spchw_params()
453   params.sse.mask_odd[3] = -(uint32_t) (w8 >= 7);   in xnn_init_f32_spchw_params()
469   params.neon.mask_odd[0] = -(uint32_t) (w8 >= 1);  in xnn_init_f32_spchw_params()
470   params.neon.mask_odd[1] = -(uint32_t) (w8 >= 3);  in xnn_init_f32_spchw_params()
471   params.neon.mask_odd[2] = -(uint32_t) (w8 >= 5);  in xnn_init_f32_spchw_params()
472   params.neon.mask_odd[3] = -(uint32_t) (w8 >= 7);  in xnn_init_f32_spchw_params()
496   params->sse.mask_odd[0] = -(uint32_t) (w8 >= 1);  in xnn_update_f32_spchw_params()
497   params->sse.mask_odd[1] = -(uint32_t) (w8 >= 3);  in xnn_update_f32_spchw_params()
[all …]
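Aside: the -(uint32_t) (cond) idiom in these hits turns a boolean into an all-ones (0xFFFFFFFF) or all-zeros lane, which is exactly a SIMD lane mask. A minimal stand-alone sketch of the initialization, assuming w8 is the leftover row width (0..7 elements) and lane i of the odd-column vector reads input element 2*i + 1 (that assumption matches the 1/3/5/7 thresholds above):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch only: rebuilds the mask_odd values above outside XNNPACK.
       Assumption: w8 is the number of leftover input elements in a row (0..7),
       and lane i of a 4-lane vector holds the odd-indexed element 2*i + 1.
       -(uint32_t) (cond) is 0xFFFFFFFF when cond holds, 0x00000000 otherwise. */
    static void init_mask_odd(uint32_t mask_odd[4], size_t w8) {
      for (size_t i = 0; i < 4; i++) {
        mask_odd[i] = -(uint32_t) (w8 >= 2 * i + 1);
      }
    }

    int main(void) {
      uint32_t mask[4];
      init_mask_odd(mask, 5);  /* elements 1, 3 and 5 exist; element 7 does not */
      for (int i = 0; i < 4; i++) {
        printf("mask_odd[%d] = 0x%08X\n", i, (unsigned) mask[i]);
      }
      return 0;  /* prints FFFFFFFF, FFFFFFFF, FFFFFFFF, 00000000 */
    }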
47    XNN_ALIGN(16) uint32_t mask_odd[4];  // used by stride 2 kernels
56    XNN_ALIGN(16) uint32_t mask_odd[4];  // used by stride 2 kernels
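The XNN_ALIGN(16) on these fields matters because the kernels below fetch the mask with aligned 128-bit loads (_mm_load_ps, vld1q_u32). A sketch of the idea; the macro expansion and the surrounding struct are assumptions here, not the actual xnn_f32_spchw_params definition:

    #include <stdint.h>

    #if defined(_MSC_VER)
      #define XNN_ALIGN(bytes) __declspec(align(bytes))
    #else
      #define XNN_ALIGN(bytes) __attribute__((aligned(bytes)))
    #endif

    struct spchw_params_sketch {           /* hypothetical name */
      XNN_ALIGN(16) uint32_t mask_odd[4];  /* 16-byte aligned so aligned
                                              128-bit loads are legal */
    };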
185   const bool32x4_t mask_odd = vec_cmplt(thres_odd, lim);  in mask_s16() local
187   return vec_perm((bool16x8_t)mask_even, (bool16x8_t)mask_odd, mask_merge);  in mask_s16()
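This hit is a different pattern: two 32-bit comparison masks, one over even-indexed and one over odd-indexed thresholds, are merged by vec_perm into a single 16-bit-lane mask. A scalar sketch of that merge; the names are taken from the snippet, and the exact vec_perm merge pattern is an assumption:

    #include <stdint.h>

    /* Scalar sketch of the mask_s16() pattern: each 32-bit compare result is
       narrowed to one 16-bit lane, with even and odd lanes interleaved. */
    static void mask_s16_scalar(const int32_t thres_even[4],
                                const int32_t thres_odd[4],
                                const int32_t lim[4],
                                int16_t out[8]) {
      for (int i = 0; i < 4; i++) {
        out[2 * i]     = (thres_even[i] < lim[i]) ? (int16_t) -1 : 0;
        out[2 * i + 1] = (thres_odd[i]  < lim[i]) ? (int16_t) -1 : 0;
      }
    }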
29 const uint32x4_t vmask_odd = vld1q_u32(params->neon.mask_odd); in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__neonfma()
29 const __m128 vmask_odd = _mm_load_ps((const float*) params->sse.mask_odd); in xnn_f32_dwconv_spchw_ukernel_3x3s2p1__sse()
29 const uint32x4_t vmask_odd = vld1q_u32(params->neon.mask_odd); in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__neonfma()
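In each of these stride-2 kernels the loaded mask guards the row tail: after the row is de-interleaved into even and odd columns, the odd-column lanes that lie past the row end are zeroed by a bitwise AND with mask_odd, so they contribute nothing to the multiply-accumulate. A hedged NEON sketch; variable names other than vmask_odd are assumptions:

    #include <arm_neon.h>

    /* Sketch: zero the odd-column lanes that lie beyond the row end.
       vi_odd would come from a de-interleaving load such as vld2q_f32. */
    static inline float32x4_t apply_mask_odd(float32x4_t vi_odd,
                                             uint32x4_t vmask_odd) {
      return vreinterpretq_f32_u32(
          vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi_odd)));
    }

The SSE variant would do the same masking with _mm_and_ps on the mask it loads at its line 29.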