/external/XNNPACK/src/f32-velu/ |
D | avx-rr2-lut4-p4-perm.c.in |
    54  __m256 ven${N} = _mm256_andnot_ps(vindex_mask, vn${N});
    56  const __m128 ven${N}_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven${N})), 21));
    60  const __m128 ven${N}_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven${N}, 1)), 21));
    64  ven${N} = _mm256_insertf128_ps(_mm256_castps128_ps256(ven${N}_lo), ven${N}_hi, 1);
    68  __m256 vs${N} = _mm256_mul_ps(vl${N}, ven${N});
   105  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  variable
   107  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
   109  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
   112  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
   114  __m256 vs = _mm256_mul_ps(vl, ven);
   [all …]
|
D | scalar-rr2-lut16-p3.c.in |
    54  const uint32_t ven${N} = float_as_uint32(vn${N}) << 19;
    60  float vs${N} = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx${N}] + ven${N});
   113  const uint32_t ven = float_as_uint32(vn) << 19;
   118  float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
   158  const uint32_t ven = float_as_uint32(vn) << 19;
   163  float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
   202  const uint32_t ven = float_as_uint32(vn) << 19;
   207  float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
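Across the ELU kernels indexed here, the shift count is always 23 minus the LUT's index width: 21 for the 4-entry tables above, 20 for 8 entries, 19 for the 16-entry tables in this file. The bits of the magic-biased vn above the index land at bit 23, the float exponent field, so an integer add to the table entry rescales it by a power of two. A minimal sketch of that exponent-add trick (illustrative helper, not kernel code; assumes a normal, positive float):

#include <stdint.h>
#include <string.h>

/* Illustration only: adding q << 23 to the bit pattern of a normal,
 * positive float multiplies it by 2^q. The kernels above fold this add
 * into the table lookup: ven supplies the shifted exponent bits and
 * xnn_table_exp2minus_k_over_16[vidx] supplies the significand. */
static float scale_by_pow2(float l, int32_t q) {
  uint32_t bits;
  memcpy(&bits, &l, sizeof bits);
  bits += (uint32_t) q << 23;  /* bump the biased exponent field by q */
  memcpy(&l, &bits, sizeof l);
  return l;                    /* == l * 2^q while the result stays normal */
}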
|
/external/XNNPACK/src/f32-velu/gen/ |
D | velu-avx-rr2-lut4-p4-perm-x8.c |
    48  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8() local
    50  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    52  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    55  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    57  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    84  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8() local
    86  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    88  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    91  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
    93  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8()
|
D | velu-avx-rr2-lut4-p4-perm-x16.c |
   110  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16() local
   112  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   114  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   117  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   119  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   146  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16() local
   148  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   150  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   153  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
   155  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16()
|
D | velu-avx-rr2-lut4-p4-perm-x24.c |
   132  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24() local
   134  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   136  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   139  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   141  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   168  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24() local
   170  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   172  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   175  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
   177  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24()
|
D | velu-avx-rr2-lut4-p4-perm-x32.c |
   154  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32() local
   156  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   158  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   161  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   163  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   190  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32() local
   192  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   194  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   197  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
   199  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32()
|
D | velu-avx-rr2-lut4-p4-perm-x40.c |
   176  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40() local
   178  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   180  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   183  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   185  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   212  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40() local
   214  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   216  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   219  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
   221  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40()
|
D | velu-avx-rr2-lut4-p4-perm-x48.c |
   198  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48() local
   200  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   202  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   205  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   207  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   234  __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48() local
   236  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   238  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   241  ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
   243  __m256 vs = _mm256_mul_ps(vl, ven);  in xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48()
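Every velu-avx-rr2-lut4-p4-perm variant above repeats one workaround: plain AVX has no 256-bit integer shift, so the 32-bit shift by 21 (= 23 - 2 index bits for the 4-entry LUT) is done per 128-bit half with SSE2 and the halves are reassembled. The pattern, collected into one helper with the truncated lines restored (a transcription of the matches above, not new kernel code):

#include <immintrin.h>

/* AVX1 lacks _mm256_slli_epi32, so shift each 128-bit half with the
 * SSE2 _mm_slli_epi32 and stitch the halves back together. */
static __m256 velu_avx_shift21(__m256 ven) {
  const __m128 ven_lo = _mm_castsi128_ps(
      _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
  const __m128 ven_hi = _mm_castsi128_ps(
      _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
  return _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
}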
|
D | velu-avx512f-rr1-lut16-p3-perm-x16.c |
    47  const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16() local
    49  __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
    80  const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16() local
    82  __m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));  in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
|
D | velu-avx2-rr1-lut8-p4-perm-x8.c |
    45  const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8() local
    47  __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8()
    77  const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8() local
    79  __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));  in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8()
|
D | velu-avx2-rr1-lut16-p3-gather-x8.c |
    49  const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8() local
    52  __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8()
    82  const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8() local
    85  __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));  in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8()
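On AVX2 (and AVX-512 above) the same exponent shift is a single wide instruction, so no half splitting is needed; the shift count is again 23 minus the index width (20 for the 8-entry LUT, 19 for the 16-entry one). A sketch of the AVX2 form, mirroring the matched lines; vl is assumed to already hold the permuted or gathered table bits:

#include <immintrin.h>

/* Sketch: one 256-bit shift replaces the AVX split/shift/merge above,
 * then an integer add folds the exponent bits into the table entry. */
static __m256 scale_avx2_lut8(__m256 vn, __m256i vl) {
  const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
  return _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
}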
|
/external/XNNPACK/src/math/ |
D | exp-f32-avx512f-rr2-p5.c |
    60  __m512i ven = _mm512_max_epi32(veo, vmin_exponent);  in xnn_math_f32_exp__avx512f_rr2_p5() local
    61  ven = _mm512_min_epi32(ven, vmax_exponent);  in xnn_math_f32_exp__avx512f_rr2_p5()
    62  veo = _mm512_sub_epi32(veo, ven);  in xnn_math_f32_exp__avx512f_rr2_p5()
    63  const __m512 vsn = _mm512_castsi512_ps(_mm512_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__avx512f_rr2_p5()
|
D | exp-f32-avx2-rr2-lut8-p3-perm.c |
    61  __m256i ven = _mm256_max_epi32(veo, vmin_exponent);  in xnn_math_f32_exp__avx2_rr2_lut8_p3_perm() local
    62  ven = _mm256_min_epi32(ven, vmax_exponent);  in xnn_math_f32_exp__avx2_rr2_lut8_p3_perm()
    63  veo = _mm256_sub_epi32(veo, ven);  in xnn_math_f32_exp__avx2_rr2_lut8_p3_perm()
    64  const __m256 vsn = _mm256_castsi256_ps(_mm256_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__avx2_rr2_lut8_p3_perm()
|
D | exp-f32-avx512f-rr2-lut32-p2-perm2.c |
    72  __m512i ven = _mm512_max_epi32(veo, vmin_exponent);  in xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2() local
    73  ven = _mm512_min_epi32(ven, vmax_exponent);  in xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2()
    74  veo = _mm512_sub_epi32(veo, ven);  in xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2()
    75  const __m512 vsn = _mm512_castsi512_ps(_mm512_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__avx512f_rr2_lut32_p2_perm2()
|
D | exp-f32-neonfma-rr2-p5.c |
    61  int32x4_t ven = vmaxq_s32(veo, vmin_exponent);  in xnn_math_f32_exp__neonfma_rr2_p5() local
    62  ven = vminq_s32(ven, vmax_exponent);  in xnn_math_f32_exp__neonfma_rr2_p5()
    63  veo = vsubq_s32(veo, ven);  in xnn_math_f32_exp__neonfma_rr2_p5()
    64  const float32x4_t vsn = vreinterpretq_f32_s32(vaddq_s32(ven, vdefault_exponent));  in xnn_math_f32_exp__neonfma_rr2_p5()
|
D | exp-f32-avx512f-rr2-lut16-p3-perm.c |
    67  __m512i ven = _mm512_max_epi32(veo, vmin_exponent);  in xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm() local
    68  ven = _mm512_min_epi32(ven, vmax_exponent);  in xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm()
    69  veo = _mm512_sub_epi32(veo, ven);  in xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm()
    70  const __m512 vsn = _mm512_castsi512_ps(_mm512_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__avx512f_rr2_lut16_p3_perm()
|
D | exp-f16-neonfp16arith-rr2-p3.c |
    61  int16x8_t ven = vmaxq_s16(veo, vmin_exponent);  in xnn_math_f16_exp__neonfp16arith_rr2_p3() local
    62  ven = vminq_s16(ven, vmax_exponent);  in xnn_math_f16_exp__neonfp16arith_rr2_p3()
    63  veo = vsubq_s16(veo, ven);  in xnn_math_f16_exp__neonfp16arith_rr2_p3()
    64  const float16x8_t vsn = vreinterpretq_f16_s16(vaddq_s16(ven, vdefault_exponent));  in xnn_math_f16_exp__neonfp16arith_rr2_p3()
|
D | exp-f32-avx2-rr2-p5.c |
    60  __m256i ven = _mm256_max_epi32(veo, vmin_exponent);  in xnn_math_f32_exp__avx2_rr2_p5() local
    61  ven = _mm256_min_epi32(ven, vmax_exponent);  in xnn_math_f32_exp__avx2_rr2_p5()
    62  veo = _mm256_sub_epi32(veo, ven);  in xnn_math_f32_exp__avx2_rr2_p5()
    63  const __m256 vsn = _mm256_castsi256_ps(_mm256_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__avx2_rr2_p5()
|
D | exp-f32-avx2-rr2-lut8-p4-perm.c |
    64  __m256i ven = _mm256_max_epi32(veo, vmin_exponent);  in xnn_math_f32_exp__avx2_rr2_lut8_p4_perm() local
    65  ven = _mm256_min_epi32(ven, vmax_exponent);  in xnn_math_f32_exp__avx2_rr2_lut8_p4_perm()
    66  veo = _mm256_sub_epi32(veo, ven);  in xnn_math_f32_exp__avx2_rr2_lut8_p4_perm()
    67  const __m256 vsn = _mm256_castsi256_ps(_mm256_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__avx2_rr2_lut8_p4_perm()
|
D | exp-f32-sse2-rr2-p5.c |
    61  __m128i ven = _mm_max_epi16(veo, vmin_exponent);  in xnn_math_f32_exp__sse2_rr2_p5() local
    62  ven = _mm_min_epi16(ven, vmax_exponent);  in xnn_math_f32_exp__sse2_rr2_p5()
    63  veo = _mm_sub_epi32(veo, ven);  in xnn_math_f32_exp__sse2_rr2_p5()
    64  const __m128 vsn = _mm_castsi128_ps(_mm_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__sse2_rr2_p5()
|
D | exp-f32-neonfma-rr2-lut64-p2.c |
    63  int32x4_t ven = vmaxq_s32(veo, vmin_exponent);  in xnn_math_f32_exp__neonfma_rr2_lut64_p2() local
    64  ven = vminq_s32(ven, vmax_exponent);  in xnn_math_f32_exp__neonfma_rr2_lut64_p2()
    65  veo = vsubq_s32(veo, ven);  in xnn_math_f32_exp__neonfma_rr2_lut64_p2()
    66  const float32x4_t vsn = vreinterpretq_f32_s32(vaddq_s32(ven, vdefault_exponent));  in xnn_math_f32_exp__neonfma_rr2_lut64_p2()
|
D | exp-f32-sse2-rr2-lut64-p2.c |
    63  __m128i ven = _mm_max_epi16(veo, vmin_exponent);  in xnn_math_f32_exp__sse2_rr2_lut64_p2() local
    64  ven = _mm_min_epi16(ven, vmax_exponent);  in xnn_math_f32_exp__sse2_rr2_lut64_p2()
    65  veo = _mm_sub_epi32(veo, ven);  in xnn_math_f32_exp__sse2_rr2_lut64_p2()
    66  const __m128 vsn = _mm_castsi128_ps(_mm_add_epi32(ven, vdefault_exponent));  in xnn_math_f32_exp__sse2_rr2_lut64_p2()
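The exp routines above all clamp the exponent increment before applying it: one power-of-two factor cannot absorb arbitrarily large |n|, so veo = n << 23 (n << 10 in the f16 variant) is split into a clamped part ven and a remainder, each turned into a scale factor by adding the bits of 1.0 (vdefault_exponent). Note the SSE2 variants use 16-bit min/max (_mm_max_epi16/_mm_min_epi16) because SSE2 has no 32-bit forms; that is safe here since every operand is a multiple of 2^23, so the low 16-bit lanes are zero and the high lanes alone decide the comparison. A scalar sketch of the idea; the clamp bounds are assumptions in the spirit of these kernels, not their exact constants:

#include <stdint.h>
#include <string.h>

/* Sketch of the clamped two-factor scaling: split the exponent
 * increment veo = n << 23 into a clamped part and a remainder, and
 * apply them as two power-of-two multiplies so the result saturates
 * gracefully instead of producing a garbage exponent field. */
static float scale_clamped(float p, int32_t veo) {
  const int32_t vmin_exponent = -(126 << 23);     /* assumed lower clamp */
  const int32_t vmax_exponent = 127 << 23;        /* assumed upper clamp */
  const int32_t vdefault_exponent = 0x3F800000;   /* bits of 1.0f */
  int32_t ven = veo < vmin_exponent ? vmin_exponent : veo;  /* max(veo, vmin) */
  ven = ven > vmax_exponent ? vmax_exponent : ven;          /* min(ven, vmax) */
  veo -= ven;                      /* remainder, assumed to stay in exponent range */
  uint32_t bits = (uint32_t) (ven + vdefault_exponent);
  float vsn;
  memcpy(&vsn, &bits, sizeof vsn); /* 2^(clamped n) */
  bits = (uint32_t) (veo + vdefault_exponent);
  float vso;
  memcpy(&vso, &bits, sizeof vso); /* 2^(n - clamped n) */
  return (p * vsn) * vso;
}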
|
D | expm1minus-f32-avx-rr2-lut4-p4-perm.c |
    70  const __m256 ven = _mm256_andnot_ps(vindex_mask, vn);  in xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm() local
    71  const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));  in xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm()
    72  const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));  in xnn_math_f32_expm1minus__avx_rr2_lut4_p4_perm()
|
/external/cronet/third_party/icu/source/data/locales/ |
D | fr_HT.txt |
    30  "-1"{"ven dernier"}
    31  "0"{"ce ven"}
    32  "1"{"ven prochain"}
|
/external/icu/icu4c/source/data/locales/ |
D | fr_HT.txt |
    30  "-1"{"ven dernier"}
    31  "0"{"ce ven"}
    32  "1"{"ven prochain"}
|