
Searched refs:vs2 (Results 1 – 25 of 194) sorted by relevance


/external/rust/crates/libz-sys/src/zlib-ng/arch/power/
adler32_power8.c
79 vector unsigned int vs2 = { 0 }; in adler32_power8() local
86 vs2[0] = s2; in adler32_power8()
101 vs2 = vec_add(vsum2, vs2); in adler32_power8()
108 vs2 = vec_add(vs1_save, vs2); in adler32_power8()
109 vs2 = vec_sumsu(vs2, vsum2); in adler32_power8()
115 vs2[0] = vs2[0] % BASE; in adler32_power8()
118 vs2 = vec_and(vs2, vmask); in adler32_power8()
136 vs2 = vec_add(vsum2, vs2); in adler32_power8()
143 vs2 = vec_add(vs1_save, vs2); in adler32_power8()
144 vs2 = vec_sumsu(vs2, vsum2); in adler32_power8()
[all …]
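
These matches are zlib-ng's POWER8 Adler-32 kernel, where vs1/vs2 are the vectorized counterparts of the checksum's two running sums. As a point of reference, a minimal scalar sketch of what those sums compute (naming is ours, not zlib-ng's):

#include <stdint.h>
#include <stddef.h>

#define BASE 65521u  /* largest prime below 2^16 */

/* Scalar reference for the two sums the SIMD code carries in vs1/vs2. */
static uint32_t adler32_ref(uint32_t adler, const uint8_t *buf, size_t len)
{
    uint32_t s1 = adler & 0xFFFFu;          /* byte sum          -> vs1 */
    uint32_t s2 = (adler >> 16) & 0xFFFFu;  /* sum of s1 values  -> vs2 */
    for (size_t i = 0; i < len; i++) {
        s1 = (s1 + buf[i]) % BASE;  /* the vector code defers this reduction */
        s2 = (s2 + s1) % BASE;      /* cf. vs2[0] = vs2[0] % BASE above */
    }
    return (s2 << 16) | s1;
}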
/external/go-cmp/cmp/internal/value/
sort.go
25 vs2 := vs[:1]
27 if isLess(vs2[len(vs2)-1], v) {
28 vs2 = append(vs2, v)
31 return vs2
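
In sort.go, vs2 is the compacted prefix of an already-sorted slice: an element is kept only when it is strictly greater than the last element kept, so duplicates are dropped in place. A hypothetical C rendering of the same loop (the Go original works on reflect.Value slices with an isLess comparator):

#include <stddef.h>

/* Drop duplicates from a sorted array in place; returns the new length.
   vs[0..k) plays the role of vs2 in sort.go. */
static size_t dedup_sorted(int *vs, size_t n)
{
    if (n == 0) return 0;
    size_t k = 1;
    for (size_t i = 1; i < n; i++) {
        if (vs[k - 1] < vs[i])  /* isLess(vs2[len(vs2)-1], v) */
            vs[k++] = vs[i];
    }
    return k;
}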
/external/rust/crates/libz-sys/src/zlib-ng/arch/x86/
adler32_ssse3.c
56 __m128i vs2 = _mm_load_si128((__m128i*)s2); in adler32_ssse3() local
83 vsum2 = _mm_add_epi32(vsum2, vs2); in adler32_ssse3()
84 vs2 = _mm_add_epi32(vsum2, vs1_0); in adler32_ssse3()
95 _mm_store_si128((__m128i*)s2_unpack, vs2); in adler32_ssse3()
adler32_avx.c
59 __m256i vs2 = _mm256_load_si256((__m256i*)s2); in adler32_avx2() local
81 vsum2 = _mm256_add_epi32(vsum2, vs2); in adler32_avx2()
82 vs2 = _mm256_add_epi32(vsum2, vs1_0); in adler32_avx2()
92 _mm256_store_si256((__m256i*)s2_unpack, vs2); in adler32_avx2()
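
The SSSE3 and AVX2 variants lean on the same block identity as the POWER8 one: processing an n-byte block b[0..n) starting from sums (s1, s2) gives s1' = s1 + sum(b[i]) and s2' = s2 + n*s1 + sum((n - i)*b[i]), which is why the kernels snapshot vs1 before each block (vs1_0 / vs1_save) and fold the n*s1 term into vs2 afterwards. A scalar sketch of that update, with the mod-BASE reductions deferred as in the vector code; names are illustrative:

#include <stdint.h>
#include <stddef.h>

/* One-block update of the Adler-32 sums. */
static void adler_block_update(uint32_t *s1, uint32_t *s2,
                               const uint8_t *b, size_t n)
{
    uint32_t sum = 0, wsum = 0;
    for (size_t i = 0; i < n; i++) {
        sum  += b[i];
        wsum += (uint32_t)(n - i) * b[i];  /* weighted byte sum */
    }
    *s2 += (uint32_t)n * *s1 + wsum;  /* the vs1_save contribution */
    *s1 += sum;
}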
/external/XNNPACK/src/f32-velu/gen/
velu-scalar-rr2-p6-x3.c
60 float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x3() local
80 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
108 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
109 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
119 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
velu-scalar-rr2-lut16-p3-x3.c
71 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3() local
85 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
101 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
102 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
112 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
velu-scalar-rr2-lut16-p3-x4.c
77 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4() local
93 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
116 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
117 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
130 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
velu-scalar-rr2-p6-x4.c
63 float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x4() local
87 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
124 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
125 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
138 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
velu-scalar-rr2-lut16-p3-x5.c
83 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5() local
101 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
131 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
132 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
148 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
velu-wasm-rr2-p6-x3.c
60 float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23); in xnn_f32_velu_ukernel__wasm_rr2_p6_x3() local
96 vt2 *= vs2; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3()
97 vs2 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3()
107 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3()
velu-wasm-rr2-lut16-p3-x3.c
71 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3() local
89 vt2 *= vs2; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3()
90 vs2 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3()
100 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3()
velu-scalar-rr2-p6-x5.c
66 float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x5() local
94 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
140 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
141 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
157 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
velu-scalar-rr2-lut16-p3-x6.c
89 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6() local
109 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
146 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
147 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
166 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
velu-wasm-rr2-p6-x4.c
63 float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23); in xnn_f32_velu_ukernel__wasm_rr2_p6_x4() local
108 vt2 *= vs2; in xnn_f32_velu_ukernel__wasm_rr2_p6_x4()
109 vs2 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_p6_x4()
122 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_p6_x4()
velu-wasm-rr2-lut16-p3-x4.c
77 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4() local
100 vt2 *= vs2; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4()
101 vs2 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4()
114 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4()
velu-avx512f-rr1-lut16-p3-perm-x48.c
64 __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2)); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48() local
80 vt2 = _mm512_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
84 vs2 = _mm512_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
95 __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48()
velu-scalar-rr2-p6-x6.c
69 float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x6() local
101 vs2 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()
156 vt2 *= vs2; in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()
157 vs2 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()
176 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()
velu-avx512f-rr1-p6-x48.c
59 __m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23)); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48() local
87 vt2 = _mm512_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
91 vs2 = _mm512_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
102 __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x48()
velu-avx2-rr1-lut16-p3-gather-x24.c
71 __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2)); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24() local
83 vt2 = _mm256_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
89 vs2 = _mm256_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
96 const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
velu-avx2-rr1-lut4-p4-perm-x24.c
66 __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2)); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24() local
82 vt2 = _mm256_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24()
88 vs2 = _mm256_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24()
95 const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24()
velu-avx2-rr1-p6-x24.c
57 __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23)); in xnn_f32_velu_ukernel__avx2_rr1_p6_x24() local
85 vt2 = _mm256_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx2_rr1_p6_x24()
91 vs2 = _mm256_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_p6_x24()
98 const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx2_rr1_p6_x24()
velu-avx2-rr1-lut8-p4-perm-x24.c
66 __m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2)); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24() local
82 vt2 = _mm256_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24()
88 vs2 = _mm256_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24()
95 const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24()
velu-avx512f-rr1-lut16-p3-perm-x64.c
69 __m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2)); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64() local
89 vt2 = _mm512_mul_ps(vt2, vs2); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
95 vs2 = _mm512_fmsub_ps(vs2, valpha, valpha); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
108 __m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64()
velu-wasm-rr2-lut16-p3-x5.c
83 float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2); in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5() local
111 vt2 *= vs2; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5()
112 vs2 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5()
128 const float ve2 = (vp2 + vs2) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5()
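
Across these XNNPACK ELU micro-kernels, vs2 holds the scale factor 2^n for the third lane: the p6 variants build it by shifting the rounded exponent straight into the float's exponent field instead of calling exp2(). A stand-alone sketch of that bit trick, using the constants that appear in the scalar kernels (the magic bias 0x1.8000FEp23f both rounds x*log2(e) to an integer and pre-adds the IEEE-754 exponent bias of 127):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static float fp32_from_bits(uint32_t w) { float f; memcpy(&f, &w, sizeof f); return f; }
static uint32_t fp32_to_bits(float f) { uint32_t w; memcpy(&w, &f, sizeof w); return w; }

int main(void)
{
    const float vlog2e      = 0x1.715476p+0f;
    const float vmagic_bias = 0x1.8000FEp23f;
    float vx = -1.75f;
    float vn = vx * vlog2e + vmagic_bias;               /* n = round(x*log2(e)), still biased */
    float vs = fp32_from_bits(fp32_to_bits(vn) << 23);  /* biased n becomes the exponent: vs = 2^n */
    vn -= vmagic_bias;                                  /* recover n as a float */
    printf("n = %g, 2^n = %g\n", vn, vs);               /* prints n = -3, 2^n = 0.125 */
    return 0;
}

The lut16 variants replace the pure shift with a 16-entry table of 2^(-k/16) mantissas (xnn_table_exp2minus_k_over_16) added to the exponent bits ven2; the rest of the lane's flow (vt2 *= vs2; vs2 -= vone; ve2 = (vp2 + vs2) * valpha) is the expm1-style reconstruction of alpha*(exp(x) - 1).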
/external/tensorflow/tensorflow/python/kernel_tests/
partitioned_variables_test.py
355 vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
359 var2_name = vs2[0]._save_slice_info.full_name
364 self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
365 self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
375 vs2 = partitioned_variables.create_partitioned_variables(
379 var2_name = vs2[0]._save_slice_info.full_name
384 self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
385 self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
392 vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
396 var2_name = vs2[0]._save_slice_info.full_name
[all …]
