
Searched refs:vs1 (Results 1 – 25 of 308) sorted by relevance


/external/fbjni/test/jni/
iterator_tests.cpp 35 std::vector<std::string> vs1; in nativeTestListIterator() local
37 vs1.push_back(elem->toStdString()); in nativeTestListIterator()
40 EXPECT(vs1.size() == 3); in nativeTestListIterator()
41 EXPECT(vs1[0] == "red"); in nativeTestListIterator()
42 EXPECT(vs1[1] == "green"); in nativeTestListIterator()
43 EXPECT(vs1[2] == "blue"); in nativeTestListIterator()
52 EXPECT(vs1 == vs2); in nativeTestListIterator()
56 EXPECT(vs1 == vs3); in nativeTestListIterator()
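The fbjni test above walks a java.util.List<String> through fbjni's iterator wrappers and collects it into a std::vector. Below is a minimal sketch of that pattern, assuming JList supports C++ range-based iteration as the test's loop implies; the helper name collectStrings is hypothetical.

#include <fbjni/fbjni.h>
#include <string>
#include <vector>

using namespace facebook::jni;

// Hypothetical helper: copy a java.util.List<String> into a std::vector.
// Assumes fbjni's JList/JIterator wrappers expose begin()/end() so the
// range-for below yields JString references, matching the
// elem->toStdString() calls seen in iterator_tests.cpp above.
std::vector<std::string> collectStrings(alias_ref<JList<JString>> jlist) {
  std::vector<std::string> vs1;
  for (const auto& elem : *jlist) {
    vs1.push_back(elem->toStdString());  // JString -> std::string
  }
  return vs1;
}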
/external/XNNPACK/src/f32-velu/gen/
velu-scalar-rr2-lut16-p3-x2.c 62 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2() local
71 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
83 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
84 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
91 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2()
velu-scalar-rr2-p6-x2.c 54 float vs1 = uint32_as_float(float_as_uint32(vn1) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x2() local
68 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
89 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
90 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
97 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x2()
velu-scalar-rr2-lut16-p3-x3.c 68 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3() local
79 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
98 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
99 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
109 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3()
velu-scalar-rr2-p6-x3.c 57 float vs1 = uint32_as_float(float_as_uint32(vn1) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x3() local
75 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
105 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
106 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
116 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x3()
velu-wasm-rr2-lut16-p3-x2.c 62 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2() local
75 vt1 *= vs1; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2()
76 vs1 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2()
83 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2()
velu-scalar-rr2-lut16-p3-x4.c 74 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4() local
87 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
113 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
114 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
127 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4()
velu-wasm-rr2-p6-x2.c 54 float vs1 = uint32_as_float(float_as_uint32(vn1) << 23); in xnn_f32_velu_ukernel__wasm_rr2_p6_x2() local
81 vt1 *= vs1; in xnn_f32_velu_ukernel__wasm_rr2_p6_x2()
82 vs1 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_p6_x2()
89 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_p6_x2()
velu-scalar-rr2-p6-x4.c 60 float vs1 = uint32_as_float(float_as_uint32(vn1) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x4() local
82 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
121 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
122 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
135 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4()
velu-scalar-rr2-lut16-p3-x5.c 80 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5() local
95 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
128 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
129 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
145 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5()
velu-avx512f-rr1-lut16-p3-perm-x32.c 57 __m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1)); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32() local
69 vt1 = _mm512_mul_ps(vt1, vs1); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
72 vs1 = _mm512_fmsub_ps(vs1, valpha, valpha); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
80 __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32()
velu-wasm-rr2-p6-x3.c 57 float vs1 = uint32_as_float(float_as_uint32(vn1) << 23); in xnn_f32_velu_ukernel__wasm_rr2_p6_x3() local
93 vt1 *= vs1; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3()
94 vs1 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3()
104 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_p6_x3()
velu-wasm-rr2-lut16-p3-x3.c 68 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3() local
86 vt1 *= vs1; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3()
87 vs1 -= vone; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3()
97 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3()
velu-avx2-rr1-lut16-p3-gather-x16.c 62 __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1)); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16() local
71 vt1 = _mm256_mul_ps(vt1, vs1); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
75 vs1 = _mm256_fmsub_ps(vs1, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
80 const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
velu-scalar-rr2-p6-x5.c 63 float vs1 = uint32_as_float(float_as_uint32(vn1) << 23); in xnn_f32_velu_ukernel__scalar_rr2_p6_x5() local
89 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
137 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
138 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
154 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()
velu-avx2-rr1-lut8-p4-perm-x16.c 58 __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1)); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16() local
70 vt1 = _mm256_mul_ps(vt1, vs1); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16()
74 vs1 = _mm256_fmsub_ps(vs1, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16()
79 const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16()
velu-avx2-rr1-p6-x16.c 52 __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23)); in xnn_f32_velu_ukernel__avx2_rr1_p6_x16() local
73 vt1 = _mm256_mul_ps(vt1, vs1); in xnn_f32_velu_ukernel__avx2_rr1_p6_x16()
77 vs1 = _mm256_fmsub_ps(vs1, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_p6_x16()
82 const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1); in xnn_f32_velu_ukernel__avx2_rr1_p6_x16()
velu-avx2-rr1-lut4-p4-perm-x16.c 58 __m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1)); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16() local
70 vt1 = _mm256_mul_ps(vt1, vs1); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16()
74 vs1 = _mm256_fmsub_ps(vs1, valpha, valpha); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16()
79 const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16()
velu-avx512f-rr1-p6-x32.c 54 __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23)); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32() local
75 vt1 = _mm512_mul_ps(vt1, vs1); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
78 vs1 = _mm512_fmsub_ps(vs1, valpha, valpha); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
86 __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x32()
velu-scalar-rr2-lut16-p3-x6.c 86 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1); in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6() local
103 vs1 = 0.0f; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
143 vt1 *= vs1; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
144 vs1 -= vone; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
163 const float ve1 = (vp1 + vs1) * valpha; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6()
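All of the f32-velu hits above share one recipe: split x into n*ln2 + t, build vs1 = 2^n directly from IEEE-754 exponent bits (by shifting the rounded n into the exponent field, as in float_as_uint32(vn1) << 23, or via the exp2minus_k_over_16 table plus ven1), then recombine so that alpha*(e^x - 1) = alpha*(vp + (vs - 1)). The vs1 = 0.0f lines flush 2^n to zero once x is negative enough that the result saturates to -alpha. Here is a single-lane sketch under those assumptions; elu_sketch and its cubic are illustrative, not the production constants (the real kernels use a degree-6 polynomial or a 16-entry table with a cubic, and split ln2 into hi/lo words, the "rr2" in the file names).

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Bit cast as used throughout the kernels above.
static float uint32_as_float(uint32_t w) {
  float f;
  std::memcpy(&f, &w, sizeof f);
  return f;
}

// Illustrative single-element version of the vs1/vt1/vp1 recombination.
// The kernels build 2^n branch-free with a magic bias and a left shift by
// 23; nearbyint plus an explicit exponent is used here for clarity. Inputs
// negative enough to saturate would need the vs = 0 flush seen above.
float elu_sketch(float x, float alpha) {
  const float vlog2e = 0x1.715476p+0f;                // 1/ln(2)
  const float vln2 = 0x1.62E430p-1f;                  // ln(2)
  float vn = std::nearbyint(x * vlog2e);              // n = round(x/ln2)
  float vs = uint32_as_float((uint32_t)((int32_t)vn + 127) << 23);  // 2^n
  float vt = std::fma(vn, -vln2, x);                  // t = x - n*ln2
  float vp = std::fma(vt, 1.0f / 6.0f, 0.5f);         // Horner: c3*t + c2
  vp *= vt;                                           // t*(c2 + c3*t)
  vt *= vs;                                           // "vt1 *= vs1"
  vs -= 1.0f;                                         // "vs1 -= vone"
  vp = std::fma(vp, vt, vt);                          // ~ s*(e^t - 1)
  return (vp + vs) * alpha;                           // "(vp1 + vs1) * valpha"
}

int main() {
  float x = -1.5f;
  std::printf("elu(%g) ~ %g (ref %g)\n", x, elu_sketch(x, 1.0f),
              std::expm1(x));  // ELU's negative branch: alpha*(e^x - 1)
}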
/external/tensorflow/tensorflow/python/kernel_tests/variables/
partitioned_variables_test.py 348 vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
353 var1_name = vs1[0]._save_slice_info.full_name
357 self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
358 self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
366 vs1 = partitioned_variables.create_partitioned_variables(
373 var1_name = vs1[0]._save_slice_info.full_name
377 self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
378 self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
385 vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
390 var1_name = vs1[0]._save_slice_info.full_name
[all …]
/external/XNNPACK/src/f16-velu/gen/
velu-avx2-rr1-p3-x16.c 53 __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23)); in xnn_f16_velu_ukernel__avx2_rr1_p3_x16() local
69 vt1 = _mm256_mul_ps(vt1, vs1); in xnn_f16_velu_ukernel__avx2_rr1_p3_x16()
70 vs1 = _mm256_fmsub_ps(vs1, valpha, valpha); in xnn_f16_velu_ukernel__avx2_rr1_p3_x16()
74 const __m256 ve1 = _mm256_fmadd_ps(vp1, vt1, vs1); in xnn_f16_velu_ukernel__avx2_rr1_p3_x16()
velu-neonfp16arith-rr1-p3-x16.c 54 float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); in xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x16() local
67 vt1 = vmulq_f16(vt1, vs1); in xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x16()
68 vs1 = vfmsq_f16(vminus_alpha, vs1, vminus_alpha); in xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x16()
75 float16x8_t ve1 = vfmsq_f16(vs1, vp1, vminus_alpha); in xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x16()
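The f16 kernels play the same trick in half-precision layout: IEEE-754 binary16 has a 10-bit mantissa and exponent bias 15, which is why the NEON kernel shifts vn1 left by 10 where the f32 kernels shift by 23. A hypothetical helper for the bit pattern:

#include <cstdint>

// Hypothetical helper: raw binary16 bit pattern of 2^n. The exponent bias
// is 15 and the mantissa is 10 bits wide, hence the shift by 10 in
// velu-neonfp16arith-rr1-p3-x16.c versus 23 in the f32 kernels.
static uint16_t half_two_pow(int n) {
  return (uint16_t)((n + 15) << 10);
}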
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/RISCV/
RISCVInstrInfoV.td 318 // op vd, vs2, vs1, vm
321 (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
322 opcodestr, "$vd, $vs2, $vs1$vm">;
324 // op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
327 (ins VR:$vs2, VR:$vs1, VMV0:$v0),
328 opcodestr, "$vd, $vs2, $vs1, v0"> {
332 // op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
335 (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
336 opcodestr, "$vd, $vs1, $vs2$vm">;
338 // op vd, vs2, vs1
[all …]
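The TableGen classes above pin down where vs1 sits in each RVV assembly string: the standard order is vd, vs2, vs1 with vs1 the rightmost source, and a few formats deliberately swap the two for printing. Here is a sketch, assuming the __riscv_-prefixed v0.12 RVV C intrinsics naming, of how that operand order surfaces to C++ (compile with an RVV-enabled toolchain, e.g. -march=rv64gcv):

#include <riscv_vector.h>

// In the RVV intrinsics the first source operand maps to vs2 and the
// second to vs1, matching the "op vd, vs2, vs1, vm" string in
// RISCVInstrInfoV.td. The intrinsic name assumes the v0.12 spec.
vint32m1_t add_vv(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
  return __riscv_vadd_vv_i32m1(vs2, vs1, vl);  // emits: vadd.vv vd, vs2, vs1
}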
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
scalar-rr2-lut64-p2-x2.c 78 const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[vidx1] + ve1); in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2() local
105 float vf1 = vp1 * vs1 + vs1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2()
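This raddstoreexpminusmax hit reuses the exponent machinery for softmax: vs1 = 2^(n + j/64) comes from a 64-entry table with the integer exponent folded into the bits (the "+ ve1" above), and the final line reconstructs e^(x - max) as vs * (1 + vp). A short sketch of that recombination, with libm standing in for the table and the quadratic (illustrative, not the kernel's constants):

#include <cmath>
#include <cstdio>

int main() {
  const float ln2 = 0x1.62E430p-1f;
  float x = -0.3f;                            // input, already max-subtracted
  float k = std::nearbyint(x * 64.0f / ln2) / 64.0f;  // n + j/64
  float s = std::exp2(k);       // kernel: xnn_table_exp2_k_over_64[j] + ve
  float r = x - k * ln2;        // small residual, |r| <= ln2/128
  float p = std::expm1(r);      // kernel: degree-2 polynomial in r
  float f = p * s + s;          // "vf1 = vp1 * vs1 + vs1": s * (1 + p)
  std::printf("exp(%g) ~ %g (ref %g)\n", x, f, std::exp(x));
}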
