Home
last modified time | relevance | path

Searched refs:vtl (Results 1 – 22 of 22) sorted by relevance

/external/XNNPACK/src/f32-ibilinear/gen/
Dwasmsimd-c4.c43 const v128_t vtl = wasm_v128_load(i0); in xnn_f32_ibilinear_ukernel__wasmsimd_c4() local
52 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c4()
54 const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah)); in xnn_f32_ibilinear_ukernel__wasmsimd_c4()
63 const v128_t vtl = wasm_v128_load(i0); in xnn_f32_ibilinear_ukernel__wasmsimd_c4() local
68 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c4()
70 const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah)); in xnn_f32_ibilinear_ukernel__wasmsimd_c4()
Dwasmsimd-c8.c77 const v128_t vtl = wasm_v128_load(i0); in xnn_f32_ibilinear_ukernel__wasmsimd_c8() local
86 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c8()
88 const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah)); in xnn_f32_ibilinear_ukernel__wasmsimd_c8()
97 const v128_t vtl = wasm_v128_load(i0); in xnn_f32_ibilinear_ukernel__wasmsimd_c8() local
102 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c8()
104 const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah)); in xnn_f32_ibilinear_ukernel__wasmsimd_c8()
Dscalar-c1.c41 const float vtl = *i0++; in xnn_f32_ibilinear_ukernel__scalar_c1() local
46 const float vtd = vtr - vtl; in xnn_f32_ibilinear_ukernel__scalar_c1()
49 const float vt = vtl + vtd * valphah; in xnn_f32_ibilinear_ukernel__scalar_c1()
Dscalar-c2.c75 const float vtl = *i0++; in xnn_f32_ibilinear_ukernel__scalar_c2() local
80 const float vtd = vtr - vtl; in xnn_f32_ibilinear_ukernel__scalar_c2()
83 const float vt = vtl + vtd * valphah; in xnn_f32_ibilinear_ukernel__scalar_c2()
Dscalar-c4.c97 const float vtl = *i0++; in xnn_f32_ibilinear_ukernel__scalar_c4() local
102 const float vtd = vtr - vtl; in xnn_f32_ibilinear_ukernel__scalar_c4()
105 const float vt = vtl + vtd * valphah; in xnn_f32_ibilinear_ukernel__scalar_c4()
/external/XNNPACK/src/f32-ibilinear/
Dscalar.c.in42 const float vtl${ABC[C]} = i0[${C}];
52 const float vtd${ABC[C]} = vtr${ABC[C]} - vtl${ABC[C]};
56 const float vt${ABC[C]} = vtl${ABC[C]} + vtd${ABC[C]} * valphah;
70 const float vtl = *i0++;
75 const float vtd = vtr - vtl;
78 const float vt = vtl + vtd * valphah;
89 const float vtl = *i0++;
94 const float vtd = vtr - vtl;
97 const float vt = vtl + vtd * valphah;
Dwasmsimd.c.in44 const v128_t vtl${ABC[0:4]} = wasm_v128_load(i0);
49 const v128_t vtl${ABC[C:C+4]} = wasm_v128_load(i0 + ${C});
59 const v128_t vtd${ABC[C:C+4]} = wasm_f32x4_sub(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
63	        const v128_t vt${ABC[C:C+4]} = wasm_f32x4_add(vtl${ABC[C:C+4]}, wasm_f32x4_mul(vtd${ABC[C:C+4]}, valphah));
78 const v128_t vtl = wasm_v128_load(i0);
87 const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
89 const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
98 const v128_t vtl = wasm_v128_load(i0);
103 const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
105 const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
Dsse.c.in45 const __m128 vtl${ABC[0:4]} = _mm_loadu_ps(i0);
50 const __m128 vtl${ABC[C:C+4]} = _mm_loadu_ps(i0 + ${C});
60 const __m128 vtd${ABC[C:C+4]} = _mm_sub_ps(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
64 …const __m128 vt${ABC[C:C+4]} = _mm_add_ps(vtl${ABC[C:C+4]}, _mm_mul_ps(vtd${ABC[C:C+4]}, valphah));
Dneon.c.in49 const float32x4_t vtl${ABC[C:C+4]} = vld1q_f32(i0); i0 += 4;
55 const float32x4_t vtd${ABC[C:C+4]} = vsubq_f32(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
61 … const float32x4_t vt${ABC[C:C+4]} = vfmaq_f32(vtl${ABC[C:C+4]}, vtd${ABC[C:C+4]}, valphah);
65	        const float32x4_t vt${ABC[C:C+4]} = vfmaq_lane_f32(vtl${ABC[C:C+4]}, vtd${ABC[C:C+4]}, valphahv, 0);
70	        const float32x4_t vt${ABC[C:C+4]} = vmlaq_lane_f32(vtl${ABC[C:C+4]}, vtd${ABC[C:C+4]}, valphahv, 0);
/external/XNNPACK/src/f32-ibilinear-chw/
Dscalar.c.in45 const float vtl${ABC[P]} = itl${P}[0];
51 const float vtd${ABC[P]} = vtr${ABC[P]} - vtl${ABC[P]};
55 const float vt${ABC[P]} = vtl${ABC[P]} + vtd${ABC[P]} * valphah${ABC[P]};
78 const float vtl = itl[0]; variable
83 const float vtd = vtr - vtl;
86 const float vt = vtl + vtd * valphah;
105 const float vtl = itl[0]; variable
110 const float vtd = vtr - vtl;
113 const float vt = vtl + vtd * valphah;
Dwasmsimd.c.in67 …const v128_t vtl${ABC[P:P+4]} = wasm_v32x4_shuffle(vtltr${ABC[P:P+2]}, vtltr${ABC[P+2:P+4]}, 0, 2,…
71	      const v128_t vl${ABC[P:P+4]} = wasm_f32x4_add(vtl${ABC[P:P+4]}, wasm_f32x4_mul(vld${ABC[P:P+4]}, valphav${ABC[P:P+4]}));
114 const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6); variable
117 const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
147 const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2); variable
150 const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
Dneon.c.in68 const float32x4_t vtl${ABC[P:P+4]} = vtl_t${ABC[P:P+4]}.val[0];
72	      const float32x4_t vl${ABC[P:P+4]} = ${VMULADDQ_F32}(vtl${ABC[P:P+4]}, vld${ABC[P:P+4]}, valphav${ABC[P:P+4]});
114 const float32x4_t vtl = vtl_t.val[0];
117 const float32x4_t vl = ${VMULADDQ_F32}(vtl, vld, valphav);
152 const float32x2_t vtl = vtl_t.val[0];
155 const float32x2_t vl = ${VMULADD_F32}(vtl, vld, valphav);
/external/XNNPACK/src/f32-ibilinear-chw/gen/
Dscalar-p1.c43 const float vtl = itl[0]; in xnn_f32_ibilinear_chw_ukernel__scalar_p1() local
48 const float vtd = vtr - vtl; in xnn_f32_ibilinear_chw_ukernel__scalar_p1()
51 const float vt = vtl + vtd * valphah; in xnn_f32_ibilinear_chw_ukernel__scalar_p1()
Dscalar-p2.c86 const float vtl = itl[0]; in xnn_f32_ibilinear_chw_ukernel__scalar_p2() local
91 const float vtd = vtr - vtl; in xnn_f32_ibilinear_chw_ukernel__scalar_p2()
94 const float vt = vtl + vtd * valphah; in xnn_f32_ibilinear_chw_ukernel__scalar_p2()
Dwasmsimd-p4.c73 const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4() local
76 const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4()
107 const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4() local
110 const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4()
Dneonfma-p4.c74 const float32x4_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p4() local
77 const float32x4_t vl = vfmaq_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p4()
114 const float32x2_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p4() local
117 const float32x2_t vl = vfma_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p4()
Dneon-p4.c74 const float32x4_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neon_p4() local
77 const float32x4_t vl = vmlaq_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p4()
114 const float32x2_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neon_p4() local
117 const float32x2_t vl = vmla_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p4()
Dscalar-p4.c116 const float vtl = itl[0]; in xnn_f32_ibilinear_chw_ukernel__scalar_p4() local
121 const float vtd = vtr - vtl; in xnn_f32_ibilinear_chw_ukernel__scalar_p4()
124 const float vt = vtl + vtd * valphah; in xnn_f32_ibilinear_chw_ukernel__scalar_p4()
Dwasmsimd-p8.c159 const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8() local
162 const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8()
193 const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8() local
196 const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8()
Dneonfma-p8.c162 const float32x4_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p8() local
165 const float32x4_t vl = vfmaq_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p8()
202 const float32x2_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p8() local
205 const float32x2_t vl = vfma_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p8()
Dneon-p8.c162 const float32x4_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neon_p8() local
165 const float32x4_t vl = vmlaq_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p8()
202 const float32x2_t vtl = vtl_t.val[0]; in xnn_f32_ibilinear_chw_ukernel__neon_p8() local
205 const float32x2_t vl = vmla_f32(vtl, vld, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p8()
/external/libchrome/mojo/public/interfaces/bindings/tests/
Dsample_service.mojom66 // TODO(vtl): Add tests for default vs null when those are implemented (for