/external/XNNPACK/src/f32-ibilinear/gen/ |
D | wasmsimd-c4.c |
    44 const v128_t vtr = wasm_v128_load(i1); in xnn_f32_ibilinear_ukernel__wasmsimd_c4() local
    52 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c4()
    64 const v128_t vtr = wasm_v128_load(i1); in xnn_f32_ibilinear_ukernel__wasmsimd_c4() local
    68 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c4()
|
D | wasmsimd-c8.c |
    78 const v128_t vtr = wasm_v128_load(i1); in xnn_f32_ibilinear_ukernel__wasmsimd_c8() local
    86 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c8()
    98 const v128_t vtr = wasm_v128_load(i1); in xnn_f32_ibilinear_ukernel__wasmsimd_c8() local
    102 const v128_t vtd = wasm_f32x4_sub(vtr, vtl); in xnn_f32_ibilinear_ukernel__wasmsimd_c8()
|
D | scalar-c1.c |
    42 const float vtr = *i1++; in xnn_f32_ibilinear_ukernel__scalar_c1() local
    46 const float vtd = vtr - vtl; in xnn_f32_ibilinear_ukernel__scalar_c1()
|
D | scalar-c2.c |
    76 const float vtr = *i1++; in xnn_f32_ibilinear_ukernel__scalar_c2() local
    80 const float vtd = vtr - vtl; in xnn_f32_ibilinear_ukernel__scalar_c2()
|
D | scalar-c4.c |
    98 const float vtr = *i1++; in xnn_f32_ibilinear_ukernel__scalar_c4() local
    102 const float vtd = vtr - vtl; in xnn_f32_ibilinear_ukernel__scalar_c4()
|
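All of the f32-ibilinear matches above come from the same bilinear interpolation micro-kernels: vtl/vtr are the top-left/top-right source values and vtd is their horizontal difference. As orientation, here is a minimal standalone sketch of the per-channel arithmetic; function and parameter names are made up for illustration, and it is not the actual XNNPACK kernel, which also handles indirection buffers and pointer bumping.

#include <stddef.h>

/*
 * Illustrative sketch of the per-channel arithmetic behind the
 * scalar-c1/c2/c4 matches above (assumed names, simplified signature).
 */
static void ibilinear_scalar_sketch(
    const float* i0,   /* top-left row pointer */
    const float* i1,   /* top-right row pointer */
    const float* i2,   /* bottom-left row pointer */
    const float* i3,   /* bottom-right row pointer */
    float alphah, float alphav,
    float* output, size_t channels)
{
  for (size_t c = 0; c < channels; c++) {
    const float vtl = i0[c];
    const float vtr = i1[c];        /* cf. "const float vtr = *i1++;" */
    const float vbl = i2[c];
    const float vbr = i3[c];

    const float vtd = vtr - vtl;    /* cf. "const float vtd = vtr - vtl;" */
    const float vbd = vbr - vbl;

    const float vt = vtl + vtd * alphah;  /* blend along the top edge */
    const float vb = vbl + vbd * alphah;  /* blend along the bottom edge */

    output[c] = vt + (vb - vt) * alphav;  /* blend between the two edges */
  }
}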
/external/XNNPACK/src/f32-ibilinear/ |
D | scalar.c.in |
    43 const float vtr${ABC[C]} = i1[${C}];
    52 const float vtd${ABC[C]} = vtr${ABC[C]} - vtl${ABC[C]};
    71 const float vtr = *i1++;
    75 const float vtd = vtr - vtl;
    90 const float vtr = *i1++;
    94 const float vtd = vtr - vtl;
|
D | wasmsimd.c.in |
    45 const v128_t vtr${ABC[0:4]} = wasm_v128_load(i1);
    50 const v128_t vtr${ABC[C:C+4]} = wasm_v128_load(i1 + ${C});
    59 const v128_t vtd${ABC[C:C+4]} = wasm_f32x4_sub(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
    79 const v128_t vtr = wasm_v128_load(i1);
    87 const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
    99 const v128_t vtr = wasm_v128_load(i1);
    103 const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
|
D | sse.c.in |
    46 const __m128 vtr${ABC[0:4]} = _mm_loadu_ps(i1);
    51 const __m128 vtr${ABC[C:C+4]} = _mm_loadu_ps(i1 + ${C});
    60 const __m128 vtd${ABC[C:C+4]} = _mm_sub_ps(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
|
D | neon.c.in |
    50 const float32x4_t vtr${ABC[C:C+4]} = vld1q_f32(i1); i1 += 4;
    55 const float32x4_t vtd${ABC[C:C+4]} = vsubq_f32(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
|
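The wasmsimd-c4/c8 kernels and the wasmsimd.c.in template above vectorize the same arithmetic four channels per iteration. A hedged sketch of that shape follows, again with assumed names and without the real kernels' tail handling and pointer increments.

#include <stddef.h>
#include <wasm_simd128.h>

/*
 * Illustrative 4-channels-per-iteration sketch matching the shape of the
 * wasmsimd matches above (assumed names; channels assumed a multiple of 4).
 */
static void ibilinear_wasmsimd_sketch(
    const float* i0, const float* i1, const float* i2, const float* i3,
    float alphah, float alphav,
    float* output, size_t channels)
{
  const v128_t valphah = wasm_f32x4_splat(alphah);
  const v128_t valphav = wasm_f32x4_splat(alphav);
  for (size_t c = 0; c < channels; c += 4) {
    const v128_t vtl = wasm_v128_load(i0 + c);
    const v128_t vtr = wasm_v128_load(i1 + c);    /* cf. wasm_v128_load(i1) */
    const v128_t vbl = wasm_v128_load(i2 + c);
    const v128_t vbr = wasm_v128_load(i3 + c);

    const v128_t vtd = wasm_f32x4_sub(vtr, vtl);  /* cf. wasm_f32x4_sub(vtr, vtl) */
    const v128_t vbd = wasm_f32x4_sub(vbr, vbl);

    const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
    const v128_t vb = wasm_f32x4_add(vbl, wasm_f32x4_mul(vbd, valphah));
    const v128_t vd = wasm_f32x4_sub(vb, vt);

    wasm_v128_store(output + c, wasm_f32x4_add(vt, wasm_f32x4_mul(vd, valphav)));
  }
}

The SSE and NEON templates listed above follow the same pattern with _mm_loadu_ps/_mm_sub_ps and vld1q_f32/vsubq_f32 respectively.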
/external/XNNPACK/src/f32-ibilinear-chw/ |
D | scalar.c.in |
    46 const float vtr${ABC[P]} = itl${P}[1];
    51 const float vtd${ABC[P]} = vtr${ABC[P]} - vtl${ABC[P]};
    79 const float vtr = itl[1]; variable
    83 const float vtd = vtr - vtl;
    106 const float vtr = itl[1]; variable
    110 const float vtd = vtr - vtl;
|
D | wasmsimd.c.in |
    68 …const v128_t vtr${ABC[P:P+4]} = wasm_v32x4_shuffle(vtltr${ABC[P:P+2]}, vtltr${ABC[P+2:P+4]}, 1, 3,…
    72 …const v128_t vr${ABC[P:P+4]} = wasm_f32x4_add(vtr${ABC[P:P+4]}, wasm_f32x4_mul(vrd${ABC[P:P+4]}, v…
    115 const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7); variable
    118 const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));
    148 const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3); variable
    151 const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));
|
D | neon.c.in |
    69 const float32x4_t vtr${ABC[P:P+4]} = vtl_t${ABC[P:P+4]}.val[1];
    73 …const float32x4_t vr${ABC[P:P+4]} = ${VMULADDQ_F32}(vtr${ABC[P:P+4]}, vrd${ABC[P:P+4]}, valphav${A…
    115 const float32x4_t vtr = vtl_t.val[1];
    118 const float32x4_t vr = ${VMULADDQ_F32}(vtr, vrd, valphav);
    153 const float32x2_t vtr = vtl_t.val[1];
    156 const float32x2_t vr = ${VMULADD_F32}(vtr, vrd, valphav);
|
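In the vectorized CHW variants above, vtr comes out of a deinterleaved pair load (vtl_t.val[1]) or a shuffle, and the blend runs vertically first: vr = vtr + vrd * valphav. The sketch below isolates that step for four pixels at a time, assuming the corner values have already been gathered into vectors; the gather/deinterleave step itself is omitted and the names are illustrative.

#include <arm_neon.h>

/*
 * Illustrative vertical-then-horizontal blend, as in the CHW NEON matches
 * above. Requires FMA-capable NEON (vfmaq_f32).
 */
static inline float32x4_t ibilinear_chw_neon_sketch(
    float32x4_t vtl, float32x4_t vtr,   /* top-left, top-right */
    float32x4_t vbl, float32x4_t vbr,   /* bottom-left, bottom-right */
    float32x4_t valphah, float32x4_t valphav)
{
  /* Blend each column vertically first. */
  const float32x4_t vld = vsubq_f32(vbl, vtl);          /* left-column delta  */
  const float32x4_t vrd = vsubq_f32(vbr, vtr);          /* right-column delta */
  const float32x4_t vl  = vfmaq_f32(vtl, vld, valphav); /* vtl + vld * alphav */
  const float32x4_t vr  = vfmaq_f32(vtr, vrd, valphav); /* cf. vfmaq_f32(vtr, vrd, valphav) */

  /* Then blend the two interpolated columns horizontally. */
  const float32x4_t vd = vsubq_f32(vr, vl);
  return vfmaq_f32(vl, vd, valphah);
}

As the matches show, the neonfma-* kernels use vfmaq_f32/vfma_f32 for this step, while the plain neon-* kernels substitute vmlaq_f32/vmla_f32; that is what the ${VMULADDQ_F32}/${VMULADD_F32} placeholders in neon.c.in expand to.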
/external/XNNPACK/src/f32-ibilinear-chw/gen/ |
D | wasmsimd-p4.c |
    74 const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4() local
    77 const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4()
    108 const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4() local
    111 const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p4()
|
D | neonfma-p4.c |
    75 const float32x4_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p4() local
    78 const float32x4_t vr = vfmaq_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p4()
    115 const float32x2_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p4() local
    118 const float32x2_t vr = vfma_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p4()
|
D | neon-p4.c |
    75 const float32x4_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neon_p4() local
    78 const float32x4_t vr = vmlaq_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p4()
    115 const float32x2_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neon_p4() local
    118 const float32x2_t vr = vmla_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p4()
|
D | scalar-p1.c |
    44 const float vtr = itl[1]; in xnn_f32_ibilinear_chw_ukernel__scalar_p1() local
    48 const float vtd = vtr - vtl; in xnn_f32_ibilinear_chw_ukernel__scalar_p1()
|
D | scalar-p2.c |
    87 const float vtr = itl[1]; in xnn_f32_ibilinear_chw_ukernel__scalar_p2() local
    91 const float vtd = vtr - vtl; in xnn_f32_ibilinear_chw_ukernel__scalar_p2()
|
D | wasmsimd-p8.c |
    160 const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8() local
    163 const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8()
    194 const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8() local
    197 const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav)); in xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8()
|
D | neonfma-p8.c |
    163 const float32x4_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p8() local
    166 const float32x4_t vr = vfmaq_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p8()
    203 const float32x2_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neonfma_p8() local
    206 const float32x2_t vr = vfma_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neonfma_p8()
|
D | neon-p8.c |
    163 const float32x4_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neon_p8() local
    166 const float32x4_t vr = vmlaq_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p8()
    203 const float32x2_t vtr = vtl_t.val[1]; in xnn_f32_ibilinear_chw_ukernel__neon_p8() local
    206 const float32x2_t vr = vmla_f32(vtr, vrd, valphav); in xnn_f32_ibilinear_chw_ukernel__neon_p8()
|
D | scalar-p4.c |
    117 const float vtr = itl[1]; in xnn_f32_ibilinear_chw_ukernel__scalar_p4() local
    121 const float vtd = vtr - vtl; in xnn_f32_ibilinear_chw_ukernel__scalar_p4()
|
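The scalar CHW kernels (scalar-p1/p2/p4 above) rely on the fact that in CHW layout the left and right source samples are adjacent floats, so vtr is simply itl[1]. A minimal per-pixel sketch follows; the pointer and weight parameter names are assumed for illustration, and the real kernels read them from packed indirection/weight buffers.

/*
 * Illustrative per-pixel arithmetic of the scalar CHW path: itl points at
 * the top sample pair, ibl at the bottom sample pair (assumed names).
 */
static inline float ibilinear_chw_scalar_sketch(
    const float* itl, const float* ibl, float alphah, float alphav)
{
  const float vtl = itl[0];
  const float vtr = itl[1];      /* cf. "const float vtr = itl[1];" */
  const float vbl = ibl[0];
  const float vbr = ibl[1];

  const float vtd = vtr - vtl;   /* cf. "const float vtd = vtr - vtl;" */
  const float vbd = vbr - vbl;

  const float vt = vtl + vtd * alphah;  /* blend along the top pair */
  const float vb = vbl + vbd * alphah;  /* blend along the bottom pair */

  return vt + (vb - vt) * alphav;       /* blend vertically */
}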
/external/cldr/tools/java/org/unicode/cldr/util/data/transforms/ |
D | internal_raw_IPA-old.txt | 209562 vtr %7157
|