// Auto-generated file. Do not edit!
//   Template: src/f32-ibilinear/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/ibilinear.h>


void xnn_f32_ibilinear_ukernel__wasmsimd_c8(
    size_t output_pixels,
    size_t channels,
    const float** restrict input,
    size_t input_offset,
    const float* restrict weights,
    float* restrict output,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

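  // One iteration per output pixel. For each pixel, input[] supplies four
  // corner pointers (top-left, top-right, bottom-left, bottom-right) and
  // weights supplies two blending factors. Note that `channels` is counted
  // in bytes, as the sizeof(float) scaling in the assert above and in the
  // loop bounds below indicates.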
  do {
    const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset);
    const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset);
    const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset);
    const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset);
    input += 4;

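    // Broadcast the per-pixel interpolation weights: valphah blends
    // horizontally (left vs. right corners), valphav blends vertically
    // (top vs. bottom).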
    const v128_t valphah = wasm_v32x4_load_splat(weights);
    const v128_t valphav = wasm_v32x4_load_splat(weights + 1);
    weights += 2;

    size_t c = channels;
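    // Main loop: interpolate 8 channels (two 4-lane vectors) per iteration.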
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const v128_t vtl0123 = wasm_v128_load(i0);
      const v128_t vtr0123 = wasm_v128_load(i1);
      const v128_t vbl0123 = wasm_v128_load(i2);
      const v128_t vbr0123 = wasm_v128_load(i3);
      const v128_t vtl4567 = wasm_v128_load(i0 + 4);
      const v128_t vtr4567 = wasm_v128_load(i1 + 4);
      const v128_t vbl4567 = wasm_v128_load(i2 + 4);
      const v128_t vbr4567 = wasm_v128_load(i3 + 4);
      i0 += 8;
      i1 += 8;
      i2 += 8;
      i3 += 8;

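      // Horizontal pass: top = tl + alphah * (tr - tl), bottom = bl + alphah * (br - bl).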
      const v128_t vtd0123 = wasm_f32x4_sub(vtr0123, vtl0123);
      const v128_t vbd0123 = wasm_f32x4_sub(vbr0123, vbl0123);
      const v128_t vtd4567 = wasm_f32x4_sub(vtr4567, vtl4567);
      const v128_t vbd4567 = wasm_f32x4_sub(vbr4567, vbl4567);

      const v128_t vt0123 = wasm_f32x4_add(vtl0123, wasm_f32x4_mul(vtd0123, valphah));
      const v128_t vb0123 = wasm_f32x4_add(vbl0123, wasm_f32x4_mul(vbd0123, valphah));
      const v128_t vt4567 = wasm_f32x4_add(vtl4567, wasm_f32x4_mul(vtd4567, valphah));
      const v128_t vb4567 = wasm_f32x4_add(vbl4567, wasm_f32x4_mul(vbd4567, valphah));

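      // Vertical pass: out = top + alphav * (bottom - top).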
      const v128_t vd0123 = wasm_f32x4_sub(vb0123, vt0123);
      const v128_t vd4567 = wasm_f32x4_sub(vb4567, vt4567);

      const v128_t vo0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vd0123, valphav));
      const v128_t vo4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vd4567, valphav));

      wasm_v128_store(output, vo0123);
      wasm_v128_store(output + 4, vo4567);
      output += 8;
    }
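    // Handle a remaining group of 4 channels with a single vector.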
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vtl = wasm_v128_load(i0);
      const v128_t vtr = wasm_v128_load(i1);
      const v128_t vbl = wasm_v128_load(i2);
      const v128_t vbr = wasm_v128_load(i3);
      i0 += 4;
      i1 += 4;
      i2 += 4;
      i3 += 4;

      const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
      const v128_t vbd = wasm_f32x4_sub(vbr, vbl);
      const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
      const v128_t vb = wasm_f32x4_add(vbl, wasm_f32x4_mul(vbd, valphah));
      const v128_t vd = wasm_f32x4_sub(vb, vt);
      const v128_t vo = wasm_f32x4_add(vt, wasm_f32x4_mul(vd, valphav));

      wasm_v128_store(output, vo);
      output += 4;
    }
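    // Tail: 1-3 channels remain. A full 4-lane result is computed and only
    // the valid lanes are stored; the full-width loads assume the input rows
    // carry enough padding that reading past the last channel is safe.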
    if XNN_UNLIKELY(c != 0) {
      const v128_t vtl = wasm_v128_load(i0);
      const v128_t vtr = wasm_v128_load(i1);
      const v128_t vbl = wasm_v128_load(i2);
      const v128_t vbr = wasm_v128_load(i3);

      const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
      const v128_t vbd = wasm_f32x4_sub(vbr, vbl);
      const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
      const v128_t vb = wasm_f32x4_add(vbl, wasm_f32x4_mul(vbd, valphah));
      const v128_t vd = wasm_f32x4_sub(vb, vt);
      v128_t vo = wasm_f32x4_add(vt, wasm_f32x4_mul(vd, valphav));

      if (c & (2 * sizeof(float))) {
        *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
        vo = wasm_v32x4_shuffle(vo, vo, 2, 3, 2, 3);
        output += 2;
      }
      if (c & (1 * sizeof(float))) {
        *output++ = wasm_f32x4_extract_lane(vo, 0);
      }
    }

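    // Skip ahead to the next pixel's output; output_increment is given in bytes.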
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}