// Auto-generated file. Do not edit!
//   Template: src/f32-ibilinear-chw/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/ibilinear.h>

void xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8(
    size_t output_pixels,
    size_t channels,
    const float** restrict input,
    size_t input_offset,
    const float* restrict weights,
    float* restrict output,
    size_t input_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(input_increment % sizeof(float) == 0);

  do {
    const float** i = input;
    const float* w = weights;
    size_t p = output_pixels;
    for (; p >= 8; p -= 8) {
      const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
      const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
      const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
      const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
      const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
      const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
      const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
      const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
      const float* itl4 = (const float*) ((uintptr_t) i[8] + input_offset);
      const float* ibl4 = (const float*) ((uintptr_t) i[9] + input_offset);
      const float* itl5 = (const float*) ((uintptr_t) i[10] + input_offset);
      const float* ibl5 = (const float*) ((uintptr_t) i[11] + input_offset);
      const float* itl6 = (const float*) ((uintptr_t) i[12] + input_offset);
      const float* ibl6 = (const float*) ((uintptr_t) i[13] + input_offset);
      const float* itl7 = (const float*) ((uintptr_t) i[14] + input_offset);
      const float* ibl7 = (const float*) ((uintptr_t) i[15] + input_offset);
      i += 2 * 8;

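      // Each output pixel carries a packed (alpha_h, alpha_v) weight pair,
      // so 8 pixels consume 16 consecutive weight floats.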
      const v128_t vw0123p0 = wasm_v128_load(w + 0);
      const v128_t vw0123p1 = wasm_v128_load(w + 4);
      const v128_t vw4567p0 = wasm_v128_load(w + 8);
      const v128_t vw4567p1 = wasm_v128_load(w + 12);
      w += 2 * 8;

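      // For every pixel, load the (top_left, top_right) pair from the top row
      // and the (bottom_left, bottom_right) pair from the bottom row; pairs
      // from two adjacent pixels are later packed into one 128-bit vector.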
      const v128_t vtltr0 = wasm_v128_load64_splat(itl0);
      const v128_t vblbr0 = wasm_v128_load64_splat(ibl0);
      const double vtltr1 = *((const double*) itl1);
      const double vblbr1 = *((const double*) ibl1);
      const v128_t vtltr2 = wasm_v128_load64_splat(itl2);
      const v128_t vblbr2 = wasm_v128_load64_splat(ibl2);
      const double vtltr3 = *((const double*) itl3);
      const double vblbr3 = *((const double*) ibl3);
      const v128_t vtltr4 = wasm_v128_load64_splat(itl4);
      const v128_t vblbr4 = wasm_v128_load64_splat(ibl4);
      const double vtltr5 = *((const double*) itl5);
      const double vblbr5 = *((const double*) ibl5);
      const v128_t vtltr6 = wasm_v128_load64_splat(itl6);
      const v128_t vblbr6 = wasm_v128_load64_splat(ibl6);
      const double vtltr7 = *((const double*) itl7);
      const double vblbr7 = *((const double*) ibl7);

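      // De-interleave the weights into per-pixel alpha_h and alpha_v vectors,
      // and pack the two-pixel (top, bottom) pairs into full 128-bit vectors.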
      const v128_t valphah0123 = wasm_v32x4_shuffle(vw0123p0, vw0123p1, 0, 2, 4, 6);
      const v128_t valphav0123 = wasm_v32x4_shuffle(vw0123p0, vw0123p1, 1, 3, 5, 7);
      const v128_t valphah4567 = wasm_v32x4_shuffle(vw4567p0, vw4567p1, 0, 2, 4, 6);
      const v128_t valphav4567 = wasm_v32x4_shuffle(vw4567p0, vw4567p1, 1, 3, 5, 7);

      const v128_t vtltr01 = wasm_f64x2_replace_lane(vtltr0, 1, vtltr1);
      const v128_t vblbr01 = wasm_f64x2_replace_lane(vblbr0, 1, vblbr1);
      const v128_t vtltr23 = wasm_f64x2_replace_lane(vtltr2, 1, vtltr3);
      const v128_t vblbr23 = wasm_f64x2_replace_lane(vblbr2, 1, vblbr3);
      const v128_t vtltr45 = wasm_f64x2_replace_lane(vtltr4, 1, vtltr5);
      const v128_t vblbr45 = wasm_f64x2_replace_lane(vblbr4, 1, vblbr5);
      const v128_t vtltr67 = wasm_f64x2_replace_lane(vtltr6, 1, vtltr7);
      const v128_t vblbr67 = wasm_f64x2_replace_lane(vblbr6, 1, vblbr7);

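      // Compute (bottom - top) differences for the left and right samples of
      // each pixel in a single subtraction per vector.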
      const v128_t vldrd01 = wasm_f32x4_sub(vblbr01, vtltr01);
      const v128_t vldrd23 = wasm_f32x4_sub(vblbr23, vtltr23);
      const v128_t vldrd45 = wasm_f32x4_sub(vblbr45, vtltr45);
      const v128_t vldrd67 = wasm_f32x4_sub(vblbr67, vtltr67);

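      // Separate the interleaved (left, right) lanes: vld*/vrd* hold the
      // left/right column differences, vtl*/vtr* the top-row samples.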
      const v128_t vld0123 = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);
      const v128_t vrd0123 = wasm_v32x4_shuffle(vldrd01, vldrd23, 1, 3, 5, 7);
      const v128_t vld4567 = wasm_v32x4_shuffle(vldrd45, vldrd67, 0, 2, 4, 6);
      const v128_t vrd4567 = wasm_v32x4_shuffle(vldrd45, vldrd67, 1, 3, 5, 7);

      const v128_t vtl0123 = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6);
      const v128_t vtr0123 = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7);
      const v128_t vtl4567 = wasm_v32x4_shuffle(vtltr45, vtltr67, 0, 2, 4, 6);
      const v128_t vtr4567 = wasm_v32x4_shuffle(vtltr45, vtltr67, 1, 3, 5, 7);

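      // Vertical interpolation: left  = top_left  + alpha_v * (bottom_left  - top_left),
      //                         right = top_right + alpha_v * (bottom_right - top_right).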
      const v128_t vl0123 = wasm_f32x4_add(vtl0123, wasm_f32x4_mul(vld0123, valphav0123));
      const v128_t vr0123 = wasm_f32x4_add(vtr0123, wasm_f32x4_mul(vrd0123, valphav0123));
      const v128_t vl4567 = wasm_f32x4_add(vtl4567, wasm_f32x4_mul(vld4567, valphav4567));
      const v128_t vr4567 = wasm_f32x4_add(vtr4567, wasm_f32x4_mul(vrd4567, valphav4567));

      const v128_t vd0123 = wasm_f32x4_sub(vr0123, vl0123);
      const v128_t vd4567 = wasm_f32x4_sub(vr4567, vl4567);

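      // Horizontal interpolation: result = left + alpha_h * (right - left).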
      const v128_t vo0123 = wasm_f32x4_add(vl0123, wasm_f32x4_mul(vd0123, valphah0123));
      const v128_t vo4567 = wasm_f32x4_add(vl4567, wasm_f32x4_mul(vd4567, valphah4567));

      wasm_v128_store(output + 0, vo0123);
      wasm_v128_store(output + 4, vo4567);
      output += 8;
    }

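    // Process 4 output pixels per iteration with the same
    // vertical-then-horizontal interpolation scheme as the main loop.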
    for (; p >= 4; p -= 4) {
      const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
      const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
      const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
      const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
      const float* itl2 = (const float*) ((uintptr_t) i[4] + input_offset);
      const float* ibl2 = (const float*) ((uintptr_t) i[5] + input_offset);
      const float* itl3 = (const float*) ((uintptr_t) i[6] + input_offset);
      const float* ibl3 = (const float*) ((uintptr_t) i[7] + input_offset);
      i += 8;

      const v128_t vw0 = wasm_v128_load(w);
      const v128_t vw1 = wasm_v128_load(w + 4);
      w += 8;

      const v128_t vtltr0 = wasm_v128_load64_splat(itl0);
      const v128_t vblbr0 = wasm_v128_load64_splat(ibl0);
      const double vtltr1 = *((const double*) itl1);
      const double vblbr1 = *((const double*) ibl1);
      const v128_t vtltr2 = wasm_v128_load64_splat(itl2);
      const v128_t vblbr2 = wasm_v128_load64_splat(ibl2);
      const double vtltr3 = *((const double*) itl3);
      const double vblbr3 = *((const double*) ibl3);

      const v128_t valphah = wasm_v32x4_shuffle(vw0, vw1, 0, 2, 4, 6);
      const v128_t valphav = wasm_v32x4_shuffle(vw0, vw1, 1, 3, 5, 7);

      const v128_t vtltr01 = wasm_f64x2_replace_lane(vtltr0, 1, vtltr1);
      const v128_t vblbr01 = wasm_f64x2_replace_lane(vblbr0, 1, vblbr1);
      const v128_t vtltr23 = wasm_f64x2_replace_lane(vtltr2, 1, vtltr3);
      const v128_t vblbr23 = wasm_f64x2_replace_lane(vblbr2, 1, vblbr3);

      const v128_t vldrd01 = wasm_f32x4_sub(vblbr01, vtltr01);
      const v128_t vldrd23 = wasm_f32x4_sub(vblbr23, vtltr23);

      const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);
      const v128_t vrd = wasm_v32x4_shuffle(vldrd01, vldrd23, 1, 3, 5, 7);

      const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6);
      const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7);

      const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
      const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));

      const v128_t vd = wasm_f32x4_sub(vr, vl);
      const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));

      wasm_v128_store(output, vo);
      output += 4;
    }

    if XNN_UNLIKELY(p != 0) {
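      // Handle the remaining 1-3 output pixels: a pair of pixels first, then
      // a single pixel with a scalar horizontal interpolation step.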
      if (p & 2) {
        const v128_t vw = wasm_v128_load(w);
        w += 4;

        const v128_t valphah = wasm_v32x4_shuffle(vw, vw, 0, 2, 0, 2);
        const v128_t valphav = wasm_v32x4_shuffle(vw, vw, 1, 3, 1, 3);

        const float* itl0 = (const float*) ((uintptr_t) i[0] + input_offset);
        const float* ibl0 = (const float*) ((uintptr_t) i[1] + input_offset);
        const float* itl1 = (const float*) ((uintptr_t) i[2] + input_offset);
        const float* ibl1 = (const float*) ((uintptr_t) i[3] + input_offset);
        i += 4;

        const v128_t vtltr = wasm_f64x2_replace_lane(wasm_v128_load64_splat(itl0), 1, *((const double*) itl1));
        const v128_t vblbr = wasm_f64x2_replace_lane(wasm_v128_load64_splat(ibl0), 1, *((const double*) ibl1));

        const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
        const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);
        const v128_t vrd = wasm_v32x4_shuffle(vldrd, vldrd, 1, 3, 1, 3);

        const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2);
        const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3);

        const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
        const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));

        const v128_t vd = wasm_f32x4_sub(vr, vl);
        const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));

        *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
        output += 2;
      }

      if (p & 1) {
        // We are computing the following formula:
        //   result = (1 - alpha_h) * (1 - alpha_v) * top_left +
        //                 alpha_h  * (1 - alpha_v) * top_right +
        //            (1 - alpha_h) *      alpha_v  * bottom_left +
        //                 alpha_h  *      alpha_v  * bottom_right.
        //
        // Rearranging gives
        //   result =    left + alpha_h * (right        - left),
        // where
        //   left =  top_left + alpha_v * (bottom_left  - top_left),
        //  right = top_right + alpha_v * (bottom_right - top_right).
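        //
        // For example (illustrative values, not taken from any real input):
        // with top_left = 1, top_right = 3, bottom_left = 5, bottom_right = 7,
        // alpha_h = 0.25 and alpha_v = 0.5, the rearranged form gives
        //    left = 1 + 0.5  * (5 - 1) = 3,
        //   right = 3 + 0.5  * (7 - 3) = 5,
        //  result = 3 + 0.25 * (5 - 3) = 3.5,
        // which matches the direct four-term formula.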

        const float alphah = *w;
        const v128_t valphav = wasm_v128_load32_splat(w + 1);
        w += 2;

        const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
        const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
        i += 2;

        const v128_t vtltr = wasm_v128_load64_splat(itl);
        const v128_t vblbr = wasm_v128_load64_splat(ibl);

        // Compute at once
        //    left_diff = bottom_left  - top_left
        //   right_diff = bottom_right - top_right
        const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
        const v128_t vlr = wasm_f32x4_add(vtltr, wasm_f32x4_mul(vldrd, valphav));

        // Extract them and compute the result.
        const float l = wasm_f32x4_extract_lane(vlr, 0);
        const float r = wasm_f32x4_extract_lane(vlr, 1);

        *output++ = l + alphah * (r - l);
      }
    }

    input_offset += input_increment;
  } while (--channels != 0);
}