// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert PIXEL_TILE >= 1
$assert PIXEL_TILE % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/ibilinear.h>

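// Bilinear interpolation for CHW-layout tensors. For every channel, each output
// pixel is blended from a 2x2 input neighborhood: input supplies two pointers per
// pixel (to the top-left and bottom-left elements; their right neighbors are
// adjacent in memory), weights supplies an interleaved (alpha_h, alpha_v) pair per
// pixel, and input_offset / input_increment (both in bytes) select the channel plane.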
void xnn_f32_ibilinear_chw_ukernel__wasmsimd_p${PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const float**restrict input,
    size_t input_offset,
    const float*restrict weights,
    float*restrict output,
    size_t input_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(input_increment % sizeof(float) == 0);

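  // The per-pixel pointers and weights are reused for every channel; only the byte
  // offset into the input is advanced from one channel to the next.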
  do {
    const float** i = input;
    const float* w = weights;
    size_t p = output_pixels;
    $if PIXEL_TILE > 4:
      for (; p >= ${PIXEL_TILE}; p -= ${PIXEL_TILE}) {
        $for P in range(PIXEL_TILE):
          const float* itl${ABC[P]} = (const float*) ((uintptr_t) i[${2 * P}] + input_offset);
          const float* ibl${ABC[P]} = (const float*) ((uintptr_t) i[${2 * P + 1}] + input_offset);
        i += 2 * ${PIXEL_TILE};

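        // Load ${PIXEL_TILE} interleaved (alpha_h, alpha_v) weight pairs.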
        $for P in range(0, PIXEL_TILE, 4):
          const v128_t vw${ABC[P:P+4]}p0 = wasm_v128_load(w + ${2 * P});
          const v128_t vw${ABC[P:P+4]}p1 = wasm_v128_load(w + ${2 * P + 4});
        w += 2 * ${PIXEL_TILE};

        $for P in range(0, PIXEL_TILE, 2):
          const v128_t vtltr${ABC[P]} = wasm_v64x2_load_splat(itl${ABC[P]});
          const v128_t vblbr${ABC[P]} = wasm_v64x2_load_splat(ibl${ABC[P]});
          const double vtltr${ABC[P+1]} = *((const double*) itl${ABC[P+1]});
          const double vblbr${ABC[P+1]} = *((const double*) ibl${ABC[P+1]});

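        // Deinterleave the weights: even lanes hold alpha_h, odd lanes hold alpha_v.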
        $for P in range(0, PIXEL_TILE, 4):
          const v128_t valphah${ABC[P:P+4]} = wasm_v32x4_shuffle(vw${ABC[P:P+4]}p0, vw${ABC[P:P+4]}p1, 0, 2, 4, 6);
          const v128_t valphav${ABC[P:P+4]} = wasm_v32x4_shuffle(vw${ABC[P:P+4]}p0, vw${ABC[P:P+4]}p1, 1, 3, 5, 7);

        $for P in range(0, PIXEL_TILE, 2):
          const v128_t vtltr${ABC[P:P+2]} = wasm_f64x2_replace_lane(vtltr${ABC[P]}, 1, vtltr${ABC[P+1]});
          const v128_t vblbr${ABC[P:P+2]} = wasm_f64x2_replace_lane(vblbr${ABC[P]}, 1, vblbr${ABC[P+1]});

        $for P in range(0, PIXEL_TILE, 2):
          const v128_t vldrd${ABC[P:P+2]} = wasm_f32x4_sub(vblbr${ABC[P:P+2]}, vtltr${ABC[P:P+2]});

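        // Split the bottom-minus-top differences into left-column and right-column vectors.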
        $for P in range(0, PIXEL_TILE, 4):
          const v128_t vld${ABC[P:P+4]} = wasm_v32x4_shuffle(vldrd${ABC[P:P+2]}, vldrd${ABC[P+2:P+4]}, 0, 2, 4, 6);
          const v128_t vrd${ABC[P:P+4]} = wasm_v32x4_shuffle(vldrd${ABC[P:P+2]}, vldrd${ABC[P+2:P+4]}, 1, 3, 5, 7);

        $for P in range(0, PIXEL_TILE, 4):
          const v128_t vtl${ABC[P:P+4]} = wasm_v32x4_shuffle(vtltr${ABC[P:P+2]}, vtltr${ABC[P+2:P+4]}, 0, 2, 4, 6);
          const v128_t vtr${ABC[P:P+4]} = wasm_v32x4_shuffle(vtltr${ABC[P:P+2]}, vtltr${ABC[P+2:P+4]}, 1, 3, 5, 7);

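        // Interpolate vertically along the left and right columns, then horizontally between them.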
        $for P in range(0, PIXEL_TILE, 4):
          const v128_t vl${ABC[P:P+4]} = wasm_f32x4_add(vtl${ABC[P:P+4]}, wasm_f32x4_mul(vld${ABC[P:P+4]}, valphav${ABC[P:P+4]}));
          const v128_t vr${ABC[P:P+4]} = wasm_f32x4_add(vtr${ABC[P:P+4]}, wasm_f32x4_mul(vrd${ABC[P:P+4]}, valphav${ABC[P:P+4]}));

        $for P in range(0, PIXEL_TILE, 4):
          const v128_t vd${ABC[P:P+4]} = wasm_f32x4_sub(vr${ABC[P:P+4]}, vl${ABC[P:P+4]});

        $for P in range(0, PIXEL_TILE, 4):
          const v128_t vo${ABC[P:P+4]} = wasm_f32x4_add(vl${ABC[P:P+4]}, wasm_f32x4_mul(vd${ABC[P:P+4]}, valphah${ABC[P:P+4]}));

        $for P in range(0, PIXEL_TILE, 4):
          wasm_v128_store(output + ${P}, vo${ABC[P:P+4]});
        output += ${PIXEL_TILE};
      }

    for (; p >= 4; p -= 4) {
      $for P in range(4):
        const float* itl${P} = (const float*) ((uintptr_t) i[${2 * P}] + input_offset);
        const float* ibl${P} = (const float*) ((uintptr_t) i[${2 * P + 1}] + input_offset);
      i += 8;

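      // Load 4 interleaved (alpha_h, alpha_v) weight pairs.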
      const v128_t vw0 = wasm_v128_load(w);
      const v128_t vw1 = wasm_v128_load(w + 4);
      w += 8;

      $for P in range(0, 4, 2):
        const v128_t vtltr${ABC[P]} = wasm_v64x2_load_splat(itl${P});
        const v128_t vblbr${ABC[P]} = wasm_v64x2_load_splat(ibl${P});
        const double vtltr${ABC[P+1]} = *((const double*) itl${P+1});
        const double vblbr${ABC[P+1]} = *((const double*) ibl${P+1});

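      // Deinterleave the weights: even lanes hold alpha_h, odd lanes hold alpha_v.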
      const v128_t valphah = wasm_v32x4_shuffle(vw0, vw1, 0, 2, 4, 6);
      const v128_t valphav = wasm_v32x4_shuffle(vw0, vw1, 1, 3, 5, 7);

      $for P in range(0, 4, 2):
        const v128_t vtltr${ABC[P:P+2]} = wasm_f64x2_replace_lane(vtltr${ABC[P]}, 1, vtltr${ABC[P+1]});
        const v128_t vblbr${ABC[P:P+2]} = wasm_f64x2_replace_lane(vblbr${ABC[P]}, 1, vblbr${ABC[P+1]});

      $for P in range(0, 4, 2):
        const v128_t vldrd${ABC[P:P+2]} = wasm_f32x4_sub(vblbr${ABC[P:P+2]}, vtltr${ABC[P:P+2]});

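      // Split the bottom-minus-top differences into left-column and right-column vectors.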
      const v128_t vld = wasm_v32x4_shuffle(vldrd01, vldrd23, 0, 2, 4, 6);
      const v128_t vrd = wasm_v32x4_shuffle(vldrd01, vldrd23, 1, 3, 5, 7);

      const v128_t vtl = wasm_v32x4_shuffle(vtltr01, vtltr23, 0, 2, 4, 6);
      const v128_t vtr = wasm_v32x4_shuffle(vtltr01, vtltr23, 1, 3, 5, 7);

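      // Interpolate vertically within the left and right columns.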
      const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
      const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));

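      // Interpolate horizontally between the left and right columns.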
      const v128_t vd = wasm_f32x4_sub(vr, vl);
      const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));

      wasm_v128_store(output, vo);
      output += 4;
    }

    if XNN_UNLIKELY(p != 0) {
      if (p & 2) {
        const v128_t vw = wasm_v128_load(w);
        w += 4;

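        // Duplicate the two pixels' alpha_h / alpha_v values across all lanes; only
        // the low half of the final result is stored.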
        const v128_t valphah = wasm_v32x4_shuffle(vw, vw, 0, 2, 0, 2);
        const v128_t valphav = wasm_v32x4_shuffle(vw, vw, 1, 3, 1, 3);

        $for P in range(2):
          const float* itl${P} = (const float*) ((uintptr_t) i[${2 * P}] + input_offset);
          const float* ibl${P} = (const float*) ((uintptr_t) i[${2 * P + 1}] + input_offset);
        i += 4;

        const v128_t vtltr = wasm_f64x2_replace_lane(wasm_v64x2_load_splat(itl0), 1, *((const double*) itl1));
        const v128_t vblbr = wasm_f64x2_replace_lane(wasm_v64x2_load_splat(ibl0), 1, *((const double*) ibl1));

        const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
        const v128_t vld = wasm_v32x4_shuffle(vldrd, vldrd, 0, 2, 0, 2);
        const v128_t vrd = wasm_v32x4_shuffle(vldrd, vldrd, 1, 3, 1, 3);

        const v128_t vtl = wasm_v32x4_shuffle(vtltr, vtltr, 0, 2, 0, 2);
        const v128_t vtr = wasm_v32x4_shuffle(vtltr, vtltr, 1, 3, 1, 3);

        const v128_t vl = wasm_f32x4_add(vtl, wasm_f32x4_mul(vld, valphav));
        const v128_t vr = wasm_f32x4_add(vtr, wasm_f32x4_mul(vrd, valphav));

        const v128_t vd = wasm_f32x4_sub(vr, vl);
        const v128_t vo = wasm_f32x4_add(vl, wasm_f32x4_mul(vd, valphah));

        *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
        output += 2;
      }

      if (p & 1) {
        // We are computing the following formula:
        //   result = (1 - alpha_h) * (1 - alpha_v) * top_left +
        //                 alpha_h  * (1 - alpha_v) * top_right +
        //            (1 - alpha_h) *      alpha_v  * bottom_left +
        //                 alpha_h  *      alpha_v  * bottom_right.
        //
        // Rearranging gives
        //   result =    left + alpha_h * (right        - left),
        // where
        //   left =  top_left + alpha_v * (bottom_left  - top_left),
        //  right = top_right + alpha_v * (bottom_right - top_right).

        const float alphah = *w;
        const v128_t valphav = wasm_v32x4_load_splat(w + 1);
        w += 2;

        const float* itl = (const float*) ((uintptr_t) i[0] + input_offset);
        const float* ibl = (const float*) ((uintptr_t) i[1] + input_offset);
        i += 2;

        const v128_t vtltr = wasm_v64x2_load_splat(itl);
        const v128_t vblbr = wasm_v64x2_load_splat(ibl);

        // Compute at once
        //    left_diff = bottom_left  - top_left
        //   right_diff = bottom_right - top_right
        const v128_t vldrd = wasm_f32x4_sub(vblbr, vtltr);
        const v128_t vlr = wasm_f32x4_add(vtltr, wasm_f32x4_mul(vldrd, valphav));

        // Extract them and compute the result.
        const float l = wasm_f32x4_extract_lane(vlr, 0);
        const float r = wasm_f32x4_extract_lane(vlr, 1);

        *output++ = l + alphah * (r - l);
      }
    }

    input_offset += input_increment;
  } while (--channels != 0);
}