// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert PIXEL_TILE == 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/ibilinear.h>


// F32 bilinear-interpolation (indirect) micro-kernel template, WASM SIMD.
// For each output pixel, blends 4 corner input rows (top-left, top-right,
// bottom-left, bottom-right) channel-wise using per-pixel weights:
//   t = tl + (tr - tl) * alphah     (horizontal blend, top row)
//   b = bl + (br - bl) * alphah     (horizontal blend, bottom row)
//   o = t  + (b  - t ) * alphav     (vertical blend)
// The template is specialized on CHANNEL_TILE (multiple of 4) channels per
// main-loop iteration; PIXEL_TILE is fixed to 1.
void xnn_f32_ibilinear_ukernel__wasmsimd_c${CHANNEL_TILE}${"" if PIXEL_TILE == 1 else "x%d" % PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const float**restrict input,
    size_t input_offset,
    const float*restrict weights,
    float*restrict output,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  do {
    // Corner row pointers for this pixel: i0 = top-left, i1 = top-right,
    // i2 = bottom-left, i3 = bottom-right; input_offset rebases the
    // indirection pointers into the caller's buffer.
    const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset);
    const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset);
    const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset);
    const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset);
    input += 4;

    // Per-pixel weights, broadcast to all 4 lanes:
    // alphah blends left<->right, alphav blends top<->bottom.
    const v128_t valphah = wasm_v32x4_load_splat(weights);
    const v128_t valphav = wasm_v32x4_load_splat(weights + 1);
    weights += 2;

    size_t c = channels;
    $if CHANNEL_TILE > 4:
      // Main loop: CHANNEL_TILE channels per iteration (c is in bytes).
      for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
        const v128_t vtl${ABC[0:4]} = wasm_v128_load(i0);
        const v128_t vtr${ABC[0:4]} = wasm_v128_load(i1);
        const v128_t vbl${ABC[0:4]} = wasm_v128_load(i2);
        const v128_t vbr${ABC[0:4]} = wasm_v128_load(i3);
        $for C in range(4, CHANNEL_TILE, 4):
          const v128_t vtl${ABC[C:C+4]} = wasm_v128_load(i0 + ${C});
          const v128_t vtr${ABC[C:C+4]} = wasm_v128_load(i1 + ${C});
          const v128_t vbl${ABC[C:C+4]} = wasm_v128_load(i2 + ${C});
          const v128_t vbr${ABC[C:C+4]} = wasm_v128_load(i3 + ${C});
        i0 += ${CHANNEL_TILE};
        i1 += ${CHANNEL_TILE};
        i2 += ${CHANNEL_TILE};
        i3 += ${CHANNEL_TILE};

        // Horizontal deltas: right - left, for top and bottom rows.
        $for C in range(0, CHANNEL_TILE, 4):
          const v128_t vtd${ABC[C:C+4]} = wasm_f32x4_sub(vtr${ABC[C:C+4]}, vtl${ABC[C:C+4]});
          const v128_t vbd${ABC[C:C+4]} = wasm_f32x4_sub(vbr${ABC[C:C+4]}, vbl${ABC[C:C+4]});

        // Horizontal interpolation: left + delta * alphah.
        $for C in range(0, CHANNEL_TILE, 4):
          const v128_t vt${ABC[C:C+4]} = wasm_f32x4_add(vtl${ABC[C:C+4]}, wasm_f32x4_mul(vtd${ABC[C:C+4]}, valphah));
          const v128_t vb${ABC[C:C+4]} = wasm_f32x4_add(vbl${ABC[C:C+4]}, wasm_f32x4_mul(vbd${ABC[C:C+4]}, valphah));

        // Vertical delta: bottom - top.
        $for C in range(0, CHANNEL_TILE, 4):
          const v128_t vd${ABC[C:C+4]} = wasm_f32x4_sub(vb${ABC[C:C+4]}, vt${ABC[C:C+4]});

        // Vertical interpolation: top + delta * alphav.
        $for C in range(0, CHANNEL_TILE, 4):
          const v128_t vo${ABC[C:C+4]} = wasm_f32x4_add(vt${ABC[C:C+4]}, wasm_f32x4_mul(vd${ABC[C:C+4]}, valphav));

        wasm_v128_store(output, vo${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          wasm_v128_store(output + ${C}, vo${ABC[C:C+4]});
        output += ${CHANNEL_TILE};
      }
    // Remainder loop: 4 channels at a time.
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vtl = wasm_v128_load(i0);
      const v128_t vtr = wasm_v128_load(i1);
      const v128_t vbl = wasm_v128_load(i2);
      const v128_t vbr = wasm_v128_load(i3);
      i0 += 4;
      i1 += 4;
      i2 += 4;
      i3 += 4;

      const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
      const v128_t vbd = wasm_f32x4_sub(vbr, vbl);
      const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
      const v128_t vb = wasm_f32x4_add(vbl, wasm_f32x4_mul(vbd, valphah));
      const v128_t vd = wasm_f32x4_sub(vb, vt);
      const v128_t vo = wasm_f32x4_add(vt, wasm_f32x4_mul(vd, valphav));

      wasm_v128_store(output, vo);
      output += 4;
    }
    // Tail: 1-3 leftover channels. Computes a full 4-lane vector, then
    // stores 2 lanes and/or 1 lane as needed.
    // NOTE(review): the full-vector loads read up to 4 floats even when
    // fewer channels remain — presumably the input rows are padded/readable
    // past the tail; confirm against the kernel's buffer requirements.
    if XNN_UNLIKELY(c != 0) {
      const v128_t vtl = wasm_v128_load(i0);
      const v128_t vtr = wasm_v128_load(i1);
      const v128_t vbl = wasm_v128_load(i2);
      const v128_t vbr = wasm_v128_load(i3);

      const v128_t vtd = wasm_f32x4_sub(vtr, vtl);
      const v128_t vbd = wasm_f32x4_sub(vbr, vbl);
      const v128_t vt = wasm_f32x4_add(vtl, wasm_f32x4_mul(vtd, valphah));
      const v128_t vb = wasm_f32x4_add(vbl, wasm_f32x4_mul(vbd, valphah));
      const v128_t vd = wasm_f32x4_sub(vb, vt);
      v128_t vo = wasm_f32x4_add(vt, wasm_f32x4_mul(vd, valphav));

      if (c & (2 * sizeof(float))) {
        // Store the low two f32 lanes as one 64-bit write, then shift the
        // high lanes down for the possible 1-lane store below.
        *((double*) output) = wasm_f64x2_extract_lane(vo, 0);
        vo = wasm_v32x4_shuffle(vo, vo, 2, 3, 2, 3);
        output += 2;
      }
      if (c & (1 * sizeof(float))) {
        *output++ = wasm_f32x4_extract_lane(vo, 0);
      }
    }

    // Advance to the next pixel's output (stride supplied by the caller).
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}