// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 8 == 0
$assert KERNEL_TILE >= 2
$assert ACCUMULATORS >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>


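// Loading 8 consecutive entries starting at &mask_table[7 - c] yields c all-ones lanes followed by
// zero lanes; the channel-tail code below uses this as the mask for _mm256_maskload_ps.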
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

$ISA = {0: "avx", 3: "fma3"}[FMA]
void xnn_f32_dwconv_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__${ISA}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(channels != 0);
  assert(output_width != 0);

  const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
  const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
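  // Each iteration of the outer loop computes one output pixel across all channels;
  // input[] supplies the ${KERNEL_TILE} input pointers for that pixel.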
  do {
    $for K in range(KERNEL_TILE):
      const float* i${K} = input[${K}];
      assert(i${K} != NULL);
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
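    // Main loop: process ${CHANNEL_TILE} channels at a time; the accumulators are initialized
    // from the packed per-channel bias that precedes the per-tap weights.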
    for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
      __m256 vacc${ABC[0:8]}p0 = _mm256_load_ps(w);
      $for C in range(8, CHANNEL_TILE, 8):
        __m256 vacc${ABC[C:C+8]}p0 = _mm256_load_ps(w + ${C});

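      // Accumulate the ${KERNEL_TILE} kernel taps for each group of 8 channels.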
      $for K in range(KERNEL_TILE):

        const __m256 vi${K}x${ABC[0:8]} = _mm256_loadu_ps(i${K});
        $for C in range(8, CHANNEL_TILE, 8):
          const __m256 vi${K}x${ABC[C:C+8]} = _mm256_loadu_ps(i${K} + ${C});
        i${K} += ${CHANNEL_TILE};

        $for C in range(0, CHANNEL_TILE, 8):
          const __m256 vk${K}x${ABC[C:C+8]} = _mm256_load_ps(w + ${(K + 1) * CHANNEL_TILE + C});
        $for C in range(0, CHANNEL_TILE, 8):
          $if 1 <= K < ACCUMULATORS:
            __m256 vacc${ABC[C:C+8]}p${K} = _mm256_mul_ps(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]});
          $elif FMA == 3:
            vacc${ABC[C:C+8]}p${K % ACCUMULATORS} = _mm256_fmadd_ps(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]}, vacc${ABC[C:C+8]}p${K % ACCUMULATORS});
          $else:
            vacc${ABC[C:C+8]}p${K % ACCUMULATORS} = _mm256_add_ps(vacc${ABC[C:C+8]}p${K % ACCUMULATORS}, _mm256_mul_ps(vi${K}x${ABC[C:C+8]}, vk${K}x${ABC[C:C+8]}));

      w += ${(KERNEL_TILE + 1) * CHANNEL_TILE};

      $if ACCUMULATORS > 1:
        // Add up all accumulators to vacc${ABC[0:CHANNEL_TILE]}p0
        $ACC_SLICE = 1
        $while ACC_SLICE < ACCUMULATORS:
          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
            $if A + ACC_SLICE < ACCUMULATORS:
              $for C in range(0, CHANNEL_TILE, 8):
                vacc${ABC[C:C+8]}p${A} = _mm256_add_ps(vacc${ABC[C:C+8]}p${A}, vacc${ABC[C:C+8]}p${A + ACC_SLICE});
          $ACC_SLICE *= 2

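      // Clamp the results to the [min, max] output range.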
      $for C in range(0, CHANNEL_TILE, 8):
        __m256 vacc${ABC[C:C+8]} = _mm256_max_ps(vacc${ABC[C:C+8]}p0, vmin);
      $for C in range(0, CHANNEL_TILE, 8):
        vacc${ABC[C:C+8]} = _mm256_min_ps(vacc${ABC[C:C+8]}, vmax);

      _mm256_storeu_ps(output, vacc${ABC[0:8]});
      $for C in range(8, CHANNEL_TILE, 8):
        _mm256_storeu_ps(output + ${C}, vacc${ABC[C:C+8]});
      output += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 8:
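      // Process the remaining channels in groups of 8.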
      for (; c >= 8; c -= 8) {
        __m256 vacc01234567p0 = _mm256_load_ps(w);
        $for K in range(KERNEL_TILE):

          const __m256 vi${K}x01234567 = _mm256_loadu_ps(i${K});
          i${K} += 8;

          const __m256 vk${K}x01234567 = _mm256_load_ps(w + ${(K + 1) * CHANNEL_TILE});
          $if 1 <= K < ACCUMULATORS:
            __m256 vacc01234567p${K} = _mm256_mul_ps(vi${K}x01234567, vk${K}x01234567);
          $elif FMA == 3:
            vacc01234567p${K % ACCUMULATORS} = _mm256_fmadd_ps(vi${K}x01234567, vk${K}x01234567, vacc01234567p${K % ACCUMULATORS});
          $else:
            vacc01234567p${K % ACCUMULATORS} = _mm256_add_ps(vacc01234567p${K % ACCUMULATORS}, _mm256_mul_ps(vi${K}x01234567, vk${K}x01234567));

        w += 8;

        $if ACCUMULATORS > 1:
          // Add up all accumulators to vacc${ABC[0:8]}p0
          $ACC_SLICE = 1
          $while ACC_SLICE < ACCUMULATORS:
            $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
              $if A + ACC_SLICE < ACCUMULATORS:
                vacc01234567p${A} = _mm256_add_ps(vacc01234567p${A}, vacc01234567p${A + ACC_SLICE});
            $ACC_SLICE *= 2

        __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
        vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

        _mm256_storeu_ps(output, vacc01234567);
        output += 8;
      }
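    // Handle the final 1-7 channels with a masked load and partial stores.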
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 7);
      __m256i vmask = _mm256_loadu_si256((const __m256i*) &mask_table[7 - c]);

      __m256 vacc01234567p0 = _mm256_load_ps(w);
      $for K in range(KERNEL_TILE):

        const __m256 vi${K}x01234567 = _mm256_maskload_ps(i${K}, vmask);
        const __m256 vk${K}x01234567 = _mm256_load_ps(w + ${(K + 1) * CHANNEL_TILE});
        $if 1 <= K < ACCUMULATORS:
          __m256 vacc01234567p${K} = _mm256_mul_ps(vi${K}x01234567, vk${K}x01234567);
        $elif FMA == 3:
          vacc01234567p${K % ACCUMULATORS} = _mm256_fmadd_ps(vi${K}x01234567, vk${K}x01234567, vacc01234567p${K % ACCUMULATORS});
        $else:
          vacc01234567p${K % ACCUMULATORS} = _mm256_add_ps(vacc01234567p${K % ACCUMULATORS}, _mm256_mul_ps(vi${K}x01234567, vk${K}x01234567));

      $if ACCUMULATORS > 1:
        // Add up all accumulators to vacc${ABC[0:8]}p0
        $ACC_SLICE = 1
        $while ACC_SLICE < ACCUMULATORS:
          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
            $if A + ACC_SLICE < ACCUMULATORS:
              vacc01234567p${A} = _mm256_add_ps(vacc01234567p${A}, vacc01234567p${A + ACC_SLICE});
          $ACC_SLICE *= 2

      __m256 vacc01234567 = _mm256_max_ps(vacc01234567p0, vmin);
      vacc01234567 = _mm256_min_ps(vacc01234567, vmax);

      // _mm256_maskstore_ps(output, vmask, vacc01234567); output += c; could be used here, but triggers msan failures (probably an msan bug).
      __m128 vacc0123 = _mm256_castps256_ps128(vacc01234567);
      if (c & 4) {
        _mm_storeu_ps(output, vacc0123);
        vacc0123 = _mm256_extractf128_ps(vacc01234567, 1);
        output += 4;
      }
      if (c & 2) {
        _mm_storel_pi((__m64*) output, vacc0123);
        vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
        output += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vacc0123);
        output += 1;
      }
    }

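    // output_increment skips from the end of this pixel's channels to the start of the next output pixel.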
    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}