// Auto-generated file. Do not edit!
//   Template: src/f16-vmulcaddc/fma3.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>

void xnn_f16_vmulcaddc_minmax_ukernel_c16__fma3_2x(
    size_t rows,
    size_t channels,
    const void* restrict input,
    size_t input_stride,
    const void* restrict weights,
    void* restrict output,
    size_t output_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(uint16_t) == 0);

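  // Two rows are processed per outer iteration; i0/o0 and i1/o1 track the current pair of rows.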
  const uint16_t* i0 = (const uint16_t*) input;
  uint16_t* o0 = (uint16_t*) output;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);

  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

  const __m256 vmin = _mm256_load_ps(params->avx.min);
  const __m256 vmax = _mm256_load_ps(params->avx.max);
  do {
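    // If only a single row remains, alias the second row's pointers to the first so the
    // same row is computed and written twice; the redundant work is harmless.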
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }

    const uint16_t* w = (const uint16_t*) weights;
    size_t c = channels;
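    // Main loop: 16 channels per iteration. The packed weights store a group of 16
    // half-precision scales followed by the 16 matching biases, hence the w + 16 offset.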
    for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
      const __m256 vscale01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      const __m256 vscale89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));

      __m256 vacc0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      __m256 vacc0x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
      i0 += 16;
      __m256 vacc1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      __m256 vacc1x89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
      i1 += 16;

      const __m256 vbias01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
      const __m256 vbias89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 24)));
      w += 32;

      vacc0x01234567 = _mm256_fmadd_ps(vacc0x01234567, vscale01234567, vbias01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(vacc0x89ABCDEF, vscale89ABCDEF, vbias89ABCDEF);
      vacc1x01234567 = _mm256_fmadd_ps(vacc1x01234567, vscale01234567, vbias01234567);
      vacc1x89ABCDEF = _mm256_fmadd_ps(vacc1x89ABCDEF, vscale89ABCDEF, vbias89ABCDEF);

      vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
      vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
      vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
      vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);

      vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
      vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
      vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
      vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o0 + 8), _mm256_cvtps_ph(vacc0x89ABCDEF, _MM_FROUND_NO_EXC));
      o0 += 16;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o1 + 8), _mm256_cvtps_ph(vacc1x89ABCDEF, _MM_FROUND_NO_EXC));
      o1 += 16;
    }
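    // Remainder loop: 8 channels per iteration. The bias for these channels still sits
    // 16 halfwords past the scale within the same packed group, so w advances by only 8.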
    for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
      const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 += 8;
      __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 += 8;

      const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));
      w += 8;

      vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
      vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);

      vacc0 = _mm256_max_ps(vacc0, vmin);
      vacc1 = _mm256_max_ps(vacc1, vmin);

      vacc0 = _mm256_min_ps(vacc0, vmax);
      vacc1 = _mm256_min_ps(vacc1, vmax);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0, _MM_FROUND_NO_EXC));
      o0 += 8;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1, _MM_FROUND_NO_EXC));
      o1 += 8;
    }
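    // Final 1-7 channels: the full 128-bit loads may read past the valid input data, which
    // the XNN_OOB_READS annotation on this kernel indicates; only c valid elements are stored.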
    if XNN_UNLIKELY(c != 0) {
      const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 = (const uint16_t*) ((uintptr_t) i0 + c);
      __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 = (const uint16_t*) ((uintptr_t) i1 + c);

      const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 16)));

      vacc0 = _mm256_fmadd_ps(vacc0, vscale, vbias);
      vacc1 = _mm256_fmadd_ps(vacc1, vscale, vbias);

      vacc0 = _mm256_max_ps(vacc0, vmin);
      vacc1 = _mm256_max_ps(vacc1, vmin);

      vacc0 = _mm256_min_ps(vacc0, vmax);
      vacc1 = _mm256_min_ps(vacc1, vmax);

      __m128i vh0 = _mm256_cvtps_ph(vacc0, _MM_FROUND_NO_EXC);
      __m128i vh1 = _mm256_cvtps_ph(vacc1, _MM_FROUND_NO_EXC);

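      // Store the remaining 1-7 halfwords in decreasing power-of-two chunks (4, 2, 1),
      // shifting the already-stored elements out of the vector after each partial store.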
      if (c & (4 * sizeof(uint16_t))) {
        _mm_storel_epi64((__m128i*) o0, vh0);
        _mm_storel_epi64((__m128i*) o1, vh1);

        vh0 = _mm_unpackhi_epi64(vh0, vh0);
        vh1 = _mm_unpackhi_epi64(vh1, vh1);

        o0 += 4;
        o1 += 4;
      }
      if (c & (2 * sizeof(uint16_t))) {
        *((uint32_t*) o0) = (uint32_t) _mm_cvtsi128_si32(vh0);
        *((uint32_t*) o1) = (uint32_t) _mm_cvtsi128_si32(vh1);

        vh0 = _mm_srli_epi64(vh0, 32);
        vh1 = _mm_srli_epi64(vh1, 32);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(uint16_t))) {
        *o0 = (uint16_t) _mm_extract_epi16(vh0, 0);
        *o1 = (uint16_t) _mm_extract_epi16(vh1, 0);

        o0 += 1;
        o1 += 1;
      }
    }
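    // Advance all pointers to the next pair of rows. doz() is a difference-or-zero
    // (saturating) subtraction, so a trailing single row brings rows to 0 rather than underflowing.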
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
    o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
    o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);
  } while (rows != 0);
}