// Auto-generated file. Do not edit!
//   Template: src/f16-prelu/f16c.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


void xnn_f16_prelu_ukernel__f16c_2x16(
    size_t rows,
    size_t channels,
    const void* restrict input,
    size_t input_stride,
    const void* restrict weights,
    void* restrict output,
    size_t output_stride) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(uint16_t) == 0);

  const uint16_t* i0 = (const uint16_t*) input;
  uint16_t* o0 = (uint16_t*) output;
  const uint16_t* i1 = (const uint16_t*) ((uintptr_t) i0 + input_stride);
  uint16_t* o1 = (uint16_t*) ((uintptr_t) o0 + output_stride);

  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

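  // The loop below processes two rows per iteration. channels and c count bytes
  // (hence the sizeof(uint16_t) factors), and input_increment/output_increment
  // advance each row pointer past the pair of rows just processed.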
  do {
    if XNN_UNPREDICTABLE(rows < 2) {
      i1 = i0;
      o1 = o0;
    }
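    // Note: for an odd number of rows, the last pass computes the final row twice;
    // i1/o1 alias i0/o0, so the duplicate result harmlessly overwrites the same output.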

    const uint16_t* w = (const uint16_t*) weights;
    size_t c = channels;
    for (; c >= 16 * sizeof(uint16_t); c -= 16 * sizeof(uint16_t)) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      const __m256 vw89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + 8)));
      w += 16;

      const __m256 vi0x001234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      const __m256 vi0x089ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i0 + 8)));
      i0 += 16;
      const __m256 vi1x001234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      const __m256 vi1x089ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i1 + 8)));
      i1 += 16;

      __m256 vacc0x001234567 = _mm256_mul_ps(vi0x001234567, vw01234567);
      __m256 vacc0x089ABCDEF = _mm256_mul_ps(vi0x089ABCDEF, vw89ABCDEF);
      __m256 vacc1x001234567 = _mm256_mul_ps(vi1x001234567, vw01234567);
      __m256 vacc1x089ABCDEF = _mm256_mul_ps(vi1x089ABCDEF, vw89ABCDEF);

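      // PReLU: blendv selects the weighted product where the input's sign bit is set
      // (negative lanes) and passes the original input through where it is non-negative.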
      vacc0x001234567 = _mm256_blendv_ps(vi0x001234567, vacc0x001234567, vi0x001234567);
      vacc0x089ABCDEF = _mm256_blendv_ps(vi0x089ABCDEF, vacc0x089ABCDEF, vi0x089ABCDEF);
      vacc1x001234567 = _mm256_blendv_ps(vi1x001234567, vacc1x001234567, vi1x001234567);
      vacc1x089ABCDEF = _mm256_blendv_ps(vi1x089ABCDEF, vacc1x089ABCDEF, vi1x089ABCDEF);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x001234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o0 + 8), _mm256_cvtps_ph(vacc0x089ABCDEF, _MM_FROUND_NO_EXC));
      o0 += 16;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x001234567, _MM_FROUND_NO_EXC));
      _mm_storeu_si128((__m128i*) (o1 + 8), _mm256_cvtps_ph(vacc1x089ABCDEF, _MM_FROUND_NO_EXC));
      o1 += 16;
    }
    for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      w += 8;

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 += 8;
      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 += 8;

      __m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
      __m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);

      vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
      vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);

      _mm_storeu_si128((__m128i*) o0, _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC));
      o0 += 8;
      _mm_storeu_si128((__m128i*) o1, _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC));
      o1 += 8;
    }
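    // Remainder of fewer than 8 channels: the kernel is annotated XNN_OOB_READS,
    // so it may load a full 8-element vector past the end of the row; only the
    // first c bytes of the result are stored.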
    if XNN_UNLIKELY(c != 0) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      const __m256 vi0x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
      i0 = (const uint16_t*) ((uintptr_t) i0 + c);
      const __m256 vi1x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
      i1 = (const uint16_t*) ((uintptr_t) i1 + c);

      __m256 vacc0x01234567 = _mm256_mul_ps(vi0x01234567, vw01234567);
      __m256 vacc1x01234567 = _mm256_mul_ps(vi1x01234567, vw01234567);

      vacc0x01234567 = _mm256_blendv_ps(vi0x01234567, vacc0x01234567, vi0x01234567);
      vacc1x01234567 = _mm256_blendv_ps(vi1x01234567, vacc1x01234567, vi1x01234567);

      __m128i vh0x01234567 = _mm256_cvtps_ph(vacc0x01234567, _MM_FROUND_NO_EXC);
      __m128i vh1x01234567 = _mm256_cvtps_ph(vacc1x01234567, _MM_FROUND_NO_EXC);
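      // Store the tail by testing bits of the remaining byte count c:
      // 4 halves (8 bytes), then 2 halves (4 bytes), then 1 half (2 bytes).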
      if (c & (4 * sizeof(uint16_t))) {
        _mm_storel_epi64((__m128i*) o0, vh0x01234567);
        _mm_storel_epi64((__m128i*) o1, vh1x01234567);

        vh0x01234567 = _mm_unpackhi_epi64(vh0x01234567, vh0x01234567);
        vh1x01234567 = _mm_unpackhi_epi64(vh1x01234567, vh1x01234567);

        o0 += 4;
        o1 += 4;
      }
      if (c & (2 * sizeof(uint16_t))) {
        *((uint32_t*) o0) = (uint32_t) _mm_cvtsi128_si32(vh0x01234567);
        *((uint32_t*) o1) = (uint32_t) _mm_cvtsi128_si32(vh1x01234567);

        vh0x01234567 = _mm_srli_epi64(vh0x01234567, 32);
        vh1x01234567 = _mm_srli_epi64(vh1x01234567, 32);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(uint16_t))) {
        *o0 = (uint16_t) _mm_extract_epi16(vh0x01234567, 0);
        *o1 = (uint16_t) _mm_extract_epi16(vh1x01234567, 0);

        o0 += 1;
        o1 += 1;
      }
    }
    i0 = (const uint16_t*) ((uintptr_t) i0 + input_increment);
    o0 = (uint16_t*) ((uintptr_t) o0 + output_increment);
    i1 = (const uint16_t*) ((uintptr_t) i1 + input_increment);
    o1 = (uint16_t*) ((uintptr_t) o1 + output_increment);
    rows = doz(rows, 2);
  } while (rows != 0);
}