// Auto-generated file. Do not edit!
//   Template: src/qs8-vadd/sse-mul16-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/vadd.h>

void xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x8(
    size_t n,
    const int8_t* input_x,
    const int8_t* input_y,
    int8_t* output,
    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
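  // Load the precomputed quantization parameters: the combined zero-point
  // bias, the per-input 32-bit multipliers split into 16-bit halves, and the
  // rounding, shift, and clamping constants used for requantization.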
  const __m128i vzero_point_product = _mm_load_si128((const __m128i*) params->sse2.zero_point_product);
  const __m128i vx_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.x_multiplier_lo);
  const __m128i vx_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.x_multiplier_hi);
  const __m128i vy_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.y_multiplier_lo);
  const __m128i vy_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.y_multiplier_hi);
  const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
  const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);

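  // Main loop: process 8 int8 elements per iteration.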
  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input_x);
    __m128i vy01234567 = _mm_loadl_epi64((const __m128i*) input_y);
    input_x += 8;
    input_y += 8;

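    // Sign-extend the int8 inputs to int16 by interleaving each byte with its
    // sign mask.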
    vx01234567 = _mm_unpacklo_epi8(vx01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vx01234567));
    vy01234567 = _mm_unpacklo_epi8(vy01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vy01234567));

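    // Multiply the signed 16-bit inputs by the 32-bit multipliers using three
    // 16x16-bit partial products per input:
    //   lo = mullo(x, mult_lo)
    //   hi = mulhi_epu16(x, mult_lo) + mullo(x, mult_hi) - (x < 0 ? mult_lo : 0)
    // The final term corrects mulhi_epu16 for treating x as unsigned.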
    __m128i vxprod01234567hi = _mm_mulhi_epu16(vx01234567, vx_multiplier_lo);
    __m128i vyprod01234567hi = _mm_mulhi_epu16(vy01234567, vy_multiplier_lo);
    const __m128i vxprod01234567lo = _mm_mullo_epi16(vx01234567, vx_multiplier_lo);
    const __m128i vyprod01234567lo = _mm_mullo_epi16(vy01234567, vy_multiplier_lo);

    vxprod01234567hi = _mm_add_epi16(vxprod01234567hi, _mm_mullo_epi16(vx01234567, vx_multiplier_hi));
    vyprod01234567hi = _mm_add_epi16(vyprod01234567hi, _mm_mullo_epi16(vy01234567, vy_multiplier_hi));

    vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), vx_multiplier_lo));
    vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), vy_multiplier_lo));

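    // Interleave the low/high product halves into 32-bit lanes and accumulate
    // both scaled inputs on top of the zero-point bias.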
    __m128i vacc0123 = _mm_add_epi32(vzero_point_product, _mm_unpacklo_epi16(vxprod01234567lo, vxprod01234567hi));
    __m128i vacc4567 = _mm_add_epi32(vzero_point_product, _mm_unpackhi_epi16(vxprod01234567lo, vxprod01234567hi));

    vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vyprod01234567lo, vyprod01234567hi));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vyprod01234567lo, vyprod01234567hi));

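    // Requantize: arithmetic right shift by the runtime shift amount, with a
    // +1 correction whenever the discarded remainder exceeds the rounding
    // threshold (rounding division by 2**shift).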
    const __m128i vrem0123 = _mm_add_epi32(_mm_and_si128(vacc0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123));
    const __m128i vrem4567 = _mm_add_epi32(_mm_and_si128(vacc4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567));

    vacc0123 = _mm_sub_epi32(_mm_sra_epi32(vacc0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
    vacc4567 = _mm_sub_epi32(_mm_sra_epi32(vacc4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));

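    // Pack to int16 with saturation, add the output zero point, clamp to
    // [output_min, output_max], and pack back down to int8.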
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

    vout01234567 = _mm_max_epi16(vout01234567, voutput_min);

    vout01234567 = _mm_min_epi16(vout01234567, voutput_max);

    const __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
    output += 8;
  }
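  // Remainder: if 1-7 elements are left, compute a full 8-element vector with
  // the same arithmetic and store only the needed low bytes.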
  if XNN_UNLIKELY(n != 0) {
    {
      __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input_x);
      __m128i vy01234567 = _mm_loadl_epi64((const __m128i*) input_y);

      vx01234567 = _mm_unpacklo_epi8(vx01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vx01234567));
      vy01234567 = _mm_unpacklo_epi8(vy01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vy01234567));

      __m128i vxprod01234567hi = _mm_mulhi_epu16(vx01234567, vx_multiplier_lo);
      __m128i vyprod01234567hi = _mm_mulhi_epu16(vy01234567, vy_multiplier_lo);
      const __m128i vxprod01234567lo = _mm_mullo_epi16(vx01234567, vx_multiplier_lo);
      const __m128i vyprod01234567lo = _mm_mullo_epi16(vy01234567, vy_multiplier_lo);

      vxprod01234567hi = _mm_add_epi16(vxprod01234567hi, _mm_mullo_epi16(vx01234567, vx_multiplier_hi));
      vyprod01234567hi = _mm_add_epi16(vyprod01234567hi, _mm_mullo_epi16(vy01234567, vy_multiplier_hi));

      vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), vx_multiplier_lo));
      vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), vy_multiplier_lo));

      __m128i vacc0123 = _mm_add_epi32(vzero_point_product, _mm_unpacklo_epi16(vxprod01234567lo, vxprod01234567hi));
      __m128i vacc4567 = _mm_add_epi32(vzero_point_product, _mm_unpackhi_epi16(vxprod01234567lo, vxprod01234567hi));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vyprod01234567lo, vyprod01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vyprod01234567lo, vyprod01234567hi));

      const __m128i vrem0123 = _mm_add_epi32(_mm_and_si128(vacc0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123));
      const __m128i vrem4567 = _mm_add_epi32(_mm_and_si128(vacc4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567));

      vacc0123 = _mm_sub_epi32(_mm_sra_epi32(vacc0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
      vacc4567 = _mm_sub_epi32(_mm_sra_epi32(vacc4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
      vout01234567 = _mm_min_epi16(vout01234567, voutput_max);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

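      // Store the low 1-7 bytes of the packed result, widest chunk first.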
      if (n & (4 * sizeof(int8_t))) {
        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
        output += 4;
      }
      if (n & (2 * sizeof(int8_t))) {
        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
        output += 2;
      }
      if (n & (1 * sizeof(int8_t))) {
        *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
      }
    }
  }
}