• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/*
2 * Copyright (c) 2020-2022 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include <cmath>
25#include <limits>
26
27#if defined(__ARM_FEATURE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE)
28
29#ifndef M_PI
30#define M_PI (3.14159265358979323846)
31#endif // M_PI
32
33namespace arm_compute
34{
35inline svfloat32_t svtaylor_poly_f32_z(svbool_t pg, svfloat32_t x, svfloat32_t coeff_1, svfloat32_t coeff_2, svfloat32_t coeff_3,
36                                       svfloat32_t coeff_4, svfloat32_t coeff_5, svfloat32_t coeff_6, svfloat32_t coeff_7, svfloat32_t coeff_8)
37{
38    const auto A   = svmla_f32_z(pg, coeff_1, coeff_5, x);
39    const auto B   = svmla_f32_z(pg, coeff_3, coeff_7, x);
40    const auto C   = svmla_f32_z(pg, coeff_2, coeff_6, x);
41    const auto D   = svmla_f32_z(pg, coeff_4, coeff_8, x);
42    const auto x2  = svmul_f32_z(pg, x, x);
43    const auto x4  = svmul_f32_z(pg, x2, x2);
44    const auto res = svmla_f32_z(pg, svmla_f32_z(pg, A, B, x2), svmla_f32_z(pg, C, D, x2), x4);
45    return res;
46}
47
48inline svfloat16_t svtaylor_poly_f16_z(svbool_t pg, svfloat16_t x, svfloat16_t coeff_1, svfloat16_t coeff_2, svfloat16_t coeff_3,
49                                       svfloat16_t coeff_4, svfloat16_t coeff_5, svfloat16_t coeff_6, svfloat16_t coeff_7, svfloat16_t coeff_8)
50{
51    const auto A   = svmla_f16_z(pg, coeff_1, coeff_5, x);
52    const auto B   = svmla_f16_z(pg, coeff_3, coeff_7, x);
53    const auto C   = svmla_f16_z(pg, coeff_2, coeff_6, x);
54    const auto D   = svmla_f16_z(pg, coeff_4, coeff_8, x);
55    const auto x2  = svmul_f16_z(pg, x, x);
56    const auto x4  = svmul_f16_z(pg, x2, x2);
57    const auto res = svmla_f16_z(pg, svmla_f16_z(pg, A, B, x2), svmla_f16_z(pg, C, D, x2), x4);
58    return res;
59}
60
61inline svfloat16_t svinv_f16_z(svbool_t pg, svfloat16_t x)
62{
63    auto recip = svrecpe_f16(x);
64    recip      = svmul_f16_z(pg, svrecps_f16(x, recip), recip);
65    recip      = svmul_f16_z(pg, svrecps_f16(x, recip), recip);
66    return recip;
67}
68
69inline svfloat32_t svinv_f32_z(svbool_t pg, svfloat32_t x)
70{
71    auto recip = svrecpe_f32(x);
72    recip      = svmul_f32_z(pg, svrecps_f32(x, recip), recip);
73    recip      = svmul_f32_z(pg, svrecps_f32(x, recip), recip);
74    return recip;
75}
76
/** Degree-5 polynomial coefficients for the truncated Taylor series of e^r
 *  used by svexp_f32_z, stored as the IEEE-754 bit patterns of the FP32
 *  constants (reinterpreted at use site).
 *
 *  constexpr (rather than static const) guarantees compile-time
 *  initialisation and is the idiomatic form for a header-scope table.
 */
constexpr uint32_t svexp_f32_coeff[] = {
    0x3f7ffff6, // x^1: 0x1.ffffecp-1f
    0x3efffedb, // x^2: 0x1.fffdb6p-2f
    0x3e2aaf33, // x^3: 0x1.555e66p-3f
    0x3d2b9f17, // x^4: 0x1.573e2ep-5f
    0x3c072010, // x^5: 0x1.0e4020p-7f
};
84
85inline svfloat32_t svexp_f32_z(svbool_t pg, svfloat32_t x)
86{
87    const auto c1 = svreinterpret_f32_u32(svdup_n_u32(svexp_f32_coeff[0]));
88    const auto c2 = svreinterpret_f32_u32(svdup_n_u32(svexp_f32_coeff[1]));
89    const auto c3 = svreinterpret_f32_u32(svdup_n_u32(svexp_f32_coeff[2]));
90    const auto c4 = svreinterpret_f32_u32(svdup_n_u32(svexp_f32_coeff[3]));
91    const auto c5 = svreinterpret_f32_u32(svdup_n_u32(svexp_f32_coeff[4]));
92
93    const auto shift   = svreinterpret_f32_u32(svdup_n_u32(0x4b00007f));  // 2^23 + 127 = 0x1.0000fep23f
94    const auto inv_ln2 = svreinterpret_f32_u32(svdup_n_u32(0x3fb8aa3b));  // 1 / ln(2) = 0x1.715476p+0f
95    const auto neg_ln2_hi  = svreinterpret_f32_u32(svdup_n_u32(0xbf317200));  // -ln(2) from bits  -1 to -19: -0x1.62e400p-1f
96    const auto neg_ln2_lo  = svreinterpret_f32_u32(svdup_n_u32(0xb5bfbe8e));  // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f
97
98    const auto inf       = svdup_n_f32(std::numeric_limits<float>::infinity());
99    const auto max_input = svdup_n_f32(88.7f);   // Approximately ln(0x1.fffffep+127)
100    const auto zero      = svdup_n_f32(0.f);
101    const auto min_input = svdup_n_f32(-86.6f);  // Approximately ln(2^-125)
102
103    // Range reduction:
104    //   e^x = 2^n * e^r
105    // where:
106    //   n = floor(x / ln(2))
107    //   r = x - n * ln(2)
108    //
109    // By adding x / ln(2) with 2^23 + 127 (shift):
110    //   * As FP32 fraction part only has 23-bits, the addition of 2^23 + 127 forces decimal part
111    //     of x / ln(2) out of the result. The integer part of x / ln(2) (i.e. n) + 127 will occupy
112    //     the whole fraction part of z in FP32 format.
113    //     Subtracting 2^23 + 127 (shift) from z will result in the integer part of x / ln(2)
114    //     (i.e. n) because the decimal part has been pushed out and lost.
115    //   * The addition of 127 makes the FP32 fraction part of z ready to be used as the exponent
116    //     in FP32 format. Left shifting z by 23 bits will result in 2^n.
117    const auto z = svmla_f32_z(pg, shift, x, inv_ln2);
118    const auto n = svsub_f32_z(pg, z, shift);
119    const auto scale = svreinterpret_f32_u32(svlsl_n_u32_z(pg, svreinterpret_u32_f32(z), 23));  // 2^n
120
121    // The calculation of n * ln(2) is done using 2 steps to achieve accuracy beyond FP32.
122    // This outperforms longer Taylor series (3-4 tabs) both in term of accuracy and performance.
123    const auto r_hi = svmla_f32_z(pg, x, n, neg_ln2_hi);
124    const auto r = svmla_f32_z(pg, r_hi, n, neg_ln2_lo);
125
126    // Compute the truncated Taylor series of e^r.
127    //   poly = scale * (1 + c1 * r + c2 * r^2 + c3 * r^3 + c4 * r^4 + c5 * r^5)
128    const auto r2 = svmul_f32_z(pg, r, r);
129
130    const auto p1 = svmul_f32_z(pg, c1, r);
131    const auto p23 = svmla_f32_z(pg, c2, c3, r);
132    const auto p45 = svmla_f32_z(pg, c4, c5, r);
133    const auto p2345 = svmla_f32_z(pg, p23, p45, r2);
134    const auto p12345 = svmla_f32_z(pg, p1, p2345, r2);
135
136    auto poly = svmla_f32_z(pg, scale, p12345, scale);
137
138    // Handle underflow and overflow.
139    poly = svsel_f32(svcmplt_f32(pg, x, min_input), zero, poly);
140    poly = svsel_f32(svcmpgt_f32(pg, x, max_input), inf, poly);
141
142    return poly;
143}
144
145inline svfloat16_t svexp_f16_z(svbool_t pg, svfloat16_t x)
146{
147    auto bottom = svcvt_f32_z(pg, x);
148#if defined(ARM_COMPUTE_ENABLE_SVE2)
149    auto top    = svcvtlt_f32_x(pg, x);
150    auto pg_top = pg;
151#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
152    auto pg_top = svptrue_b16();
153    auto top    = svcvt_f32_z(pg_top, svreinterpret_f16(svrevh_z(svptrue_b16(), svreinterpret_u32(x))));
154#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
155
156    bottom = svexp_f32_z(pg, bottom);
157    top    = svexp_f32_z(pg_top, top);
158
159#if defined(ARM_COMPUTE_ENABLE_SVE2)
160    return svcvtnt_f16_m(svcvt_f16_z(pg, bottom), pg_top, top);
161#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
162    return svtrn1(svcvt_f16_z(pg, bottom), svcvt_f16_z(pg_top, top));
163#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
164}
165
166inline svfloat32_t svtanh_f32_z(svbool_t pg, svfloat32_t val)
167{
168    const svfloat32_t CONST_1        = svdup_n_f32(1.f);
169    const svfloat32_t CONST_2        = svdup_n_f32(2.f);
170    const svfloat32_t CONST_MIN_TANH = svdup_n_f32(-10.f);
171    const svfloat32_t CONST_MAX_TANH = svdup_n_f32(10.f);
172
173    svfloat32_t x     = svmin_f32_z(pg, svmax_f32_z(pg, val, CONST_MIN_TANH), CONST_MAX_TANH);
174    svfloat32_t exp2x = svexp_f32_z(pg, svmul_f32_z(pg, CONST_2, x));
175    svfloat32_t num   = svsub_f32_z(pg, exp2x, CONST_1);
176    svfloat32_t den   = svadd_f32_z(pg, exp2x, CONST_1);
177    svfloat32_t tanh  = svdiv_f32_z(pg, num, den);
178    return tanh;
179}
180
181inline svfloat16_t svtanh_f16_z(svbool_t pg, svfloat16_t val)
182{
183    const svfloat16_t CONST_1        = svdup_n_f16(1.f);
184    const svfloat16_t CONST_2        = svdup_n_f16(2.f);
185    const svfloat16_t CONST_MIN_TANH = svdup_n_f16(-10.f);
186    const svfloat16_t CONST_MAX_TANH = svdup_n_f16(10.f);
187
188    const svfloat16_t x     = svmin_f16_z(pg, svmax_f16_z(pg, val, CONST_MIN_TANH), CONST_MAX_TANH);
189    const svfloat16_t exp2x = svexp_f16_z(pg, svmul_f16_z(pg, CONST_2, x));
190    const svfloat16_t num   = svsub_f16_z(pg, exp2x, CONST_1);
191    const svfloat16_t den   = svadd_f16_z(pg, exp2x, CONST_1);
192    const svfloat16_t tanh  = svdiv_f16_z(pg, num, den);
193    return tanh;
194}
195
196inline svfloat32_t svlog_f32_z(svbool_t pg, svfloat32_t x)
197{
198    /** Logarithm polynomial coefficients */
199    const svfloat32_t log_tab_1 = svdup_n_f32(-2.29561495781f);
200    const svfloat32_t log_tab_2 = svdup_n_f32(-2.47071170807f);
201    const svfloat32_t log_tab_3 = svdup_n_f32(-5.68692588806f);
202    const svfloat32_t log_tab_4 = svdup_n_f32(-0.165253549814f);
203    const svfloat32_t log_tab_5 = svdup_n_f32(5.17591238022f);
204    const svfloat32_t log_tab_6 = svdup_n_f32(0.844007015228f);
205    const svfloat32_t log_tab_7 = svdup_n_f32(4.58445882797f);
206    const svfloat32_t log_tab_8 = svdup_n_f32(0.0141278216615f);
207
208    const auto CONST_127 = svdup_n_s32(127);           // 127
209    const auto CONST_LN2 = svdup_n_f32(0.6931471805f); // ln(2)
210
211    // Extract exponent
212    auto m   = svsub_s32_z(pg, svasr_n_s32_z(pg, svreinterpret_s32_f32(x), 23), CONST_127);
213    auto val = svreinterpret_f32_s32(svsub_s32_z(pg, svreinterpret_s32_f32(x), svlsl_n_s32_z(pg, m, 23)));
214
215    // Polynomial Approximation
216    auto poly = svtaylor_poly_f32_z(pg, val, log_tab_1, log_tab_2, log_tab_3, log_tab_4, log_tab_5, log_tab_6, log_tab_7, log_tab_8);
217
218    // Reconstruct
219    poly = svmla_f32_z(pg, poly, svcvt_f32_s32_z(pg, m), CONST_LN2);
220
221    return poly;
222}
223
224inline svfloat16_t svlog_f16_z(svbool_t pg, svfloat16_t x)
225{
226    auto bottom = svcvt_f32_z(pg, x);
227#if defined(ARM_COMPUTE_ENABLE_SVE2)
228    auto top    = svcvtlt_f32_x(pg, x);
229    auto pg_top = pg;
230#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
231    auto pg_top = svptrue_b16();
232    auto top    = svcvt_f32_z(pg_top, svreinterpret_f16(svrevh_z(svptrue_b16(), svreinterpret_u32(x))));
233#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
234
235    bottom = svlog_f32_z(pg, bottom);
236    top    = svlog_f32_z(pg_top, top);
237
238#if defined(ARM_COMPUTE_ENABLE_SVE2)
239    return svcvtnt_f16_m(svcvt_f16_z(pg, bottom), pg_top, top);
240#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
241    return svtrn1(svcvt_f16_z(pg, bottom), svcvt_f16_z(pg_top, top));
242#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
243}
244
/** Vectorised sine for FP32 (zeroing predication).
 *
 * Reduces |val| modulo pi into [0, pi/2] using the symmetry of sine,
 * evaluates a 5-term Taylor series there, then restores the sign from the
 * half-turn parity and the sign of the input.
 */
inline svfloat32_t svsin_f32_z(svbool_t pg, svfloat32_t val)
{
    using ScalarType = float;
    using IntType    = uint32_t;

    // Cumulative Taylor factors: multiplying the previous term by
    // ma2 * te_sin_coeffN advances x^(2N-3)/(2N-3)! to x^(2N-1)/(2N-1)!.
    constexpr float te_sin_coeff2 = 0.166666666666f; // 1/(2*3)
    constexpr float te_sin_coeff3 = 0.05f;           // 1/(4*5)
    constexpr float te_sin_coeff4 = 0.023809523810f; // 1/(6*7)
    constexpr float te_sin_coeff5 = 0.013888888889f; // 1/(8*9)

    const auto pi_v   = wrapper::svdup_n(ScalarType(M_PI));
    const auto pio2_v = wrapper::svdup_n(ScalarType(M_PI / 2));
    const auto ipi_v  = wrapper::svdup_n(ScalarType(1 / M_PI));

    //Find positive or negative: c_v counts whole half-turns (multiples of pi)
    //in val; an odd count flips the sign, as does a negative input.
    const auto c_v    = svabs_z(pg, wrapper::svcvt_z<int32_t>(pg, svmul_z(pg, val, ipi_v)));
    const auto sign_v = svcmple(pg, val, wrapper::svdup_n(ScalarType(0)));
    const auto odd_v  = svcmpne(pg, svand_z(pg, wrapper::svreinterpret<IntType>(c_v), wrapper::svdup_n(IntType(1))), wrapper::svdup_n(IntType(0)));

    // Negate the result when exactly one of (odd half-turn, negative input) holds.
    auto neg_v = sveor_z(pg, odd_v, sign_v);

    //Modulus a - (n * int(a*(1/n)))
    auto       ma    = svsub_z(pg, svabs_z(pg, val), svmul_z(pg, pi_v, wrapper::svcvt_z<ScalarType>(pg, c_v)));
    const auto reb_v = svcmpge(pg, ma, pio2_v);

    //Rebase a between 0 and pi/2 (sin(pi - a) == sin(a))
    ma = svsel(reb_v, svsub_z(pg, pi_v, ma), ma);

    //Taylor series: sin(a) ~ a - a^3/3! + a^5/5! - a^7/7! + a^9/9!
    const auto ma2 = svmul_z(pg, ma, ma);

    //2nd elem: x^3 / 3!
    auto elem = svmul_z(pg, svmul_z(pg, ma, ma2), wrapper::svdup_n(ScalarType(te_sin_coeff2)));
    auto res  = svsub_z(pg, ma, elem);

    //3rd elem: x^5 / 5!
    elem = svmul_z(pg, svmul_z(pg, elem, ma2), wrapper::svdup_n(ScalarType(te_sin_coeff3)));
    res  = svadd_z(pg, res, elem);

    //4th elem: x^7 / 7!
    elem = svmul_z(pg, svmul_z(pg, elem, ma2), wrapper::svdup_n(ScalarType(te_sin_coeff4)));
    res  = svsub_z(pg, res, elem);

    //5th elem: x^9 / 9!
    elem = svmul_z(pg, svmul_z(pg, elem, ma2), wrapper::svdup_n(ScalarType(te_sin_coeff5)));
    res  = svadd_z(pg, res, elem);

    //Change of sign: merge-negate only the lanes flagged by neg_v.
    res = svneg_m(res, neg_v, res);
    return res;
}
296
297inline svfloat16_t svsin_f16_z(svbool_t pg, svfloat16_t val)
298{
299    auto bottom = svcvt_f32_z(pg, val);
300#if defined(ARM_COMPUTE_ENABLE_SVE2)
301    auto top    = svcvtlt_f32_x(pg, val);
302    auto pg_top = pg;
303#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
304    auto pg_top = svptrue_b16();
305    auto top    = svcvt_f32_z(pg_top, svreinterpret_f16(svrevh_z(svptrue_b16(), svreinterpret_u32(val))));
306#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
307
308    bottom = svsin_f32_z(pg, bottom);
309    top    = svsin_f32_z(pg_top, top);
310
311#if defined(ARM_COMPUTE_ENABLE_SVE2)
312    return svcvtnt_f16_m(svcvt_f16_z(pg, bottom), pg_top, top);
313#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
314    return svtrn1(svcvt_f16_z(pg, bottom), svcvt_f16_z(pg_top, top));
315#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
316}
317
318inline svfloat32_t svpow_f32_z(svbool_t pg, svfloat32_t a, svfloat32_t b)
319{
320    return svexp_f32_z(pg, svmul_z(pg, b, svlog_f32_z(pg, a)));
321}
322
323inline svfloat16_t svpow_f16_z(svbool_t pg, svfloat16_t a, svfloat16_t b)
324{
325    auto a_bottom = svcvt_f32_z(pg, a);
326    auto b_bottom = svcvt_f32_z(pg, b);
327
328#if defined(ARM_COMPUTE_ENABLE_SVE2)
329    auto pg_top = pg;
330    auto a_top  = svcvtlt_f32_x(pg, a);
331    auto b_top  = svcvtlt_f32_x(pg, b);
332#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
333    auto pg_top = svptrue_b16();
334    auto a_top  = svcvt_f32_z(pg_top, svreinterpret_f16(svrevh_z(svptrue_b16(), svreinterpret_u32(a))));
335    auto b_top  = svcvt_f32_z(pg_top, svreinterpret_f16(svrevh_z(svptrue_b16(), svreinterpret_u32(b))));
336#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
337
338    auto res_bottom = svpow_f32_z(pg, a_bottom, b_bottom);
339    auto res_top    = svpow_f32_z(pg_top, a_top, b_top);
340
341#if defined(ARM_COMPUTE_ENABLE_SVE2)
342    return svcvtnt_f16_m(svcvt_f16_z(pg, res_bottom), pg_top, res_top);
343#else  /* defined(ARM_COMPUTE_ENABLE_SVE2) */
344    return svtrn1(svcvt_f16_z(pg, res_bottom), svcvt_f16_z(pg_top, res_top));
345#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
346}
347
348#if defined(ARM_COMPUTE_ENABLE_SVE2)
/** Narrow four FP32 vectors into one saturated U8 vector (SVE2 only).
 *
 * Each input is converted to U32, adjacent pairs are saturating-narrowed to
 * U16 with svqxtnb/svqxtnt (which write the bottom/top halves of each lane
 * pair, leaving the lanes interleaved), and the svuzp1/svuzp2 + svsplice
 * steps re-pack the interleaved lanes into contiguous order. The same
 * narrow/re-pack sequence is then repeated from U16 down to U8, so the
 * output holds the lanes of in_0..in_3 in order.
 */
template <>
inline svuint8_t convert_float_to_int<svuint8_t>(const svfloat32_t &in_0, const svfloat32_t &in_1, const svfloat32_t &in_2, const svfloat32_t &in_3)
{
    svuint8_t  out;
    const auto all_true_pg = svptrue_b32();
    // Convert each FP32 vector to unsigned 32-bit integers.
    auto       tmp_0       = svcvt_u32_f32_z(all_true_pg, in_0);
    auto       tmp_1       = svcvt_u32_f32_z(all_true_pg, in_1);
    auto       tmp_2       = svcvt_u32_f32_z(all_true_pg, in_2);
    auto       tmp_3       = svcvt_u32_f32_z(all_true_pg, in_3);

    // Saturating narrow U32 -> U16; bottom/top results are interleaved.
    auto tmp_16_0 = svqxtnt_u32(svqxtnb_u32(tmp_0), tmp_1);
    auto tmp_16_1 = svqxtnt_u32(svqxtnb_u32(tmp_2), tmp_3);

    // De-interleave even/odd 16-bit lanes...
    auto tmp_16_uzp_0 = svuzp1(tmp_16_0, tmp_16_0);
    auto tmp_16_uzp_1 = svuzp2(tmp_16_0, tmp_16_0);
    auto tmp_16_uzp_2 = svuzp1(tmp_16_1, tmp_16_1);
    auto tmp_16_uzp_3 = svuzp2(tmp_16_1, tmp_16_1);

    // Predicate selecting the low half of the 16-bit lanes.
    auto pg = svwhilelt_b16_s32(0, svcnth() / 2);

    // ...and splice the halves back so lane order matches the inputs.
    tmp_16_0 = svsplice(pg, tmp_16_uzp_0, tmp_16_uzp_1);
    tmp_16_1 = svsplice(pg, tmp_16_uzp_2, tmp_16_uzp_3);

    // Saturating narrow U16 -> U8 (interleaved again).
    out = svqxtnt_u16(svqxtnb_u16(tmp_16_0), tmp_16_1);

    auto out_uzp_0 = svuzp1(out, out);
    auto out_uzp_1 = svuzp2(out, out);

    // Repeat the de-interleave + splice fix-up at byte granularity.
    pg  = svwhilelt_b8_s32(0, svcntb() / 2);
    out = svsplice(pg, out_uzp_0, out_uzp_1);

    return out;
}
382
/** Narrow four FP32 vectors into one saturated S8 vector (SVE2 only).
 *
 * Signed counterpart of the svuint8_t specialisation: convert to S32,
 * saturating-narrow pairs to S16 with svqxtnb/svqxtnt (which leave the
 * lanes interleaved), re-pack with svuzp1/svuzp2 + svsplice, then repeat
 * the narrow/re-pack down to S8 so the output holds the lanes of
 * in_0..in_3 in order.
 */
template <>
inline svint8_t convert_float_to_int<svint8_t>(const svfloat32_t &in_0, const svfloat32_t &in_1, const svfloat32_t &in_2, const svfloat32_t &in_3)
{
    svint8_t   out;
    const auto all_true_pg = svptrue_b32();
    // Convert each FP32 vector to signed 32-bit integers.
    auto       tmp_0       = svcvt_s32_f32_z(all_true_pg, in_0);
    auto       tmp_1       = svcvt_s32_f32_z(all_true_pg, in_1);
    auto       tmp_2       = svcvt_s32_f32_z(all_true_pg, in_2);
    auto       tmp_3       = svcvt_s32_f32_z(all_true_pg, in_3);

    // Saturating narrow S32 -> S16; bottom/top results are interleaved.
    auto tmp_16_0 = svqxtnt_s32(svqxtnb_s32(tmp_0), tmp_1);
    auto tmp_16_1 = svqxtnt_s32(svqxtnb_s32(tmp_2), tmp_3);

    // De-interleave even/odd 16-bit lanes...
    auto tmp_16_uzp_0 = svuzp1(tmp_16_0, tmp_16_0);
    auto tmp_16_uzp_1 = svuzp2(tmp_16_0, tmp_16_0);
    auto tmp_16_uzp_2 = svuzp1(tmp_16_1, tmp_16_1);
    auto tmp_16_uzp_3 = svuzp2(tmp_16_1, tmp_16_1);

    // Predicate selecting the low half of the 16-bit lanes.
    auto pg = svwhilelt_b16_s32(0, svcnth() / 2);

    // ...and splice the halves back so lane order matches the inputs.
    tmp_16_0 = svsplice(pg, tmp_16_uzp_0, tmp_16_uzp_1);
    tmp_16_1 = svsplice(pg, tmp_16_uzp_2, tmp_16_uzp_3);

    // Saturating narrow S16 -> S8 (interleaved again).
    out = svqxtnt_s16(svqxtnb_s16(tmp_16_0), tmp_16_1);

    auto out_uzp_0 = svuzp1(out, out);
    auto out_uzp_1 = svuzp2(out, out);

    // Repeat the de-interleave + splice fix-up at byte granularity.
    pg  = svwhilelt_b8_s32(0, svcntb() / 2);
    out = svsplice(pg, out_uzp_0, out_uzp_1);

    return out;
}
416#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
417
418} // namespace arm_compute
419#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
420