/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/NEMath.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/common/StdTypes.h"
#include "src/core/common/Validate.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)

namespace arm_compute
{
namespace cpu
{
namespace
{
#ifndef __aarch64__
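// Keeps the lanes of a float16 vector whose corresponding mask lanes are set and zeroes the
// rest, by reinterpreting the vector as u16 and AND-ing it with the mask.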
inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &mask)
{
    auto int_in = vreinterpretq_u16_f16(in);
    return vreinterpretq_f16_u16(wrapper::vand(int_in, mask));
}
#endif /* __aarch64__ */
} // namespace

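/** Apply the activation function described by @p act_info to @p src over @p window and write the result to @p dst. */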
void fp16_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    /** NEON vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
    const ActivationLayerInfo::ActivationFunction act = act_info.activation();

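    // Process 8 half-precision elements (one 128-bit NEON register) per vector iteration.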
    constexpr int window_step_x  = 8;
    const auto    window_start_x = static_cast<int>(window.x().start());
    const auto    window_end_x   = static_cast<int>(window.x().end());

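    // Collapse the execution window where possible; the X dimension is reset to a single
    // step because it is traversed manually in the loops below so that it can be vectorized.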
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    // In case of non-aarch64, a small delta value is added to the input
    // to prevent NAN values caused by zeros in inputs to SQRT.
    // In case of aarch64, we call vsqrt directly, so we don't use delta.
#ifndef __aarch64__
    const auto delta = wrapper::vdup_n(static_cast<float16_t>(1e-7), ExactTagType{});
#endif /* __aarch64__ */

    const auto const_1     = wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
    const auto const_0     = wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
    const auto const_6     = wrapper::vdup_n(static_cast<float16_t>(6.f), ExactTagType{});
    const auto const_3     = wrapper::vdup_n(static_cast<float16_t>(3.f), ExactTagType{});
    const auto const_inv_6 = wrapper::vdup_n(static_cast<float16_t>(0.166666667f), ExactTagType{});

    const auto va = wrapper::vdup_n(static_cast<float16_t>(act_info.a()), ExactTagType{});
    const auto vb = wrapper::vdup_n(static_cast<float16_t>(act_info.b()), ExactTagType{});
    const auto a  = static_cast<float16_t>(act_info.a());
    const auto b  = static_cast<float16_t>(act_info.b());
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());

        wrapper::traits::neon_bitvector_t<float16_t, wrapper::traits::BitWidth::W128> tmp;

        // Compute window_step_x elements per iteration
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin = wrapper::vloadq(input_ptr + x);
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = wrapper::vabs(vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
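                    // Linear: a * x + b (computed as b + a * x with a multiply-accumulate)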
                    tmp = wrapper::vmla(vb, va, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
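                    // Logistic (sigmoid): 1 / (1 + exp(-x))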
                    tmp = wrapper::vinv(wrapper::vadd(const_1, wrapper::vexpq(wrapper::vneg(vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = wrapper::vmax(const_0, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(const_0, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(vb, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
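                    // Leaky ReLU: x if x > 0, a * x otherwise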
                    tmp = wrapper::vbsl(wrapper::vcgt(vin, const_0), vin, wrapper::vmul(va, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
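                    // Soft ReLU (softplus): log(1 + exp(x))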
                    tmp = wrapper::vlog(wrapper::vadd(const_1, wrapper::vexpq(vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
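                    // ELU: x if x >= 0, a * (exp(x) - 1) otherwise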
                    tmp = wrapper::vbsl(wrapper::vcge(vin, const_0), vin, wrapper::vmul(va, wrapper::vsub(wrapper::vexpq(vin), const_1)));
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
#ifdef __aarch64__
                    tmp = wrapper::vsqrt(vin);
#else /* aarch64 */
                {
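                    // vsqrt is only available on aarch64, so compute sqrt(x) as 1 / invsqrt(x).
                    // Lanes that are exactly zero get a small delta added first (invsqrt(0) is Inf)
                    // and are masked back to zero afterwards.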
                    const auto bitmask = wrapper::vceq(vin, wrapper::vdup_n(0, ExactTagType{}));
                    tmp                = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, mask_float_vector(delta, bitmask))));
                    tmp                = mask_float_vector(tmp, wrapper::vnot(bitmask));
                }
#endif /* aarch64 */
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = wrapper::vmul(vin, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
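                    // Scaled tanh: a * tanh(b * x)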
                    tmp = wrapper::vmul(va, wrapper::vtanh(wrapper::vmul(vb, vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = vin;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
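                    // Hard swish: x * relu6(x + 3) / 6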
                    tmp = wrapper::vmul(vin, wrapper::vmul(const_inv_6, wrapper::vmin(const_6, wrapper::vmax(const_0, wrapper::vadd(vin, const_3)))));
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            wrapper::vstore(output_ptr + x, tmp);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            const float16_t in = *(reinterpret_cast<const float16_t *>(input_ptr + x));
            float16_t       tmp;
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = std::abs(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = a * in + b;
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                    tmp = static_cast<float16_t>(1) / (static_cast<float16_t>(1) + std::exp(-in));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = std::max<float16_t>(static_cast<float16_t>(0), in);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = std::min<float16_t>(a, std::max(static_cast<float16_t>(0), in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = std::min<float16_t>(a, std::max<float16_t>(b, in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = (in > 0) ? in : a * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = std::log(static_cast<float16_t>(1) + std::exp(in));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = (in >= 0) ? in : a * (std::exp(in) - 1);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    tmp = std::sqrt(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = in * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = a * std::tanh(b * in);
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = in;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
                    tmp = in * ((std::min(std::max((in + 3), 0.0f), 6.0f)) * 0.166666667f);
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            *(output_ptr + x) = tmp;
        }
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute

#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */