/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NESelectKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "utils/TypePrinter.h"

#include <arm_neon.h>
#include <map>
#include <string>

namespace arm_compute
{
namespace
{
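// Generic element-wise select: copies elements from in1 where the condition is
// non-zero and from in2 otherwise. The caller provides the vector step and a
// conversion that turns the uint8_t condition values into a per-lane mask of
// VectorType.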
template <typename ScalarType, typename VectorType>
void select_op(const ITensor *cond, const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
               const int window_step_x, const int window_start_x, const int window_end_x, const int limit, VectorType (*condition_conversion)(const uint8_t *))
{
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator condition(cond, win);
    Iterator input1(in1, win);
    Iterator input2(in2, win);
    Iterator output(out, win);

    execute_window_loop(win, [&](const Coordinates &)
    {
        auto       output_ptr    = reinterpret_cast<ScalarType *>(output.ptr());
        const auto condition_ptr = reinterpret_cast<const uint8_t *>(condition.ptr());
        const auto input1_ptr    = reinterpret_cast<const ScalarType *>(input1.ptr());
        const auto input2_ptr    = reinterpret_cast<const ScalarType *>(input2.ptr());

        int x = window_start_x;
        // Vectorized loop: blend full vectors using the converted condition mask
        for(; x <= limit; x += window_step_x)
        {
            const auto c = (*condition_conversion)(condition_ptr + x);
            const auto a = wrapper::vloadq(input1_ptr + x);
            const auto b = wrapper::vloadq(input2_ptr + x);
            wrapper::vstore(output_ptr + x, wrapper::vbsl(c, a, b));
        }
        // Scalar loop for the left-over elements
        for(; x < window_end_x; ++x)
        {
            const auto c      = *(condition_ptr + x);
            const auto a      = *(input1_ptr + x);
            const auto b      = *(input2_ptr + x);
            *(output_ptr + x) = static_cast<bool>(c) ? a : b;
        }
    },
    condition, input1, input2, output);
}

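// Select for 8-bit element types: the uint8_t condition already matches the
// lane width, so it is compared against zero directly to build the mask.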
template <typename ScalarType, typename VectorType>
void select_op_8(const ITensor *cond, const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    const auto window_step_x  = 16 / sizeof(ScalarType);
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    select_op<ScalarType, VectorType>(cond, in1, in2, out, window, window_step_x, window_start_x, window_end_x, window_end_x - window_step_x, [](const uint8_t *condition_ptr) -> VectorType
    {
        static const auto zero = wrapper::vdup_n(static_cast<uint8_t>(0), arm_compute::wrapper::traits::vector_128_tag());
        return wrapper::vcgt(wrapper::vloadq(condition_ptr), zero);
    });
}

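// Select for 16-bit element types: the uint8_t condition is widened once
// (vmovl) to 16-bit lanes before building the mask.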
template <typename ScalarType, typename VectorType>
void select_op_16(const ITensor *cond, const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    const auto window_step_x  = 16 / sizeof(ScalarType);
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    select_op<ScalarType, VectorType>(cond, in1, in2, out, window, window_step_x, window_start_x, window_end_x, window_end_x - window_step_x, [](const uint8_t *condition_ptr) -> VectorType
    {
        static const auto zero = wrapper::vdup_n(static_cast<uint16_t>(0), arm_compute::wrapper::traits::vector_128_tag());
        return wrapper::vcgt(wrapper::vmovl(wrapper::vload(condition_ptr)), zero);
    });
}

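// Select for 32-bit element types: the uint8_t condition is widened twice
// (8-bit -> 16-bit -> 32-bit lanes) before building the mask.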
template <typename ScalarType, typename VectorType>
void select_op_32(const ITensor *cond, const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    const auto window_step_x  = 16 / sizeof(ScalarType);
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    select_op<ScalarType, VectorType>(cond, in1, in2, out, window, window_step_x, window_start_x, window_end_x, window_end_x - window_step_x, [](const uint8_t *condition_ptr) -> VectorType
    {
        static const auto zero = wrapper::vdup_n(static_cast<uint32_t>(0), arm_compute::wrapper::traits::vector_128_tag());
        return wrapper::vcgt(wrapper::vmovl(wrapper::vgetlow(wrapper::vmovl(wrapper::vload(condition_ptr)))), zero);
    });
}

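// Select used when the condition tensor has a lower rank than the inputs:
// each condition value chooses an entire inner slice of in1 or in2.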
template <typename ScalarType>
void select_op_not_same_rank(const ITensor *cond, const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    ARM_COMPUTE_UNUSED(window);

    auto       output_ptr    = reinterpret_cast<ScalarType *>(out->buffer());
    const auto condition_ptr = reinterpret_cast<const uint8_t *>(cond->buffer());
    const auto input1_ptr    = reinterpret_cast<const ScalarType *>(in1->buffer());
    const auto input2_ptr    = reinterpret_cast<const ScalarType *>(in2->buffer());

    const int outer_size = cond->info()->total_size() / cond->info()->element_size();
    const int inner_size = (in1->info()->total_size() / in1->info()->element_size()) / outer_size;
    int       offset     = 0;
    const int step       = 16 / in1->info()->element_size();

    for(int i = 0; i < outer_size; ++i)
    {
        int        x         = offset;
        const auto input_ptr = static_cast<bool>(*(condition_ptr + i)) ? input1_ptr : input2_ptr;
        // Copy the selected slice in full 128-bit vectors
        for(; x <= offset + inner_size - step; x += step)
        {
            wrapper::vstore(output_ptr + x, wrapper::vloadq(input_ptr + x));
        }
        // Copy a trailing 64-bit half vector if one still fits
        if(x <= offset + inner_size - (step / 2))
        {
            wrapper::vstore(output_ptr + x, wrapper::vload(input_ptr + x));
            x += step / 2;
        }
        // Copy any remaining elements one by one
        for(; x < offset + inner_size; ++x)
        {
            *(output_ptr + x) = *(input_ptr + x);
        }
        offset += inner_size;
    }
}
} // namespace

NESelectKernel::NESelectKernel()
    : _function(nullptr), _c(nullptr), _x(nullptr), _y(nullptr), _output(nullptr), _has_same_rank(false)
{
}

void NESelectKernel::configure(const ITensor *c, const ITensor *x, const ITensor *y, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(c, x, y, output);

    // Auto initialize output if not initialized
    auto_init_if_empty(*output->info(), x->info()->tensor_shape(), 1, x->info()->data_type());
    ARM_COMPUTE_ERROR_THROW_ON(validate(c->info(), x->info(), y->info(), output->info()));

    _c             = c;
    _x             = x;
    _y             = y;
    _output        = output;
    _has_same_rank = (c->info()->tensor_shape().num_dimensions() == x->info()->tensor_shape().num_dimensions());

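    // Pick the implementation matching the input data type; when the condition
    // has a lower rank than the inputs, the per-slice (not-same-rank) path is used.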
    std::string function_to_call("op_");
    function_to_call += string_from_data_type(x->info()->data_type());

    static std::map<std::string, SelectFunction *> map_function;

    if(_has_same_rank)
    {
        map_function =
        {
            { "op_S8", &select_op_8<int8_t, uint8x16_t> },
            { "op_S16", &select_op_16<int16_t, uint16x8_t> },
            { "op_S32", &select_op_32<int32_t, uint32x4_t> },
            { "op_U8", &select_op_8<uint8_t, uint8x16_t> },
            { "op_U16", &select_op_16<uint16_t, uint16x8_t> },
            { "op_U32", &select_op_32<uint32_t, uint32x4_t> },
            { "op_F32", &select_op_32<float, uint32x4_t> }
        };
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        map_function["op_F16"] = &select_op_16<float16_t, uint16x8_t>;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    }
    else
    {
        map_function =
        {
            { "op_S8", &select_op_not_same_rank<int8_t> },
            { "op_S16", &select_op_not_same_rank<int16_t> },
            { "op_S32", &select_op_not_same_rank<int32_t> },
            { "op_U8", &select_op_not_same_rank<uint8_t> },
            { "op_U16", &select_op_not_same_rank<uint16_t> },
            { "op_U32", &select_op_not_same_rank<uint32_t> },
            { "op_F32", &select_op_not_same_rank<float> }
        };
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        map_function["op_F16"] = &select_op_not_same_rank<float16_t>;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    }

    auto it = map_function.find(function_to_call);

    if(it != map_function.end())
    {
        _function = it->second;
    }

    Window win = calculate_max_window(x->info()->valid_region());
    INEKernel::configure(win);
}

Status NESelectKernel::validate(const ITensorInfo *c, const ITensorInfo *x, const ITensorInfo *y, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(c, x, y);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(x);
    ARM_COMPUTE_RETURN_ERROR_ON(x->data_type() == DataType::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, y);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(x, y);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(c, 1, DataType::U8);

    const bool is_same_rank = (c->tensor_shape().num_dimensions() == x->tensor_shape().num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(is_same_rank && (x->tensor_shape() != c->tensor_shape()));
    ARM_COMPUTE_RETURN_ERROR_ON(!is_same_rank && ((c->tensor_shape().num_dimensions() > 1) || (c->tensor_shape().x() != x->tensor_shape()[x->tensor_shape().num_dimensions() - 1])));

    if(output != nullptr && output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(x, output);
    }

    return Status{};
}

void NESelectKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_function == nullptr);
    _function(_c, _x, _y, _output, window);
}
} // namespace arm_compute