/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEUpsampleLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
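// Duplicates each of the S/2 lanes of 'data', starting at 'offset', into two
// adjacent lanes of the returned vector (e.g. {a, b, ...} -> {a, a, b, b, ...}),
// producing one half of a 2x horizontally upsampled row.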
template <typename T, int S>
inline T get_data_out(T data, int offset)
{
    T out{ 0 };
    for(int i = 0; i < S / 2; ++i)
    {
        out[2 * i]     = wrapper::vgetlane(data, i + offset);
        out[2 * i + 1] = wrapper::vgetlane(data, i + offset);
    }
    return out;
}
} // namespace
NEUpsampleLayerKernel::NEUpsampleLayerKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr), _info()
{
}

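// Checks that the requested configuration is supported: only a 2x2 stride with the
// nearest neighbor policy is implemented, and an already-configured output must match
// the upsampled input in shape, data type, data layout and quantization info.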
Status NEUpsampleLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &info, const InterpolationPolicy policy)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_UNUSED(policy);

    const DataLayout data_layout = input->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.x() != 2 || info.y() != 2, "Only stride 2 is supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(policy != InterpolationPolicy::NEAREST_NEIGHBOR, "Only nearest neighbor policy supported");

    // Check output if configured
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_width) != info.x() * input->dimension(idx_width));
        ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(idx_height) != info.y() * input->dimension(idx_height));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
    }
    return Status{};
}

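// 2x nearest neighbor upsample for NCHW tensors: every input element is replicated
// into a 2x2 block of the output. The vector loop expands S input elements into two
// lane-duplicated vectors and stores them to two consecutive output rows; the scalar
// tail handles any remaining elements one by one.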
template <typename T, int S>
void NEUpsampleLayerKernel::upsample_nchw(const arm_compute::Window &window)
{
    using VectorType = typename wrapper::traits::neon_vector<T, S>::type;

    Window window_in(window);
    window_in.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window window_out(window);
    window_out.set(Window::DimX, Window::Dimension(0, 1, 1));
    window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.y()));

    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());
    const int  window_step_x  = S;

    Iterator  input(_input, window_in);
    Iterator  output(_output, window_out);
    const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(T);

    execute_window_loop(window_out, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
        const auto output_ptr = reinterpret_cast<T *>(output.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const VectorType data      = wrapper::vloadq(reinterpret_cast<const T *>(input_ptr + x));
            const VectorType data_out1 = get_data_out<VectorType, S>(data, 0);
            const VectorType data_out2 = get_data_out<VectorType, S>(data, S / 2);

            wrapper::vstore(output_ptr + 2 * x, data_out1);
            wrapper::vstore(output_ptr + 2 * x + S, data_out2);
            wrapper::vstore(output_ptr + 2 * x + offset_y_out, data_out1);
            wrapper::vstore(output_ptr + 2 * x + offset_y_out + S, data_out2);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            *(output_ptr + 2 * x)                    = *(input_ptr + x);
            *(output_ptr + 2 * x + 1)                = *(input_ptr + x);
            *(output_ptr + 2 * x + offset_y_out)     = *(input_ptr + x);
            *(output_ptr + 2 * x + offset_y_out + 1) = *(input_ptr + x);
        }
    },
    input, output);
}

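// 2x nearest neighbor upsample for NHWC tensors: the innermost dimension holds the
// channels, so each loaded channel vector is stored unchanged to the four output
// positions (y, z), (y + 1, z), (y, z + 1) and (y + 1, z + 1) of the 2x2 spatial block.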
template <typename T, int S>
void NEUpsampleLayerKernel::upsample_nhwc(const arm_compute::Window &window)
{
    using VectorType = typename wrapper::traits::neon_vector<T, S>::type;

    Window window_out(window);
    window_out.set(Window::DimX, Window::Dimension(0, 1, 1));
    window_out.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1), _info.x()));
    window_out.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(2), _info.y()));

    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());
    const int  window_step_x  = S;

    Window window_in{ window };
    window_in.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(_input, window_in);
    Iterator output(_output, window_out);

    const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(T);
    const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(T);

    execute_window_loop(window_out, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
        const auto output_ptr = reinterpret_cast<T *>(output.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const VectorType data = wrapper::vloadq(reinterpret_cast<const T *>(input_ptr + x));

            wrapper::vstore(output_ptr + x, data);
            wrapper::vstore(output_ptr + x + offset_y_out, data);
            wrapper::vstore(output_ptr + x + offset_z_out, data);
            wrapper::vstore(output_ptr + x + offset_y_out + offset_z_out, data);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            *(output_ptr + x)                               = *(input_ptr + x);
            *(output_ptr + x + offset_y_out)                = *(input_ptr + x);
            *(output_ptr + x + offset_z_out)                = *(input_ptr + x);
            *(output_ptr + x + offset_y_out + offset_z_out) = *(input_ptr + x);
        }
    },
    input, output);
}

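// Infers and auto-initializes the output tensor if needed, validates the arguments and
// selects the kernel specialization matching the data layout and data type. F16 is only
// available when the target supports FP16 vector arithmetic.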
void NEUpsampleLayerKernel::configure(const ITensor *input, ITensor *output, const Size2D &info, const InterpolationPolicy policy)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_UNUSED(policy);

    _input  = input;
    _output = output;
    _info   = info;

    const DataLayout data_layout = input->info()->data_layout();

    TensorShape output_shape = misc::shape_calculator::compute_upsample_shape(*input->info(), info);
    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
    output->info()->set_data_layout(data_layout);

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(NEUpsampleLayerKernel::validate(input->info(), output->info(), info, policy));

    switch(data_layout)
    {
        case DataLayout::NCHW:
        {
            switch(input->info()->data_type())
            {
                case DataType::QASYMM8_SIGNED:
                    _func = &NEUpsampleLayerKernel::upsample_nchw<int8_t, 16>;
                    break;
                case DataType::QASYMM8:
                    _func = &NEUpsampleLayerKernel::upsample_nchw<uint8_t, 16>;
                    break;
                case DataType::F32:
                    _func = &NEUpsampleLayerKernel::upsample_nchw<float, 4>;
                    break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F16:
                    _func = &NEUpsampleLayerKernel::upsample_nchw<float16_t, 8>;
                    break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
                default:
                    ARM_COMPUTE_ERROR("Not implemented");
            }
            break;
        }
        case DataLayout::NHWC:
        {
            switch(input->info()->data_type())
            {
                case DataType::QASYMM8_SIGNED:
                    _func = &NEUpsampleLayerKernel::upsample_nhwc<int8_t, 16>;
                    break;
                case DataType::QASYMM8:
                    _func = &NEUpsampleLayerKernel::upsample_nhwc<uint8_t, 16>;
                    break;
                case DataType::F32:
                    _func = &NEUpsampleLayerKernel::upsample_nhwc<float, 4>;
                    break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F16:
                    _func = &NEUpsampleLayerKernel::upsample_nhwc<float16_t, 8>;
                    break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
                default:
                    ARM_COMPUTE_ERROR("Not implemented");
            }
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not implemented");
    }

    // Configure window
    Window      win = calculate_max_window(*input->info(), Steps());
    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
    INEKernel::configure(win);
}

void NEUpsampleLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    (this->*_func)(window);
}
} // namespace arm_compute