/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEPermuteKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

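// The header below provides the NCHW<->NHWC reorder routines used by run_permute();
// including it inside an anonymous namespace keeps the reorder:: helpers internal
// to this translation unit.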
namespace
{
#include "src/core/NEON/kernels/convolution/common/shims.hpp"
} // namespace

namespace arm_compute
{
namespace
{
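// Returns true if the given permutation vector matches one of the supported
// 2D, 3D or 4D permutations listed below.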
inline bool is_permutation_supported(const PermutationVector &v)
{
    static const std::array<PermutationVector, 2> permutations2 =
    {
        {
            PermutationVector(0U, 1U),
            PermutationVector(1U, 0U),
        }
    };
    static const std::array<PermutationVector, 6> permutations3 =
    {
        {
            PermutationVector(2U, 0U, 1U),
            PermutationVector(1U, 2U, 0U),
            PermutationVector(0U, 1U, 2U),
            PermutationVector(0U, 2U, 1U),
            PermutationVector(1U, 0U, 2U),
            PermutationVector(2U, 1U, 0U),
        }
    };
    static const std::array<PermutationVector, 24> permutations4 =
    {
        {
            PermutationVector(0U, 1U, 2U, 3U),
            PermutationVector(1U, 0U, 2U, 3U),
            PermutationVector(2U, 0U, 1U, 3U),
            PermutationVector(0U, 2U, 1U, 3U),
            PermutationVector(1U, 2U, 0U, 3U),
            PermutationVector(2U, 1U, 0U, 3U),
            PermutationVector(2U, 1U, 3U, 0U),
            PermutationVector(1U, 2U, 3U, 0U),
            PermutationVector(3U, 2U, 1U, 0U),
            PermutationVector(2U, 3U, 1U, 0U),
            PermutationVector(1U, 3U, 2U, 0U),
            PermutationVector(3U, 1U, 2U, 0U),
            PermutationVector(3U, 0U, 2U, 1U),
            PermutationVector(0U, 3U, 2U, 1U),
            PermutationVector(2U, 3U, 0U, 1U),
            PermutationVector(3U, 2U, 0U, 1U),
            PermutationVector(0U, 2U, 3U, 1U),
            PermutationVector(2U, 0U, 3U, 1U),
            PermutationVector(1U, 0U, 3U, 2U),
            PermutationVector(0U, 1U, 3U, 2U),
            PermutationVector(3U, 1U, 0U, 2U),
            PermutationVector(1U, 3U, 0U, 2U),
            PermutationVector(0U, 3U, 1U, 2U),
            PermutationVector(3U, 0U, 1U, 2U)
        }
    };

    return (permutations2.end() != std::find(permutations2.begin(), permutations2.end(), v)) || (permutations3.end() != std::find(permutations3.begin(), permutations3.end(), v))
           || (permutations4.end() != std::find(permutations4.begin(), permutations4.end(), v));
}

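// Validates the data type and permutation vector and, when the output is already
// configured, checks its shape, quantization info and data type against the input.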
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
{
    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_permutation_supported(perm), "PermutationVector not supported.");

    const TensorShape output_shape = misc::shape_calculator::compute_permutation_output_shape(*input, perm);

    // Validate configured output
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}
} // namespace

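// Applies the permutation for element type T: the NCHW->NHWC and NHWC->NCHW cases
// are dispatched to the reorder shims, while every other supported permutation uses
// the generic element-wise copy with permuted output strides.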
template <typename T>
void NEPermuteKernel::run_permute(const Window &window)
{
    const DataLayout input_layout = _input->info()->data_layout();

    // Input window
    Window window_in = window;

    // Only these two configurations are supported by src/core/NEON/kernels/convolution/common/shims.hpp;
    // all other permutations fall back to the generic C++ path.
    if((input_layout == DataLayout::NCHW && _perm == PermutationVector{ 2U, 0U, 1U }) || (input_layout == DataLayout::NHWC && _perm == PermutationVector{ 1U, 2U, 0U }))
    {
        // Collapse each dimension into a single step so the reorder shim handles the whole window in one call
        window_in.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().end() - window.x().start()));
        window_in.set(Window::DimY, Window::Dimension(window.y().start(), window.y().end(), window.y().end() - window.y().start()));
        window_in.set(Window::DimZ, Window::Dimension(window.z().start(), window.z().end(), window.z().end() - window.z().start()));
        window_in.set(3, Window::Dimension(window[3].start(), window[3].end(), window[3].end() - window[3].start()));
    }

    // Output window
    Window                  window_out(window);
    const Window::Dimension zero_window = Window::Dimension(0, 0, 0);
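    // Zero every output dimension so the output iterator stays at the start of the
    // buffer; the destination offset is computed explicitly inside the loops below.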
    for(size_t d = 0; d <= _output->info()->num_dimensions(); ++d)
    {
        window_out.set(d, zero_window);
    }

    // Create iterators
    Iterator in(_input, window_in);
    Iterator out(_output, window_out);

    int in_row_stride     = 0;
    int in_col_stride     = 0;
    int in_channel_stride = 0;
    int in_batch_stride   = 0;
    int n_cols            = 0;
    int n_rows            = 0;
    int n_channels        = 0;
    int n_batches         = 0;

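    // Express the input strides in elements of T and read the extents expected by the
    // reorder shims, according to the source data layout.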
    switch(input_layout)
    {
        case DataLayout::NCHW:
        {
            in_row_stride     = _input->info()->strides_in_bytes().y() / sizeof(T);
            in_channel_stride = _input->info()->strides_in_bytes().z() / sizeof(T);
            in_batch_stride   = _input->info()->strides_in_bytes()[3] / sizeof(T);
            n_cols            = _input->info()->tensor_shape().x();
            n_rows            = window_in.y().step();
            n_channels        = _input->info()->tensor_shape().z();
            n_batches         = _input->info()->tensor_shape()[3];
            break;
        }
        case DataLayout::NHWC:
        {
            in_col_stride   = _input->info()->strides_in_bytes().y() / sizeof(T);
            in_row_stride   = _input->info()->strides_in_bytes().z() / sizeof(T);
            in_batch_stride = _input->info()->strides_in_bytes()[3] / sizeof(T);
            n_channels      = _input->info()->tensor_shape().x();
            n_cols          = window_in.y().step();
            n_rows          = _input->info()->tensor_shape().z();
            n_batches       = _input->info()->tensor_shape()[3];
            break;
        }
        default:
        {
            ARM_COMPUTE_ERROR("Invalid input data layout.");
            break;
        }
    }

    // CHW -> HWC
    if(input_layout == DataLayout::NCHW && _perm == PermutationVector{ 2U, 0U, 1U })
    {
        const int out_channel_stride = _output->info()->strides_in_bytes().x() / sizeof(T);
        const int out_col_stride     = _output->info()->strides_in_bytes().y() / sizeof(T);
        const int out_row_stride     = _output->info()->strides_in_bytes().z() / sizeof(T);
        const int out_batch_stride   = _output->info()->strides_in_bytes()[3] / sizeof(T);
        execute_window_loop(window_in, [&](const Coordinates & id)
        {
            const int idx = id[0] * out_col_stride + id[1] * out_row_stride + id[2] * out_channel_stride;
            reorder::nchw_to_nhwc(reinterpret_cast<const T *>(in.ptr()), reinterpret_cast<T *>(out.ptr()) + idx,
                                  n_batches, n_channels, n_rows, n_cols,
                                  in_batch_stride, in_channel_stride, in_row_stride,
                                  out_batch_stride, out_row_stride, out_col_stride);
        },
        in, out);
    }
    // HWC -> CHW
    else if(input_layout == DataLayout::NHWC && _perm == PermutationVector{ 1U, 2U, 0U })
    {
        const int out_col_stride     = _output->info()->strides_in_bytes().x() / sizeof(T);
        const int out_row_stride     = _output->info()->strides_in_bytes().y() / sizeof(T);
        const int out_channel_stride = _output->info()->strides_in_bytes().z() / sizeof(T);
        const int out_batch_stride   = _output->info()->strides_in_bytes()[3] / sizeof(T);
        execute_window_loop(window_in, [&](const Coordinates & id)
        {
            const int idx = id[0] * out_channel_stride + id[1] * out_col_stride + id[2] * out_row_stride;
            reorder::nhwc_to_nchw(reinterpret_cast<const T *>(in.ptr()), reinterpret_cast<T *>(out.ptr()) + idx,
                                  n_batches, n_rows, n_cols, n_channels,
                                  in_batch_stride, in_row_stride, in_col_stride,
                                  out_batch_stride, out_channel_stride, out_row_stride);
        },
        in, out);
    }
    else
    {
        // All other cases fall back to C++
        // Permute strides
        Strides strides      = _output->info()->strides_in_bytes();
        Strides perm_strides = strides;
        permute_strides(perm_strides, _perm);
        const int perm_stride_3 = _input->info()->num_dimensions() >= 4 ? perm_strides[3] : 0;
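        // Each element is written at the offset obtained by applying the permuted
        // output strides to the source coordinates.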
        execute_window_loop(window, [&](const Coordinates & id)
        {
            const int idx                             = id[0] * perm_strides[0] + id[1] * perm_strides[1] + id[2] * perm_strides[2] + id[3] * perm_stride_3;
            *(reinterpret_cast<T *>(out.ptr() + idx)) = *(reinterpret_cast<const T *>(in.ptr()));
        },
        in, out);
    }
}

NEPermuteKernel::NEPermuteKernel()
    : _func(), _input(nullptr), _output(nullptr), _perm()
{
}

void NEPermuteKernel::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    const TensorShape output_shape = misc::shape_calculator::compute_permutation_output_shape(*input->info(), perm);
    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), perm));

    _input  = input;
    _output = output;
    _perm   = perm;

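    // The kernel only moves data, so the specialisation is selected by element size
    // rather than by data type.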
    switch(input->info()->element_size())
    {
        case 1:
            _func = &NEPermuteKernel::run_permute<uint8_t>;
            break;
        case 2:
            _func = &NEPermuteKernel::run_permute<uint16_t>;
            break;
        case 4:
            _func = &NEPermuteKernel::run_permute<uint32_t>;
            break;
        default:
            ARM_COMPUTE_ERROR("Element size not supported");
            break;
    }

    // Configure kernel window
    Window win = calculate_max_window(*input->info(), Steps());

    // NEPermuteKernel doesn't need padding, so update_window_and_padding() can be skipped
    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));

    ICPPKernel::configure(win);
}

Status NEPermuteKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, perm));
    return Status{};
}

void NEPermuteKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);

    if(_func != nullptr)
    {
        (this->*_func)(window);
    }
}
} // namespace arm_compute