/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h"

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include "support/ToolchainSupport.h"

namespace arm_compute
{
using namespace misc::shape_calculator;

namespace
{
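// Validate the tensor info and pooling configuration: only MAX pooling with a
// 2x2 pool size and U32 indices matching the input shape are supported.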
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, indices);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, indices);

    int                 pool_stride_x   = 0;
    int                 pool_stride_y   = 0;
    PoolingType         pool_type       = pool_info.pool_type;
    const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
    std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
    const int    pool_size_x = pool_info.pool_size.width;
    const int    pool_size_y = pool_info.pool_size.height;
    const Size2D pool_size(pool_size_x, pool_size_y);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
    }

    return Status{};
}
} // namespace

NEMaxUnpoolingLayerKernel::NEMaxUnpoolingLayerKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr), _indices(nullptr)
{
}

void NEMaxUnpoolingLayerKernel::configure(const ITensor *input, const ITensor *indices, ITensor *output, const PoolingLayerInfo &pool_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, indices, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info, indices->info()));

    _input   = input;
    _output  = output;
    _indices = indices;

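    // Select the unpooling function for the input data type.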
    switch(input->info()->data_type())
    {
        case DataType::F32:
            _func = &NEMaxUnpoolingLayerKernel::unpooling2<float>;
            break;
        case DataType::QASYMM8:
            _func = &NEMaxUnpoolingLayerKernel::unpooling2<uint8_t>;
            break;
        case DataType::QASYMM8_SIGNED:
            _func = &NEMaxUnpoolingLayerKernel::unpooling2<int8_t>;
            break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            _func = &NEMaxUnpoolingLayerKernel::unpooling2<float16_t>;
            break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        default:
            break;
    }
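    // Infer the unpooled output shape and initialize the output tensor info if it is still empty.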
    const TensorShape output_shape = compute_unpool_shape(*input->info(), pool_info);
    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));

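    // The kernel iterates over the pooled input; each element is scattered into the larger output.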
    auto window = calculate_max_window(*input->info(), Steps());
    INEKernel::configure(window);
}
template <typename T>
void NEMaxUnpoolingLayerKernel::unpooling2(const Window &window)
{
    Iterator  input(_input, window);
    Iterator  indices(_indices, window);
    auto      out_ptr      = reinterpret_cast<T *>(_output->buffer());
    const int out_stride_w = static_cast<int>(_output->info()->strides_in_bytes()[3]);
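    // Scatter each input value into the output: offset into the current batch (dimension 3),
    // then use the flat index recorded by the preceding max pooling layer.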
    execute_window_loop(window, [&](const Coordinates & id)
    {
        auto vindices                                         = reinterpret_cast<uint32_t *>(indices.ptr());
        auto vinput                                           = reinterpret_cast<T *>(input.ptr());
        out_ptr[id[3] * out_stride_w / sizeof(T) + *vindices] = *vinput;
    },
    input, indices);
}

Status NEMaxUnpoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, pool_info, indices));
    return Status{};
}

void NEMaxUnpoolingLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);
    // Run function
    (this->*_func)(window);
}
} // namespace arm_compute