/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"

#include "arm_compute/runtime/NEON/NEScheduler.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/common/utils/Log.h"
#include "src/core/NEON/kernels/NEPadLayerKernel.h"
#include "src/core/helpers/AutoConfiguration.h"

namespace arm_compute
{
namespace
{
// Returns the highest-indexed dimension with non-zero padding, scanning the
// padding list from the back. If no dimension is padded, the loop runs past
// the front and returns static_cast<uint32_t>(-1), so that the caller's
// "+ 1" deliberately wraps around to 0 (i.e. nothing to pad).
uint32_t last_padding_dimension(const PaddingList &padding)
{
    int last_padding_dim = padding.size() - 1;
    for(; last_padding_dim >= 0; --last_padding_dim)
    {
        if(padding[last_padding_dim].first > 0 || padding[last_padding_dim].second > 0)
        {
            break;
        }
    }
    return static_cast<uint32_t>(last_padding_dim);
}
} // namespace

NEPadLayer::~NEPadLayer() = default;

NEPadLayer::NEPadLayer()
    : _copy_function(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}

void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
    _pad_kernel = std::make_unique<NEPadLayerKernel>();
    _pad_kernel->configure(input, output, padding, constant_value, PaddingMode::CONSTANT);
}

void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
{
    // Reflecting can be performed by effectively unfolding the input as follows:
    // For each dimension starting at DimX:
    //      For before and after:
    //          Use strided slice to extract and reverse the part of the
    //          input / previously produced tensor required for the padding.
    //      Concatenate the before and after padding with the input / previously
    //      produced tensor along the current dimension.
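
    // Worked example (illustrative only): a 1-D input [1, 2, 3] padded by (2, 2)
    // in REFLECT mode slices [3, 2] for the "before" padding and [2, 1] for the
    // "after" padding, concatenating to [3, 2, 1, 2, 3, 2, 1]. SYMMETRIC mode
    // also repeats the border elements, giving [2, 1, 1, 2, 3, 3, 2].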

    // Two strided slice functions will be required for each dimension padded as well as a
    // concatenate function and the tensors to hold the temporary results.
    _slice_functions.resize(2 * _num_dimensions);
    _slice_results.resize(2 * _num_dimensions);
    _concat_functions.resize(_num_dimensions);
    _concat_results.resize(_num_dimensions - 1);

    Coordinates starts_before{};
    Coordinates ends_before{};
    Coordinates starts_after{};
    Coordinates ends_after{};
    Coordinates strides{};
    ITensor *prev = input;
    for(uint32_t i = 0; i < _num_dimensions; ++i)
    {
        // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
        if(i > 0)
        {
            strides.set(i - 1, 1);
        }

        if(_padding[i].first > 0 || _padding[i].second > 0)
        {
            // Set the starts, ends, and strides values for the current dimension.
            // Due to the bit masks passed to strided slice, the values below the current dimension in
            // starts and ends will be ignored so do not need to be modified.
            if(_mode == PaddingMode::REFLECT)
            {
                starts_before.set(i, _padding[i].first);
                ends_before.set(i, 0);
                starts_after.set(i, input->info()->dimension(i) - 2);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
                strides.set(i, -1);
            }
            else
            {
                starts_before.set(i, _padding[i].first - 1);
                ends_before.set(i, -1);
                starts_after.set(i, input->info()->dimension(i) - 1);
                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
                strides.set(i, -1);
            }

            // Strided slice wraps negative indexes around to the end of the range,
            // instead this should indicate use of the full range and so the bit mask will be modified.
            const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_before = ends_before[i] < 0 ? ~0 : ~(1u << i);
            const int32_t begin_mask_after = starts_after[i] < 0 ? ~0 : ~(1u << i);
            const int32_t end_mask_after = ends_after[i] < 0 ? ~0 : ~(1u << i);
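            // For instance (illustrative): with i = 1 and a non-negative start,
            // ~(1u << 1) = 0xFFFFFFFD leaves only bit 1 clear, so only dimension 1
            // honours the supplied start/end while every other dimension takes its
            // full range; ~0 sets bit 1 as well, mapping a negative start/end to
            // the full range of dimension 1 too.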

            // Reflect the input values for the padding before and after the input.
            std::vector<const ITensor *> concat_vector;
            if(_padding[i].first > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
                    concat_vector.emplace_back(&_slice_results[2 * i]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            concat_vector.push_back(prev);
            if(_padding[i].second > 0)
            {
                if(i < prev->info()->num_dimensions())
                {
                    _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
                    concat_vector.emplace_back(&_slice_results[2 * i + 1]);
                }
                else
                {
                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
                    concat_vector.push_back(prev);
                }
            }
            // Concatenate the padding before and after with the input.
            ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
            out->info()->set_quantization_info(output->info()->quantization_info());
            for(auto &v : concat_vector)
            {
                v->info()->set_quantization_info(input->info()->quantization_info());
            }
            _concat_functions[i].configure(concat_vector, out, i);
            if(i != _num_dimensions - 1)
            {
                _concat_results[i].allocator()->allocate();
            }
            prev = out;
        }
        _slice_results[2 * i].allocator()->allocate();
        _slice_results[2 * i + 1].allocator()->allocate();
    }
}

void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode));
    ARM_COMPUTE_LOG_PARAMS(input, output, padding, constant_value, mode);

    _padding = padding;
    _mode = mode;

    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding);

    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape));

    // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied.
    _num_dimensions = last_padding_dimension(padding) + 1;
    if(_num_dimensions > 0)
    {
        switch(_mode)
        {
            case PaddingMode::CONSTANT:
            {
                configure_constant_mode(input, output, padding, constant_value);
                break;
            }
            case PaddingMode::REFLECT:
            case PaddingMode::SYMMETRIC:
            {
                configure_reflect_symmetric_mode(input, output);
                break;
            }
            default:
                ARM_COMPUTE_ERROR("Padding mode not supported.");
        }
    }
    else
    {
        // Copy the input to the whole output if no padding is applied
        _copy_function.configure(input, output);
    }
}
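
// A minimal usage sketch (hypothetical caller code, not part of this translation
// unit); the tensor names, shapes, and padding values are illustrative:
//
//     Tensor src{}, dst{};
//     src.allocator()->init(TensorInfo(TensorShape(3U, 2U), 1, DataType::F32));
//     NEPadLayer pad{};
//     pad.configure(&src, &dst, PaddingList{ { 1, 1 }, { 0, 2 } }, PixelValue(0.f), PaddingMode::CONSTANT);
//     src.allocator()->allocate();
//     dst.allocator()->allocate(); // dst's shape was auto-initialised by configure()
//     // ... fill src ...
//     pad.run();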

Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_UNUSED(constant_value);

    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);

    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    switch(mode)
    {
        case PaddingMode::CONSTANT:
        {
            return NEPadLayerKernel::validate(input, output, padding, constant_value, mode);
        }
        case PaddingMode::REFLECT:
        case PaddingMode::SYMMETRIC:
        {
            for(uint32_t i = 0; i < padding.size(); ++i)
            {
                if(mode == PaddingMode::REFLECT)
                {
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
                }
                else
                {
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
                }
            }
            break;
        }
        default:
        {
            ARM_COMPUTE_ERROR("Invalid mode");
        }
    }
    return Status{};
}
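
// A minimal validation sketch (hypothetical caller code): validate() can be
// checked before committing to configure(), e.g.:
//
//     const Status s = NEPadLayer::validate(src.info(), dst.info(), padding, PixelValue(0.f), PaddingMode::REFLECT);
//     if(s.error_code() != ErrorCode::OK) { /* handle s.error_description() */ }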

void NEPadLayer::run()
{
    if(_num_dimensions > 0)
    {
        switch(_mode)
        {
            case PaddingMode::CONSTANT:
            {
                NEScheduler::get().schedule(_pad_kernel.get(), Window::DimZ);
                break;
            }
            case PaddingMode::REFLECT:
            case PaddingMode::SYMMETRIC:
            {
                for(uint32_t i = 0; i < _num_dimensions; ++i)
                {
                    if(_padding[i].first > 0 || _padding[i].second > 0)
                    {
                        if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0)
                        {
                            _slice_functions[2 * i].run();
                        }
                        if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0)
                        {
                            _slice_functions[2 * i + 1].run();
                        }
                        _concat_functions[i].run();
                    }
                }
                break;
            }
            default:
                ARM_COMPUTE_ERROR("Padding mode not supported.");
        }
    }
    else
    {
        _copy_function.run();
    }
}
} // namespace arm_compute