/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLReductionOperation.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/Utils.h"
#include "support/MemorySupport.h"

namespace arm_compute
{
CLReductionOperation::CLReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _results_vector(), _reduction_kernels_vector(), _border_handlers_vector(), _reshape(), _num_of_stages(), _reduction_axis(), _is_serial(),
      _is_reshape_required(false)
{
}

CLReductionOperation::~CLReductionOperation() = default;

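/** Static function to check whether a given CLReductionOperation configuration is valid.
 *
 * Validates the requested axis, the expected output shape when keep_dims is false,
 * and every underlying kernel stage (serialized or multi-stage), plus the final
 * reshape when one is required.
 */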
Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");

    const unsigned int num_of_stages       = utils::calculate_number_of_stages_only_x_axis(input->dimension(0), axis);
    const bool         is_serial           = needs_serialized_reduction(op, input->data_type(), axis);
    const bool         is_reshape_required = !keep_dims;
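    // A reduction is either serialized into a single kernel (depending on the op,
    // data type and axis, see needs_serialized_reduction) or split along the X axis
    // into num_of_stages parallel kernel launches.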

    if(is_reshape_required && output->total_size() != 0)
    {
        const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, keep_dims));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output);
    }

    auto *output_internal = output;

    TensorInfo output_before_reshape;
    const auto input_shape        = input->tensor_shape();
    const auto input_data_type    = input->data_type();
    const auto input_num_channels = input->num_channels();
    const auto input_qinfo        = input->quantization_info();
    const auto output_data_type   = output->data_type();

    auto initialize_tensorinfo = [](TensorInfo & ti, TensorShape shape, DataType data_type, int num_channels, QuantizationInfo qinfo)
    {
        ti.set_data_type(data_type).set_tensor_shape(shape).set_num_channels(num_channels).set_quantization_info(qinfo);
    };

    if(is_reshape_required)
    {
        auto shape_before_reshape = input_shape;
        shape_before_reshape.set(axis, 1);
        initialize_tensorinfo(output_before_reshape, shape_before_reshape, output_data_type, input_num_channels, input_qinfo);
        output_internal = &output_before_reshape;
    }

    if(is_serial)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output_internal, axis, op));
    }
    else
    {
        // Create temporary tensor infos
        std::vector<TensorInfo> sums_vector(num_of_stages - 1);

        // Create intermediate tensor info
        TensorShape shape{ input_shape };

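        // Each parallel stage shrinks the X dimension by a factor of 128: every output
        // element of a stage accumulates a block of up to 128 input elements.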
        shape.set(0, ceil(shape.x() / 128.f));

        for(unsigned int i = 0; i < num_of_stages - 1; i++)
        {
            initialize_tensorinfo(sums_vector[i], shape, input_data_type, input_num_channels, input_qinfo);
        }

        ReductionOperation first_kernel_op;
        ReductionOperation intermediate_kernel_op;
        ReductionOperation last_kernel_op;
        switch(op)
        {
            case ReductionOperation::SUM:
            case ReductionOperation::MEAN_SUM:
                first_kernel_op        = ReductionOperation::SUM;
                intermediate_kernel_op = ReductionOperation::SUM;
                last_kernel_op         = op;
                break;
            case ReductionOperation::SUM_SQUARE:
                first_kernel_op        = ReductionOperation::SUM_SQUARE;
                intermediate_kernel_op = ReductionOperation::SUM;
                last_kernel_op         = ReductionOperation::SUM;
                break;
            case ReductionOperation::PROD:
                first_kernel_op        = ReductionOperation::PROD;
                intermediate_kernel_op = ReductionOperation::PROD;
                last_kernel_op         = ReductionOperation::PROD;
                break;
            case ReductionOperation::MIN:
                first_kernel_op        = ReductionOperation::MIN;
                intermediate_kernel_op = ReductionOperation::MIN;
                last_kernel_op         = ReductionOperation::MIN;
                break;
            case ReductionOperation::MAX:
                first_kernel_op        = ReductionOperation::MAX;
                intermediate_kernel_op = ReductionOperation::MAX;
                last_kernel_op         = ReductionOperation::MAX;
                break;
            default:
                ARM_COMPUTE_ERROR("Not supported");
        }

        // Validate ReductionOperation only on first kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, &sums_vector[0], axis, first_kernel_op));

        // Validate ReductionOperation on intermediate stages
        for(unsigned int i = 1; i < num_of_stages - 1; ++i)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[i - 1], &sums_vector[i], axis, intermediate_kernel_op));
        }

        // Validate ReductionOperation on the last stage
        const unsigned int last_stage = num_of_stages - 1;
        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[last_stage - 1], output_internal, axis, last_kernel_op, input->dimension(0)));
    }

    if(is_reshape_required)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(output_internal, output));
    }

    return Status{};
}

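/** Initializes the vector of intermediate result tensors and returns the tensor the
 * reduction kernels should ultimately write to: the last intermediate result when a
 * final reshape is required, otherwise the user-provided output.
 */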
ICLTensor *CLReductionOperation::configure_intermediate_result_vector(ICLTensor *input, ICLTensor *output)
{
    if(!_is_reshape_required && _is_serial)
    {
        return output;
    }

    auto intermediate_result_vector_size = _is_serial ? 1 : _num_of_stages;

    if(!_is_reshape_required)
    {
        --intermediate_result_vector_size;
    }

    _results_vector.resize(intermediate_result_vector_size);
    auto shape = input->info()->tensor_shape();

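    // A serial reduction collapses the reduction axis to 1 in one pass, while a
    // parallel reduction first shrinks X by the per-stage factor of 128; the last
    // intermediate result is collapsed to 1 inside the loop below when reshaping.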
    shape.set(_reduction_axis, _is_serial ? 1 : ceil(shape.x() / 128.f));

    for(auto &v : _results_vector)
    {
        if(&v == &_results_vector.back() && _is_reshape_required)
        {
            shape.set(_reduction_axis, 1);
        }
        v.allocator()->init(input->info()->clone()->set_tensor_shape(shape));
    }

    return _is_reshape_required ? &_results_vector.back() : output;
}

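/** Set the input and output tensors, using the default CL compile context.
 *
 * A minimal usage sketch (illustrative only; the tensors, axis and op below are
 * assumptions of the example, not mandated by this function):
 *
 * @code
 * CLTensor src, dst;
 * // ... configure src's TensorInfo and allocate both tensors ...
 * CLReductionOperation reduce;
 * reduce.configure(&src, &dst, 0, ReductionOperation::SUM, false);
 * reduce.run();
 * @endcode
 */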
void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, output, axis, op, keep_dims);
}

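/** Set the input and output tensors with an explicit CL compile context.
 *
 * Chooses between a single serialized reduction kernel and a multi-stage pipeline
 * of border-fill and reduction kernels, and appends a final reshape when
 * keep_dims is false.
 */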
void CLReductionOperation::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    _num_of_stages       = utils::calculate_number_of_stages_only_x_axis(input->info()->dimension(0), axis);
    _reduction_axis      = axis;
    _is_serial           = needs_serialized_reduction(op, input->info()->data_type(), axis);
    _is_reshape_required = !keep_dims;

    auto *output_internal = configure_intermediate_result_vector(input, output);

    if(_is_reshape_required)
    {
        const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
        const auto        output_data_type = input->info()->data_type();
        auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
    }

    // Configure reduction operation kernels
    _reduction_kernels_vector.reserve(_num_of_stages);

    // Create temporary tensors
    if(_is_serial)
    {
        if(_is_reshape_required)
        {
            _memory_group.manage(&_results_vector.back());
        }

        _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
        _reduction_kernels_vector[0]->configure(compile_context, input, output_internal, axis, op, 0);
    }
    else
    {
        _border_handlers_vector.reserve(_num_of_stages);
        _memory_group.manage(&_results_vector[0]);

        ReductionOperation first_kernel_op;
        ReductionOperation intermediate_kernel_op;
        ReductionOperation last_kernel_op;
        PixelValue         pixelValue;
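        // Borders are filled with the operation's identity element (0 for sums, 1 for
        // products, the type's maximum for MIN and its minimum for MAX) so padded
        // elements cannot affect the reduced result.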
        switch(op)
        {
            case ReductionOperation::SUM:
            case ReductionOperation::MEAN_SUM:
                first_kernel_op        = ReductionOperation::SUM;
                intermediate_kernel_op = ReductionOperation::SUM;
                last_kernel_op         = op;
                pixelValue             = PixelValue();
                break;
            case ReductionOperation::SUM_SQUARE:
                first_kernel_op        = ReductionOperation::SUM_SQUARE;
                intermediate_kernel_op = ReductionOperation::SUM;
                last_kernel_op         = ReductionOperation::SUM;
                pixelValue             = PixelValue();
                break;
            case ReductionOperation::PROD:
                first_kernel_op        = ReductionOperation::PROD;
                intermediate_kernel_op = ReductionOperation::PROD;
                last_kernel_op         = ReductionOperation::PROD;
                pixelValue             = PixelValue(1, input->info()->data_type());
                break;
            case ReductionOperation::MIN:
                first_kernel_op        = ReductionOperation::MIN;
                intermediate_kernel_op = ReductionOperation::MIN;
                last_kernel_op         = ReductionOperation::MIN;
                pixelValue             = std::get<1>(get_min_max(input->info()->data_type()));
                break;
            case ReductionOperation::MAX:
                first_kernel_op        = ReductionOperation::MAX;
                intermediate_kernel_op = ReductionOperation::MAX;
                last_kernel_op         = ReductionOperation::MAX;
                pixelValue             = std::get<0>(get_min_max(input->info()->data_type()));
                break;
            default:
                ARM_COMPUTE_ERROR("Not supported");
        }

        _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
        _reduction_kernels_vector[0]->configure(compile_context, input, &_results_vector[0], axis, first_kernel_op);

        _border_handlers_vector.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
        _border_handlers_vector[0]->configure(compile_context, input, _reduction_kernels_vector[0]->border_size(), BorderMode::CONSTANT, pixelValue);

        // Apply ReductionOperation on intermediate stages
        for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
        {
            _memory_group.manage(&_results_vector[i]);

            _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
            _reduction_kernels_vector[i]->configure(compile_context, &_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);

            _border_handlers_vector.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
            _border_handlers_vector[i]->configure(compile_context, &_results_vector[i - 1], _reduction_kernels_vector[i]->border_size(), BorderMode::CONSTANT, pixelValue);

            _results_vector[i - 1].allocator()->allocate();
        }

        // Apply ReductionOperation on the last stage
        const unsigned int last_stage  = _num_of_stages - 1;
        const unsigned int input_width = input->info()->dimension(0);

        if(_is_reshape_required)
        {
            _memory_group.manage(&_results_vector.back());
        }

        _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
        _reduction_kernels_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], output_internal, axis, last_kernel_op, input_width);

        _border_handlers_vector.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
        _border_handlers_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage]->border_size(), BorderMode::CONSTANT, pixelValue);

        _results_vector[last_stage - 1].allocator()->allocate();
    }

    if(_is_reshape_required)
    {
        _reshape.configure(compile_context, &_results_vector.back(), output);
        _results_vector.back().allocator()->allocate();
    }
}

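/** Run the configured function: the single serialized kernel, or one border-fill and
 * one reduction kernel per stage, followed by the final reshape when required.
 */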
void CLReductionOperation::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_serial)
    {
        CLScheduler::get().enqueue(*_reduction_kernels_vector[0], false);
    }
    else
    {
        for(unsigned int i = 0; i < _num_of_stages; ++i)
        {
            CLScheduler::get().enqueue(*_border_handlers_vector[i], false);
            CLScheduler::get().enqueue(*_reduction_kernels_vector[i], false);
        }
    }

    if(_is_reshape_required)
    {
        _reshape.run();
    }
}
} // namespace arm_compute