/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLElementwiseOperationKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/common/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
#include <map>

namespace arm_compute
{
namespace
{
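// Target number of bytes processed per work-item; the per-type vector size is derived from this below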
constexpr unsigned int vector_size_byte_opencl = 16;

std::map<ArithmeticOperation, std::string> supported_arithmetic_ops =
{
    { ArithmeticOperation::ADD, "ADD" },
    { ArithmeticOperation::SUB, "SUB" },
    { ArithmeticOperation::DIV, "DIV" },
    { ArithmeticOperation::SQUARED_DIFF, "SQUARED_DIFF" },
    { ArithmeticOperation::MIN, "MIN" },
    { ArithmeticOperation::MAX, "MAX" },
    { ArithmeticOperation::POWER, "POWER" },
    { ArithmeticOperation::PRELU, "PRELU" },
};

std::map<ArithmeticOperation, std::string> supported_sat_arithmetic_ops =
{
    { ArithmeticOperation::ADD, "ADD" },
    { ArithmeticOperation::SUB, "SUB" },
};

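/** Build the base configuration id (kernel name, input data type, output width and height) used for LWS tuning */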
std::string generate_id_for_tuning_common(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
{
    std::string config_id;
    // Set config_id for enabling LWS tuning
    config_id = kernel_name;
    config_id += "_";
    config_id += lower_string(string_from_data_type(input1.data_type()));
    config_id += "_";
    config_id += support::cpp11::to_string(output.dimension(0));
    config_id += "_";
    config_id += support::cpp11::to_string(output.dimension(1));
    return config_id;
}

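/** Validation rules for operators that only support floating-point data (F16/F32), e.g. DIV and POWER */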
Status validate_arguments_with_float_only_supported_rules(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(&input1, &input2, &output);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);

    const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                        "Wrong shape for output");
    }

    return Status{};
}

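/** Validation rules for general arithmetic operators: integer, float and quantized inputs are accepted,
 * shapes must be broadcast compatible and symmetric quantized tensors must have a zero offset */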
Status validate_arguments_with_arithmetic_rules(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
                                                         DataType::S16, DataType::QSYMM16, DataType::F16,
                                                         DataType::S32, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input2);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
                                                         DataType::S16, DataType::QSYMM16, DataType::F16,
                                                         DataType::S32, DataType::F32);

    const bool is_quantized = is_data_type_quantized(input1.data_type()) || is_data_type_quantized(input2.data_type());
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);

        if(is_data_type_quantized_symmetric(input1.data_type()))
        {
            const int32_t in1_offset = input1.quantization_info().uniform().offset;
            const int32_t in2_offset = input2.quantization_info().uniform().offset;
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(in1_offset != 0, "For quantized symmetric, offset must be zero");
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(in2_offset != 0, "For quantized symmetric, offset must be zero");
        }
    }

    const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&output);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
                                                             DataType::S16, DataType::QSYMM16, DataType::F16,
                                                             DataType::S32, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((output.data_type() == DataType::U8) && ((input1.data_type() != DataType::U8) || (input2.data_type() != DataType::U8)),
                                        "Output can only be U8 if both inputs are U8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                        "Wrong shape for output");

        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);

            if(is_data_type_quantized_symmetric(output.data_type()))
            {
                const int32_t offset = output.quantization_info().uniform().offset;
                ARM_COMPUTE_RETURN_ERROR_ON_MSG(offset != 0, "For quantized symmetric, offset must be zero");
            }
        }
    }
    return Status{};
}

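/** Build the OpenCL options shared by all element-wise kernels: data types, vector sizes, leftover handling,
 * the operation macro and, for quantized inputs, quantization offsets and scales */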
CLBuildOptions generate_build_options_with_arithmetic_rules(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, const std::string &operation_string)
{
    CLBuildOptions build_opts;

    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / output.element_size(), output.dimension(0));

    build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1.data_type()));
    build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2.data_type()));
    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output.data_type()));
    build_opts.add_option("-DVEC_SIZE_IN1=" + support::cpp11::to_string(input1.dimension(0) == 1 ? 1 : num_elems_processed_per_iteration));
    build_opts.add_option("-DVEC_SIZE_IN2=" + support::cpp11::to_string(input2.dimension(0) == 1 ? 1 : num_elems_processed_per_iteration));
    build_opts.add_option("-DVEC_SIZE_OUT=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(output.dimension(0) % num_elems_processed_per_iteration));
    build_opts.add_option("-DOP=" + operation_string);
    if(is_data_type_quantized(input1.data_type()))
    {
        const UniformQuantizationInfo iq1info = input1.quantization_info().uniform();
        const UniformQuantizationInfo iq2info = input2.quantization_info().uniform();
        const UniformQuantizationInfo oqinfo  = output.quantization_info().uniform();

        build_opts.add_option("-DOFFSET_IN1=" + support::cpp11::to_string(iq1info.offset));
        build_opts.add_option("-DOFFSET_IN2=" + support::cpp11::to_string(iq2info.offset));
        build_opts.add_option("-DOFFSET_OUT=" + support::cpp11::to_string(oqinfo.offset));
        build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1info.scale));
        build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2info.scale));
        build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oqinfo.scale));
    }
    return build_opts;
}

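/** Compute the maximum execution window over the output, stepping by the adjusted vector size */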
std::pair<Status, Window> configure_window_arithmetic_common(ITensorInfo &output)
{
    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / output.element_size(), output.dimension(0));
    Window win = calculate_max_window(output, Steps(num_elems_processed_per_iteration));
    return std::make_pair(Status{}, win);
}

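/** Infer the broadcast output shape and, if unknown, the output format/data type, then configure the window */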
std::pair<Status, Window> validate_and_configure_window_for_arithmetic_operators(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
    const TensorShape &out_shape = broadcast_pair.first;

    set_shape_if_empty(output, out_shape);

    if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16)
    {
        set_format_if_unknown(output, Format::S16);
    }
    else if(input1.data_type() == DataType::F16 || input2.data_type() == DataType::F16)
    {
        set_format_if_unknown(output, Format::F16);
    }
    else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32)
    {
        set_format_if_unknown(output, Format::F32);
    }
    else if(input1.data_type() == DataType::QASYMM8 || input2.data_type() == DataType::QASYMM8)
    {
        set_data_type_if_unknown(output, DataType::QASYMM8);
    }
    else if(input1.data_type() == DataType::QASYMM8_SIGNED || input2.data_type() == DataType::QASYMM8_SIGNED)
    {
        set_data_type_if_unknown(output, DataType::QASYMM8_SIGNED);
    }
    else if(input1.data_type() == DataType::QSYMM16 || input2.data_type() == DataType::QSYMM16)
    {
        set_data_type_if_unknown(output, DataType::QSYMM16);
    }

    return configure_window_arithmetic_common(output);
}

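/** Logical binary kernels always produce a U8 output; infer the broadcast shape and configure the window */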
std::pair<Status, Window> validate_and_configure_window_for_logical_binary_operators(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
    const TensorShape &out_shape = broadcast_pair.first;

    set_shape_if_empty(output, out_shape);
    set_data_type_if_unknown(output, DataType::U8);

    // The arithmetic utility functions can be shared
    return configure_window_arithmetic_common(output);
}

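/** DIV and POWER outputs inherit the data type of the first input; infer the broadcast shape and configure the window */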
std::pair<Status, Window> validate_and_configure_window_for_division(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
    const TensorShape &out_shape = broadcast_pair.first;
    auto_init_if_empty(output, out_shape, 1, input1.data_type());
    return configure_window_arithmetic_common(output);
}
} // namespace

CLElementwiseOperationKernel::CLElementwiseOperationKernel()
    : _act_info(), _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLElementwiseOperationKernel::configure_common(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    configure_common(CLKernelLibrary::get().get_compile_context(), input1, input2, output);
}

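/** Shared configuration path: validates the execution window, assembles the kernel name and build options
 * (including an optional fused activation) and creates the OpenCL kernel */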
void CLElementwiseOperationKernel::configure_common(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    // Configure kernel window
    auto win_config = validate_and_configure_window(*input1, *input2, *output);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    std::string kernel_name = "elementwise_operation_" + name();
    if(is_data_type_quantized(input1->data_type()))
    {
        kernel_name += "_quantized";
    }

    // Set kernel build options
    CLBuildOptions build_opts = generate_build_options(*input1, *input2, *output);
    if(_act_info.enabled())
    {
        build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(_act_info.activation())));
        build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(_act_info.a()));
        build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(_act_info.b()));
    }

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    ICLKernel::configure_internal(win_config.second);

    _config_id = generate_id_for_tuning(kernel_name, *input1, *output);
}

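/** Collapse dimensions above Z when both inputs agree on them, then enqueue one 3D slice at a time,
 * broadcasting an input window along any dimension of size one */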
void CLElementwiseOperationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    auto       dst   = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    const TensorShape &in_shape1 = src_0->info()->tensor_shape();
    const TensorShape &in_shape2 = src_1->info()->tensor_shape();
    const TensorShape &out_shape = dst->info()->tensor_shape();

    bool       can_collapse = true;
    const bool is_vector    = in_shape1.num_dimensions() == 1 || in_shape2.num_dimensions() == 1;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1 && !is_vector)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, src_0, slice_input1);
        add_3D_tensor_argument(idx, src_1, slice_input2);
        add_3D_tensor_argument(idx, dst, slice);

        enqueue(queue, *this, slice, lws_hint());
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

/** Logical binary */
void CLLogicalBinaryKernel::configure(const CLCompileContext &compile_context, kernels::LogicalOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLLogicalBinaryKernel::validate(op, input1, input2, output));
    _op = op;
    configure_common(compile_context, input1, input2, output);
}

Status CLLogicalBinaryKernel::validate(kernels::LogicalOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_UNUSED(op);
    ARM_COMPUTE_ASSERT(op != kernels::LogicalOperation::Unknown && op != kernels::LogicalOperation::Not);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2);

    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_logical_binary_operators(*input1->clone(), *input2->clone(), *output->clone()).first);

    return Status{};
}

std::string CLLogicalBinaryKernel::name()
{
    switch(_op)
    {
        case kernels::LogicalOperation::And:
            return "AND";
        case kernels::LogicalOperation::Or:
            return "OR";
        case kernels::LogicalOperation::Not:
        /* fall through */
        default:
            // Unreachable for a validated kernel: NOT and UNKNOWN are rejected in validate()
            ARM_COMPUTE_ASSERT(false);
    }
    return "";
}

std::pair<Status, Window> CLLogicalBinaryKernel::validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    return validate_and_configure_window_for_logical_binary_operators(input1, input2, output);
}

CLBuildOptions CLLogicalBinaryKernel::generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    // The arithmetic utility functions can be shared
    return generate_build_options_with_arithmetic_rules(input1, input2, output, name());
}

std::string CLLogicalBinaryKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
{
    return generate_id_for_tuning_common(kernel_name, input1, output);
}

/** Arithmetic operations with saturation */

void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy,
                                                     const ActivationLayerInfo &act_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, policy, act_info);
}

void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output,
                                                     const ConvertPolicy &policy,
                                                     const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLSaturatedArithmeticOperationKernel::validate(op, input1, input2, output, policy, act_info));
    auto padding_info = get_padding_info({ input1, input2, output });

    _policy   = policy;
    _op       = op;
    _act_info = act_info;
    configure_common(compile_context, input1, input2, output);
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

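/** Saturated ADD/SUB accept the full set of arithmetic data types; a fused activation is only allowed for float outputs */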
Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
                                                      const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_UNUSED(op, policy);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));

    return Status{};
}

std::pair<Status, Window> CLSaturatedArithmeticOperationKernel::validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    return validate_and_configure_window_for_arithmetic_operators(input1, input2, output);
}

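/** Adds the common arithmetic build options plus the overflow policy macro; floating-point outputs always
 * compile with -DWRAP regardless of the requested policy */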
CLBuildOptions CLSaturatedArithmeticOperationKernel::generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    const bool has_float_out = is_data_type_float(output.data_type());
    auto       build_options = generate_build_options_with_arithmetic_rules(input1, input2, output, name());
    build_options.add_option((_policy == ConvertPolicy::WRAP || has_float_out) ? "-DWRAP" : "-DSATURATE");
    return build_options;
}

std::string CLSaturatedArithmeticOperationKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
{
    auto config_id = generate_id_for_tuning_common(kernel_name, input1, output);
    config_id += (_policy == ConvertPolicy::WRAP) ? "_wrap_" : "_saturate_";
    config_id += lower_string(string_from_data_layout(input1.data_layout()));
    return config_id;
}

std::string CLSaturatedArithmeticOperationKernel::name()
{
    return supported_sat_arithmetic_ops[_op];
}

/** Arithmetic operations */

void CLArithmeticOperationKernel::configure(ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, act_info);
}

void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output,
                                            const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticOperationKernel::validate(op, input1, input2, output, act_info));
    auto padding_info = get_padding_info({ input1, input2, output });

    _op       = op;
    _act_info = act_info;
    configure_common(compile_context, input1, input2, output);
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

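/** DIV and POWER are validated against float-only rules; all other operations accept the full arithmetic type set.
 * A fused activation is only allowed for float outputs */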
Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    if(op == ArithmeticOperation::DIV || op == ArithmeticOperation::POWER)
    {
        // Division and Power operators don't support integer arithmetic
        ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_float_only_supported_rules(*input1, *input2, *output));
        ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_division(*input1->clone(), *input2->clone(), *output->clone()).first);
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_with_arithmetic_rules(*input1, *input2, *output));
        ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_for_arithmetic_operators(*input1->clone(), *input2->clone(), *output->clone()).first);
    }
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));

    return Status{};
}

std::pair<Status, Window> CLArithmeticOperationKernel::validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
    if(_op == ArithmeticOperation::DIV || _op == ArithmeticOperation::POWER)
    {
        // Division and Power operators don't support integer arithmetic
        return validate_and_configure_window_for_division(input1, input2, output);
    }
    else
    {
        return validate_and_configure_window_for_arithmetic_operators(input1, input2, output);
    }
}

CLBuildOptions CLArithmeticOperationKernel::generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    return generate_build_options_with_arithmetic_rules(input1, input2, output, name());
}

std::string CLArithmeticOperationKernel::generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output)
{
    return generate_id_for_tuning_common(kernel_name, input1, output);
}

std::string CLArithmeticOperationKernel::name()
{
    return supported_arithmetic_ops[_op];
}
} // namespace arm_compute