/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ClComponentElementwiseBinary.h"

#include "arm_compute/core/Validate.h"
#include "src/core/CL/CLValidate.h"
#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.h"

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
namespace
{
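// Elementwise operations currently handled by this component; validate() below rejects anything else.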
std::set<ElementwiseBinaryCommonAttributes::ElementwiseOp> supported_ops
{
    ElementwiseBinaryCommonAttributes::ElementwiseOp::Add,
    ElementwiseBinaryCommonAttributes::ElementwiseOp::Sub,
    ElementwiseBinaryCommonAttributes::ElementwiseOp::Mul
};
}

Status ClComponentElementwiseBinary::validate(const ArgumentPack<ITensorInfo> &tensors, const ElementwiseBinaryCommonAttributes &attributes)
{
    const auto lhs = tensors.get_const_tensor(TensorType::ACL_SRC_0);
    const auto rhs = tensors.get_const_tensor(TensorType::ACL_SRC_1);
    const auto dst = tensors.get_const_tensor(TensorType::ACL_DST_0);

    // Check operator type
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(supported_ops.find(attributes.operation()) == supported_ops.end(), "Provided Elementwise operation not supported.");

    // Check validity
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);

    // Check data type for different elementwise operators
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16, DataType::S32, DataType::S16, DataType::U8);

    // dst shape is correct
    const TensorShape out_shape = TensorShape::broadcast_shape(lhs->tensor_shape(), rhs->tensor_shape());
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0), "Wrong shape for dst.");

    const auto &lhs_shape = lhs->tensor_shape();
    const auto &rhs_shape = rhs->tensor_shape();
    const auto &dst_shape = dst->tensor_shape();

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(
        detail::have_different_dimensions(lhs_shape, dst_shape, 0) && detail::have_different_dimensions(rhs_shape, dst_shape, 0),
        "Only LHS or RHS can be broadcasting, not both.");
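    // For example, lhs (1, 4, 2) and rhs (8, 1, 2) against dst (8, 4, 2) is rejected here,
    // because both inputs would then need to be broadcast.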

    // Dimension Y and Z are collapsed together in the current kernel implementation,
    // hence they cannot be independently broadcast or non-broadcast.
    // See: ClTemplateElementwiseBinary::get_window
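    // For example, with rhs == dst == (8, 4, 2): lhs (8, 1, 1) is accepted (broadcast in both
    // Y and Z), while lhs (8, 1, 2) is rejected (broadcast in Y only).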
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(
        (lhs_shape[1] != dst_shape[1] || rhs_shape[1] != dst_shape[1]) != (lhs_shape[2] != dst_shape[2] || rhs_shape[2] != dst_shape[2]),
        "Dimension Y and Z must both be either broadcast or non-broadcast.");

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(
        detail::have_different_dimensions(lhs_shape, dst_shape, 3),
        "LHS broadcast in dimension 3 or higher is not supported.");

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(
        detail::have_different_dimensions(rhs_shape, dst_shape, 3),
        "RHS broadcast in dimension 3 or higher is not supported.");
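    // For example, an input of shape (8, 4, 2, 1) against dst (8, 4, 2, 3) is rejected by the
    // two checks above, since the broadcast falls in dimension 3 (typically the batch dimension).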

    // Matching data type
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst);

    // Matching data layout
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(lhs, rhs);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(lhs, dst);

    // All tensor infos are initialized
    ARM_COMPUTE_RETURN_ERROR_ON(lhs->tensor_shape().total_size() == 0);
    ARM_COMPUTE_RETURN_ERROR_ON(rhs->tensor_shape().total_size() == 0);
    ARM_COMPUTE_RETURN_ERROR_ON(dst->tensor_shape().total_size() == 0);

    // Device requirements are met
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(lhs);

    return Status{};
}
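
// A rough usage sketch for the validation entry point above. The ArgumentPack and
// attribute setter calls are assumed from the surrounding dynamic fusion API, and
// lhs_info / rhs_info / dst_info stand for hypothetical, previously initialised
// TensorInfo objects; the snippet is illustrative only:
//
//   ArgumentPack<ITensorInfo> tensors;
//   tensors.add_const_tensor(TensorType::ACL_SRC_0, &lhs_info);
//   tensors.add_const_tensor(TensorType::ACL_SRC_1, &rhs_info);
//   tensors.add_const_tensor(TensorType::ACL_DST_0, &dst_info);
//
//   ElementwiseBinaryCommonAttributes attributes{};
//   attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
//
//   const Status status = ClComponentElementwiseBinary::validate(tensors, attributes);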

ClComponentElementwiseBinary::ClComponentElementwiseBinary(
    ComponentId                      id,
    const Properties                &properties,
    const ArgumentPack<ITensorInfo> &tensors,
    const Attributes                &attributes)
    : IGpuKernelComponent{ id, properties, tensors },
      _component_writer{ std::make_unique<ClTemplateElementwiseBinary>(id, tensors, attributes) }
{
}
ClComponentElementwiseBinary::~ClComponentElementwiseBinary()
{
}
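// Returns the template writer that generates the OpenCL kernel source for this component.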
const IGpuTemplateComponentWriter *ClComponentElementwiseBinary::template_writer() const
{
    return _component_writer.get();
}
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute