/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/nodes/SplitLayerNode.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"

namespace arm_compute
{
namespace graph
{
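// A split node has a single input edge and produces num_splits output tensors.
// When size_splits is empty the input is divided evenly along the split axis;
// otherwise each entry gives the size of the corresponding output along that axis.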
SplitLayerNode::SplitLayerNode(unsigned int num_splits, int axis, std::vector<int> size_splits)
    : _num_splits(num_splits), _axis(axis), _size_splits(size_splits)
{
    _input_edges.resize(1, EmptyEdgeID);
    _outputs.resize(num_splits, NullTensorID);
}

unsigned int SplitLayerNode::num_splits() const
{
    return _num_splits;
}

unsigned int SplitLayerNode::axis() const
{
    return _axis;
}

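// Computes the shape of output 'idx' and the coordinates at which its slice starts
// inside the input tensor. For example, an even split of a [2, 8] input with
// num_splits = 4 along axis 1 yields outputs of shape [2, 2] starting at offset
// idx * 2 along that axis.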
std::pair<TensorDescriptor, Coordinates> SplitLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                                   unsigned int num_splits, int axis, unsigned int idx)
{
    // Handle negative axis; a negative index specifies the axis counting from the end (e.g. -1 for the last axis).
    int              num_dimension = static_cast<int32_t>(input_descriptor.shape.num_dimensions());
    int              tmp_axis      = wrap_around(axis, num_dimension);
    Coordinates      coords;
    TensorDescriptor output_descriptor = input_descriptor;
    int              split_size        = input_descriptor.shape[tmp_axis] / num_splits;
    if(_size_splits.empty())
    {
        output_descriptor.shape.set(tmp_axis, split_size);
        coords.set(tmp_axis, idx * split_size);
    }
    else
    {
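        // Explicit split sizes were provided; an entry of -1 means "whatever remains
        // along the axis after the other splits". The remainder below is computed by
        // subtracting every entry except the last, so the -1 (if any) is expected to
        // be the final element of _size_splits.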
        int split_size = _size_splits[idx];
        if(split_size == -1)
        {
            split_size = input_descriptor.shape[tmp_axis];
            for(unsigned int i = 0; i < _size_splits.size() - 1; ++i)
                split_size -= _size_splits[i];
        }
        output_descriptor.shape.set(tmp_axis, split_size);
        int coord_value = 0;
        for(unsigned int i = 0; i < idx; ++i)
            coord_value += _size_splits[i];
        coords.set(tmp_axis, coord_value);
    }

    return std::make_pair(output_descriptor, coords);
}

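// Propagates the shape information: once the input tensor is known, every connected
// output tensor gets its descriptor recomputed via configure_output().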
bool SplitLayerNode::forward_descriptors()
{
    if(input_id(0) != NullTensorID)
    {
        validate();
        for(unsigned int i = 0; i < _outputs.size(); ++i)
        {
            if(output_id(i) != NullTensorID)
            {
                Tensor *dst_i = output(i);
                ARM_COMPUTE_ERROR_ON(dst_i == nullptr);
                dst_i->desc() = configure_output(i);
            }
        }
        return true;
    }
    return false;
}

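// Builds the descriptor of output 'idx' from the input descriptor, using either an
// even division of the split axis or the explicitly requested split size.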
TensorDescriptor SplitLayerNode::configure_output(size_t idx) const
{
    ARM_COMPUTE_UNUSED(idx);
    ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());

    const Tensor *src = input(0);
    ARM_COMPUTE_ERROR_ON(src == nullptr);

    TensorDescriptor input_descriptor  = src->desc();
    TensorDescriptor output_descriptor = input_descriptor;

    // Handle negative axis; a negative index specifies the axis counting from the end (e.g. -1 for the last axis).
    int num_dimension = static_cast<int32_t>(src->desc().shape.num_dimensions());
    int tmp_axis      = wrap_around(_axis, num_dimension);

    int split_size = (_size_splits.empty()) ? (input_descriptor.shape[tmp_axis] / _num_splits) : _size_splits[idx];
    if(split_size == -1)
    {
        split_size = input_descriptor.shape[tmp_axis];
        for(unsigned int i = 0; i < _size_splits.size() - 1; ++i)
            split_size -= _size_splits[i];
    }
    output_descriptor.shape.set(tmp_axis, split_size);

    return output_descriptor;
}

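// Checks that the split axis is within range and, for an even split, that the axis
// dimension is exactly divisible by the number of splits.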
Status SplitLayerNode::validate() const
{
    const Tensor *src = input(0);
    ARM_COMPUTE_RETURN_ERROR_ON(src == nullptr);
    int num_dimension = static_cast<int32_t>(src->desc().shape.num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(_axis < (-num_dimension) || _axis >= num_dimension);

    // Handle negative axis; a negative index specifies the axis counting from the end (e.g. -1 for the last axis).
    int tmp_axis = wrap_around(_axis, num_dimension);

    if(_size_splits.empty())
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->desc().shape[tmp_axis] % _num_splits, "Split should be exact");
    }

    return Status{};
}

NodeType SplitLayerNode::type() const
{
    return NodeType::SplitLayer;
}

void SplitLayerNode::accept(INodeVisitor &v)
{
    v.visit(*this);
}
} // namespace graph
} // namespace arm_compute