/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLLocallyConnectedLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLCol2ImKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "support/MemorySupport.h"

#include <cmath>
#include <tuple>

using namespace arm_compute;

namespace
{
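// Computes the shapes of the intermediate tensors used by the locally connected layer:
// - shape_wr:     reshaped weights,       [OFM, kernel_w * kernel_h * IFM (+1 if bias), conv_w * conv_h]
// - shape_im2col: im2col output,          [kernel_w * kernel_h * IFM (+1 if bias), conv_w * conv_h, batches...]
// - shape_gemm:   matrix multiply output, [OFM, conv_w * conv_h, batches...]
// where conv_w and conv_h are the convolved output dimensions returned by scaled_dimensions().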
void calculate_shapes(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                      TensorShape &shape_wr, TensorShape &shape_im2col, TensorShape &shape_gemm)
{
    ARM_COMPUTE_UNUSED(output);

    const unsigned int kernel_width  = weights->dimension(0);
    const unsigned int kernel_height = weights->dimension(1);

    bool has_bias = (biases != nullptr);

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(0),
                                                 input->dimension(1),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info);

    const size_t mat_weights_cols = weights->dimension(3);
    const size_t mat_weights_rows = weights->dimension(0) * weights->dimension(1) * weights->dimension(2) + ((has_bias) ? 1 : 0);
    const size_t mat_weights_num  = weights->dimension(4);

    shape_wr = TensorShape(mat_weights_cols, mat_weights_rows, mat_weights_num);

    const size_t mat_input_cols = mat_weights_rows;
    const size_t mat_input_rows = conv_w * conv_h;

    shape_im2col = input->tensor_shape();
    if(shape_im2col.num_dimensions() >= 3)
    {
        shape_im2col.remove_dimension(2);
    }
    shape_im2col.set(0, mat_input_cols);
    shape_im2col.set(1, mat_input_rows);

    shape_gemm = shape_im2col;
    shape_gemm.set(0, mat_weights_cols);
    shape_gemm.set(1, mat_input_rows);
}
} // namespace

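// Minimal usage sketch (illustrative only; the tensor names, shapes and pad/stride values
// below are hypothetical):
//
//   CLTensor src, weights, biases, dst;
//   // ... initialise the tensor infos and allocate the tensors ...
//   CLLocallyConnectedLayer lc;
//   lc.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));
//   lc.run(); // prepare() is invoked internally and reshapes the weights once, on the first run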
CLLocallyConnectedLayer::CLLocallyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _input_im2col_kernel(support::cpp14::make_unique<CLIm2ColKernel>()),
      _weights_reshape_kernel(support::cpp14::make_unique<CLWeightsReshapeKernel>()),
      _mm_kernel(support::cpp14::make_unique<CLLocallyConnectedMatrixMultiplyKernel>()),
      _output_col2im_kernel(support::cpp14::make_unique<CLCol2ImKernel>()),
      _input_im2col_reshaped(),
      _weights_reshaped(),
      _gemm_output(),
      _is_prepared(false),
      _original_weights(nullptr)
{
}

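// Validates the layer configuration without configuring or running any kernels.
// Weights are expected as a 5D tensor of shape [kernel_w, kernel_h, IFM, OFM, conv_w * conv_h]
// (one filter per output spatial position) and the padding must be symmetric; each underlying
// kernel is also asked to validate its own tensor infos.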
Status CLLocallyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != input->dimension(2));
    ARM_COMPUTE_RETURN_ERROR_ON(!conv_info.padding_is_symmetric());

    bool has_bias = (biases != nullptr);

    if(has_bias)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(3));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 2);
    }

    const unsigned int kernel_width  = weights->dimension(0);
    const unsigned int kernel_height = weights->dimension(1);

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(0), input->dimension(1), kernel_width, kernel_height,
                                                 conv_info);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG((output->dimension(0) != conv_w) || (output->dimension(1) != conv_h), "Output shape does not match the expected one");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(4) != (conv_w * conv_h), "Weights shape does not match the expected one");

    // Calculate intermediate buffer shapes
    TensorShape shape_wr;
    TensorShape shape_im2col;
    TensorShape shape_gemm;
    calculate_shapes(input, weights, biases, output, conv_info, shape_wr, shape_im2col, shape_gemm);

    TensorInfo weights_reshaped_info(shape_wr, 1, weights->data_type());
    TensorInfo input_im2col_reshaped_info(shape_im2col, 1, input->data_type());
    TensorInfo gemm_output_info(shape_gemm, 1, input->data_type());

    ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &input_im2col_reshaped_info, Size2D(kernel_width, kernel_height), conv_info, has_bias));
    ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, &weights_reshaped_info));
    ARM_COMPUTE_RETURN_ON_ERROR(CLLocallyConnectedMatrixMultiplyKernel::validate(&input_im2col_reshaped_info, &weights_reshaped_info, &gemm_output_info));
    ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(&gemm_output_info, output, Size2D(conv_w, conv_h)));

    return Status{};
}

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
void CLLocallyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info);
}
#pragma GCC diagnostic pop

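// Configures the full pipeline on the given compile context:
// im2col on the input -> locally connected matrix multiply against the reshaped weights -> col2im
// into the output. The weights reshape kernel is configured here but only executed once, in
// prepare(). The im2col and matrix-multiply outputs are managed by the memory group so their
// backing memory can be reused across functions.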
void CLLocallyConnectedLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                        const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLLocallyConnectedLayer::validate(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output->info(), conv_info));

    bool has_bias     = (biases != nullptr);
    _original_weights = weights;
    _is_prepared      = false;

    const unsigned int kernel_width  = weights->info()->dimension(0);
    const unsigned int kernel_height = weights->info()->dimension(1);

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
                                                 conv_info);

    // Calculate intermediate buffer shapes
    TensorShape shape_wr;
    TensorShape shape_im2col;
    TensorShape shape_gemm;
    calculate_shapes(input->info(), weights->info(), biases == nullptr ? nullptr : biases->info(), output->info(), conv_info, shape_wr, shape_im2col, shape_gemm);

    _weights_reshaped.allocator()->init(TensorInfo(shape_wr, 1, weights->info()->data_type()));
    _input_im2col_reshaped.allocator()->init(TensorInfo(shape_im2col, 1, input->info()->data_type()));
    _gemm_output.allocator()->init(TensorInfo(shape_gemm, 1, input->info()->data_type()));

    // Manage intermediate buffers
    _memory_group.manage(&_input_im2col_reshaped);
    _memory_group.manage(&_gemm_output);

    // Configure kernels
    _input_im2col_kernel->configure(compile_context, input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, has_bias);
    _weights_reshape_kernel->configure(compile_context, weights, biases, &_weights_reshaped);
    _mm_kernel->configure(compile_context, &_input_im2col_reshaped, &_weights_reshaped, &_gemm_output);
    _output_col2im_kernel->configure(compile_context, &_gemm_output, output, Size2D(conv_w, conv_h));

    // Allocate intermediate tensors
    _input_im2col_reshaped.allocator()->allocate();
    _gemm_output.allocator()->allocate();

    CLScheduler::get().tune_kernel_static(*_input_im2col_kernel);
}

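// Enqueues the kernels for one execution of the layer. prepare() is called first so that the
// weights are reshaped exactly once, before the first run.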
void CLLocallyConnectedLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run input reshaping
    CLScheduler::get().enqueue(*_input_im2col_kernel);

    // Run vector matrix multiply on the reshaped matrices
    CLScheduler::get().enqueue(*_mm_kernel);

    // Reshape output matrix
    CLScheduler::get().enqueue(*_output_col2im_kernel, false);
}

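// One-off preparation step: allocates the reshaped-weights tensor, runs the weights reshape
// kernel on the device and marks the original weights as unused so that their memory can be
// released. Subsequent calls are no-ops.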
void CLLocallyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights reshaping and mark original weights tensor as unused
        _weights_reshaped.allocator()->allocate();
        CLScheduler::get().enqueue(*_weights_reshape_kernel);
        _original_weights->mark_as_unused();

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}