/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"

using namespace arm_compute;

namespace
{
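// Check that the input/output tensors and the Winograd configuration are valid:
// supported data type (F32/F16), unit strides, a supported output tile / kernel size
// combination and, if the output is already configured, a matching shape and data type.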
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);

    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd input transform only supports unit strides");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, input->data_layout()), "Winograd input transform not supported");

    ARM_COMPUTE_UNUSED(conv_info);
    ARM_COMPUTE_UNUSED(output_tile_size);
    ARM_COMPUTE_UNUSED(kernel_size);

    // Validate configured output
    if(output->total_size() != 0)
    {
        const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}

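// Compute the maximum execution window and, for NCHW, declare the rectangular access
// pattern needed by the transform so the tensor padding can be updated accordingly.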
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_UNUSED(output);
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

    bool   window_changed = false;
    Window win            = calculate_max_window(*input, Steps(1, 1));

    if(input->data_layout() == DataLayout::NCHW)
    {
        const PadStrideInfo conv_info        = winograd_info.convolution_info;
        const Size2D        output_tile_size = winograd_info.output_tile_size;
        const Size2D        kernel_size      = winograd_info.kernel_size;

        unsigned int num_elems_read_per_iteration_x = output_tile_size.width + kernel_size.width - 1;
        unsigned int num_elems_read_per_iteration_y = output_tile_size.height + kernel_size.height - 1;

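        // Each tile reads an (output_tile + kernel - 1) block that can start inside the
        // top/left padding, so declare a rectangular access window and let
        // update_window_and_padding() grow the tensor padding if needed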
        AccessWindowRectangle input_access(input, -conv_info.pad_left(), -conv_info.pad_top(), num_elems_read_per_iteration_x, num_elems_read_per_iteration_y);
        window_changed = update_window_and_padding(win, input_access);
    }

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLWinogradInputTransformKernel::CLWinogradInputTransformKernel()
    : _border_size(0), _input(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _num_tiles_x(0), _num_tiles_y(0), _step_z(1)
{
}

BorderSize CLWinogradInputTransformKernel::border_size() const
{
    return _border_size;
}

void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, output, winograd_info);
}

void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));

    auto padding_info = get_padding_info({ input, output });

    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        kernel_size      = winograd_info.kernel_size;

    _data_layout = input->info()->data_layout();

    const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

    // Compute number of elements to process in the X and Y direction
    const int num_elements_x = input->info()->dimension(idx_w) - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
    const int num_elements_y = input->info()->dimension(idx_h) - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom();

    if(_data_layout == DataLayout::NCHW)
    {
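        // In NCHW the border handler provides the convolution padding; if the padded
        // extent is not a multiple of the output tile size, extend the right/bottom
        // border so that the last tile is fully covered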
        // Check if we need to extend the right or bottom border
        const unsigned int extra_border_right  = ((num_elements_x % output_tile_size.width) == 0) ? 0u : static_cast<unsigned int>(output_tile_size.width - 1);
        const unsigned int extra_border_bottom = ((num_elements_y % output_tile_size.height) == 0) ? 0u : static_cast<unsigned int>(output_tile_size.height - 1);

        _border_size = BorderSize(conv_info.pad_top(), conv_info.pad_right() + extra_border_right, conv_info.pad_bottom() + extra_border_bottom, conv_info.pad_left());
    }
    else
    {
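        // No border is needed in the NHWC case: leave the border size empty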
        _border_size = BorderSize();
    }

    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input->info()->dimension(idx_w), input->info()->dimension(idx_h)),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    _input       = input;
    _output      = output;
    _num_tiles_x = num_tiles.width;
    _num_tiles_y = num_tiles.height;

    const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input->info(), winograd_info);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));

    ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(output->info()->dimension(1)));
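    // Collapse every dimension above the third into a single batch count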
    const size_t total_batches = input->info()->tensor_shape().total_size_upper(3);

    CLBuildOptions build_opts;
    build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(_num_tiles_x));
    build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
    build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
    build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
    build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
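    // 1xN and Nx1 kernels select the horizontal/vertical 1D variants of the transform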
    build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
    build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
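    // Layout-specific defines: NHWC needs the source spatial dimensions (and the tile
    // count along Y when batching), NCHW only needs the source depth when batching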
    if(_data_layout == DataLayout::NHWC)
    {
        build_opts.add_option_if(total_batches > 1, "-DNUM_TILES_Y=" + support::cpp11::to_string(_num_tiles_y));
        build_opts.add_option("-DSRC_DIM_1=" + support::cpp11::to_string(_input->info()->dimension(1)));
        build_opts.add_option("-DSRC_DIM_2=" + support::cpp11::to_string(_input->info()->dimension(2)));
    }
    else
    {
        build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(_input->info()->dimension(2)));
    }

    // Create kernel
    std::string kernel_name = "winograd_input_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string();

    // Get the maximum dimension from the tile size
    const unsigned int tile_max_dim = std::max(output_tile_size.width, output_tile_size.height);

    // Check optimized kernel if output_dims == 2x2
    if((tile_max_dim == 2) && (_data_layout == DataLayout::NCHW))
    {
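        // Use a Z-step of 2 (the _stepz2 kernel variant) only when the input depth is even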
        _step_z = (_input->info()->dimension(2) % 2) != 0 ? 1 : 2;
    }

    // Append stepz and data layout
    kernel_name += "_stepz";
    kernel_name += support::cpp11::to_string(_step_z);
    kernel_name += "_" + lower_string(string_from_data_layout(_data_layout));

    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Create window and update padding
    auto win_config = validate_and_configure_window(input->info(), output->info(), winograd_info);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
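    // Use a fixed local work size hint of 1x1x8 work-items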
    ICLKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));

    ARM_COMPUTE_ERROR_ON((input->info()->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));

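    // Build the configuration id used by the tuner to identify this kernel configuration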
    _config_id = kernel_name;
    _config_id += support::cpp11::to_string(input->info()->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(input->info()->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(input->info()->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(conv_info.pad_left());
    _config_id += "_";
    _config_id += support::cpp11::to_string(conv_info.pad_top());
    _config_id += "_";
    _config_id += lower_string(string_from_data_layout(_data_layout));
}

Status CLWinogradInputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), winograd_info).first);

    return Status{};
}

void CLWinogradInputTransformKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    const size_t idx_w         = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h         = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c         = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
    const size_t total_batches = window.shape().total_size_upper(3);

    // Collapse window
    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);

    Window slice = window_collapsed.first_slice_window_3D();
    slice.set(idx_w, Window::Dimension(0, _num_tiles_x, 1));
    slice.set(idx_h, Window::Dimension(0, _num_tiles_y, 1));
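    // For NHWC the batches are folded into the Y (tile) dimension of the slice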
    if(_data_layout == DataLayout::NHWC)
    {
        slice.set(idx_h, Window::Dimension(0, _num_tiles_y * total_batches, 1));
    }

    ARM_COMPUTE_ERROR_ON(((slice[idx_c].end() - slice[idx_c].start()) % _step_z) != 0);
    slice.set(idx_c, Window::Dimension(slice[idx_c].start(), slice[idx_c].end(), _step_z));

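    // Pass the batch strides of input and output right after the two 3D tensor arguments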
    unsigned int idx = 2 * num_arguments_per_3D_tensor();
    _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input->info()->strides_in_bytes()[3]));
    _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[3]));

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _output, slice);

        enqueue(queue, *this, slice, lws_hint());
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}