/*
 * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/operators/ClConv2d.h"

#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
#include "src/gpu/cl/operators/ClDirectConv2d.h"
#include "src/gpu/cl/operators/ClGemmConv2d.h"
#include "src/gpu/cl/operators/ClIndirectConv2d.h"
#include "src/gpu/cl/operators/ClWinogradConv2d.h"

#include "src/common/utils/Log.h"

#include <memory>

namespace
{
/** Get the suitable kernel size for using the direct convolution method with NHWC data layout.
 *
 * @note Direct convolution should be executed when the kernel's spatial dimensions are greater than or equal to the value returned by this function
 *
 * @param[in] gpu_target GPU target
 *
 * @return the suitable kernel size for using the direct convolution method with NHWC data layout
 */
size_t get_direct_conv_kernel_threshold_nhwc(arm_compute::GPUTarget gpu_target)
{
    switch(gpu_target)
    {
        case arm_compute::GPUTarget::G76:
        case arm_compute::GPUTarget::G77:
        case arm_compute::GPUTarget::G78:
            return 5;
        case arm_compute::GPUTarget::G71:
        case arm_compute::GPUTarget::G72:
        case arm_compute::GPUTarget::MIDGARD:
        case arm_compute::GPUTarget::BIFROST:
            return 7;
        default:
            return 5;
    }
}
} // namespace

namespace arm_compute
{
namespace opencl
{
using namespace arm_compute::misc::shape_calculator;

ClConv2d::ClConv2d()
    : _operator()
{
}

ClConv2d::~ClConv2d() = default;

void ClConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst, const Conv2dInfo &conv2d_info,
                         const WeightsInfo &weights_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_ERROR_THROW_ON(ClConv2d::validate(src, weights, biases, dst, conv2d_info, weights_info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv2d_info, weights_info);

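    // Instantiate and configure the concrete operator chosen by the method-selection heuristic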
    switch(ClConv2d::get_convolution_method(src, weights, dst, conv2d_info, weights_info, CLScheduler::get().target()))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            ARM_COMPUTE_ERROR_ON(conv2d_info.num_groups != 1);
            ARM_COMPUTE_ERROR_ON(conv2d_info.post_ops.size() > 0);
            auto f = std::make_unique<ClWinogradConv2d>();
            f->configure(compile_context, src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info, conv2d_info.enable_fast_math);
            _operator = std::move(f);
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            ARM_COMPUTE_ERROR_ON(conv2d_info.num_groups != 1);
            ARM_COMPUTE_ERROR_ON(conv2d_info.post_ops.size() > 0);
            auto f = std::make_unique<ClDirectConv2d>();
            f->configure(compile_context, src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info);
            _operator = std::move(f);
            break;
        }
        case ConvolutionMethod::INDIRECT:
        {
            ARM_COMPUTE_ERROR_ON(conv2d_info.num_groups != 1);
            ARM_COMPUTE_ERROR_ON(conv2d_info.post_ops.size() > 0);
            auto f = std::make_unique<ClIndirectConv2d>();
            f->configure(compile_context, src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info);
            _operator = std::move(f);
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            auto f = std::make_unique<ClGemmConv2d>();
            f->configure(compile_context, src, weights, biases, dst, conv2d_info, weights_info);
            _operator = std::move(f);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }
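    // Cache the auxiliary (workspace) memory requirements of the selected operator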
    _aux_mem = _operator->workspace();
}

Status ClConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
                          const WeightsInfo &weights_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((conv2d_info.num_groups != 1) && (src->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");

    const GPUTarget gpu_target = CLScheduler::get().target();

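    // Validate against the operator that the same heuristic used in configure() would pick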
    switch(ClConv2d::get_convolution_method(src, weights, dst, conv2d_info, weights_info, gpu_target))
    {
        case ConvolutionMethod::WINOGRAD:
        {
            // Validate Winograd
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.num_groups != 1, "Grouping (num_groups != 1) with ClWinogradConv2d is not supported");
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClWinogradConv2d does not support PostOps");
            ARM_COMPUTE_RETURN_ON_ERROR(ClWinogradConv2d::validate(src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info, conv2d_info.enable_fast_math));
            break;
        }
        case ConvolutionMethod::DIRECT:
        {
            // Validate direct convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.num_groups != 1, "Grouping (num_groups != 1) with ClDirectConv2d is not supported");
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClDirectConv2d does not support PostOps");
            ARM_COMPUTE_RETURN_ON_ERROR(ClDirectConv2d::validate(src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info));
            break;
        }
        case ConvolutionMethod::INDIRECT:
        {
            // Validate indirect convolution layer
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.num_groups != 1, "Grouping (num_groups != 1) with ClIndirectConv2d is not supported");
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv2d_info.post_ops.size() > 0, "ClIndirectConv2d does not support PostOps");
            ARM_COMPUTE_RETURN_ON_ERROR(ClIndirectConv2d::validate(src, weights, biases, dst, conv2d_info.conv_info, conv2d_info.act_info));
            break;
        }
        case ConvolutionMethod::GEMM:
        {
            // Validate gemm-based convolution layer
            ARM_COMPUTE_RETURN_ON_ERROR(ClGemmConv2d::validate(src, weights, biases, dst, conv2d_info, weights_info));
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Not supported.");
            break;
    }

    return Status{};
}

ConvolutionMethod ClConv2d::get_convolution_method(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const Conv2dInfo &conv2d_info,
                                                   const WeightsInfo &weights_info, const GPUTarget gpu_target)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src);
    ARM_COMPUTE_ERROR_ON_NULLPTR(dst);
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_UNUSED(weights_info);

    const PadStrideInfo       conv_info        = conv2d_info.conv_info;
    const ActivationLayerInfo act_info         = conv2d_info.act_info;
    const Size2D              dilation         = conv2d_info.dilation;
    bool                      enable_fast_math = conv2d_info.enable_fast_math;

    const size_t idx_w = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT);
    const size_t idx_c = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::CHANNEL);

    /* Input spatial dims, kernel size, IFM/OFM, conv info */
    using ConvolutionConfiguration = std::tuple<Size2D, Size2D, Size2D, PadStrideInfo, DataLayout>;
    using ConfigurationMethod      = std::pair<ConvolutionConfiguration, ConvolutionMethod>;

    const std::vector<ConfigurationMethod> known_configs =
    {
        // Alexnet
        ConfigurationMethod(ConvolutionConfiguration(Size2D(27U, 27U), Size2D(5U, 5U), Size2D(48U, 128U), PadStrideInfo(1U, 1U, 2U, 2U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // VGG16 / VGG19
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 64U), PadStrideInfo(1U, 1U, 1U, 1U), DataLayout::NCHW), ConvolutionMethod::DIRECT),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NCHW), ConvolutionMethod::GEMM),
        // Mobilenet 224
        ConfigurationMethod(ConvolutionConfiguration(Size2D(224U, 224U), Size2D(3U, 3U), Size2D(3U, 32U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
        // Mobilenet 160
        ConfigurationMethod(ConvolutionConfiguration(Size2D(160U, 160U), Size2D(3U, 3U), Size2D(3U, 24U), PadStrideInfo(2U, 2U, 0U, 1U, 0U, 1U, DimensionRoundingType::FLOOR), DataLayout::NHWC), ConvolutionMethod::GEMM),
    };

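    // Matcher: does the current convolution (shapes, padding, stride, layout) match one of the known configurations above?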
    const auto find_config = [&](ConfigurationMethod c)
    {
        const ConvolutionConfiguration config      = c.first;
        const PadStrideInfo            info        = std::get<3>(config);
        const DataLayout               data_layout = std::get<4>(config);

        return std::get<0>(config) == Size2D(src->dimension(idx_w), src->dimension(idx_h)) && std::get<1>(config) == Size2D(weights->dimension(idx_w), weights->dimension(idx_h))
               && std::get<2>(config) == Size2D(weights->dimension(idx_c), weights->dimension(3)) && info.pad_top() == conv_info.pad_top() && info.pad_right() == conv_info.pad_right()
               && info.pad_bottom() == conv_info.pad_bottom() && info.pad_left() == conv_info.pad_left() && info.stride() == conv_info.stride() && (data_layout == src->data_layout());
    };

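    // If the configuration is known, prefer its associated method over the generic heuristic below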
    std::vector<ConfigurationMethod>::const_iterator found;
    if((found = std::find_if(known_configs.begin(), known_configs.end(), find_config)) != known_configs.end())
    {
        return (*found).second;
    }

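    // Dilated convolutions are only handled by the GEMM-based path in this heuristic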
    if(dilation != Size2D(1U, 1U))
    {
        return ConvolutionMethod::GEMM;
    }
    else
    {
        if(src->data_layout() == DataLayout::NCHW)
        {
            // SRGAN
            if((src->dimension(idx_h) > 720U) && (dst->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
               && (ClDirectConv2d::validate(src, weights, nullptr, dst, conv_info, act_info)))
            {
                return ConvolutionMethod::DIRECT;
            }
            if((weights->dimension(idx_h) > 5) && (src->dimension(idx_c) > dst->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(src, weights, nullptr, dst, conv_info, act_info, enable_fast_math)))
            {
                return ConvolutionMethod::FFT;
            }
            if(src->dimension(idx_c) < 16)
            {
                return ConvolutionMethod::GEMM;
            }
            return bool(ClWinogradConv2d::validate(src, weights, nullptr, dst, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
        }
        else
        {
            const bool   is_direct_valid           = bool(ClDirectConv2d::validate(src, weights, nullptr, dst, conv_info, act_info));
            const bool   is_wino_valid             = bool(ClWinogradConv2d::validate(src, weights, nullptr, dst, conv_info, act_info, enable_fast_math));
            const size_t kernel_sz_direct_conv_thr = get_direct_conv_kernel_threshold_nhwc(gpu_target);

            // SRGAN case
            if((src->dimension(idx_h) > 720U) && (dst->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
               && is_direct_valid)
            {
                return ConvolutionMethod::DIRECT;
            }

            // Floating-point case: GeMM/Direct/Winograd
            if(is_data_type_float(src->data_type()))
            {
                // Get dst shape
                TensorShape output_shape       = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);
                const bool  is_large_kernel_sz = (weights->dimension(idx_w) >= kernel_sz_direct_conv_thr) && (weights->dimension(idx_h) >= kernel_sz_direct_conv_thr);
                const bool  is_ifm_ge_8        = src->dimension(idx_c) >= 8;
                const bool  is_ifm_ge_16       = src->dimension(idx_c) >= 16;
                const bool  is_ofm_lte_8       = weights->dimension(3U) <= 8;
                const bool  is_ofm_lt_64       = weights->dimension(3U) < 64;
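                // Workload proxy: total output elements in 16-element units; the 8192 threshold is presumably tuned empirically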
                const bool  workload_gte_8192  = (output_shape[0] * output_shape[1] * output_shape[2]) / 16 >= 8192;
                const bool  is_ifm_gt_ofm      = src->dimension(idx_c) > weights->dimension(3U);
                const bool  is_m_one           = output_shape[1] * output_shape[2] == 1;
                const bool  is_unit_stride     = (conv2d_info.conv_info.stride().first == 1) && (conv2d_info.conv_info.stride().second == 1);
                const int32_t kernel_sz        = weights->dimension(idx_w) * weights->dimension(idx_h);

                // Run Winograd if valid and IFM >= 8
                if(is_wino_valid && is_ifm_ge_8)
                {
                    if(is_ofm_lte_8)
                    {
                        if(gpu_target == arm_compute::GPUTarget::G71 || gpu_target == arm_compute::GPUTarget::G72 || get_arch_from_target(gpu_target) == arm_compute::GPUTarget::MIDGARD)
                        {
                            return ConvolutionMethod::WINOGRAD;
                        }
                    }
                    else
                    {
                        return ConvolutionMethod::WINOGRAD;
                    }
                }

                // Direct convolution case
                if(is_direct_valid)
                {
                    if((gpu_target == arm_compute::GPUTarget::G71 || gpu_target == arm_compute::GPUTarget::G72 || get_arch_from_target(gpu_target) == arm_compute::GPUTarget::MIDGARD))
                    {
                        if(is_large_kernel_sz && is_ifm_ge_16 && is_ifm_gt_ofm)
                        {
                            return ConvolutionMethod::DIRECT;
                        }
                    }
                    else if(gpu_target == arm_compute::GPUTarget::G76)
                    {
                        if((is_large_kernel_sz && workload_gte_8192 && is_ifm_ge_16) || (is_ofm_lte_8 && is_ifm_ge_16))
                        {
                            return ConvolutionMethod::DIRECT;
                        }
                    }
                    else
                    {
                        ConvolutionMethod preferred_conv_method = ConvolutionMethod::DIRECT;

                        const bool is_indirect_valid = bool(ClIndirectConv2d::validate(src, weights, nullptr, dst, conv_info, act_info));

                        // Indirect conv2d should be used when:
                        // 1- the kernel size is greater than 1x1 and less than or equal to 9x9 (81 elements)
                        // 2- the kernel size is odd
                        // 3- the GPU target is Arm Mali-G77
                        if(is_indirect_valid)
                        {
                            const bool is_kernel_sz_odd = kernel_sz % 2;
                            const bool is_g77           = gpu_target == GPUTarget::G77;
                            preferred_conv_method = (kernel_sz > 1) && (kernel_sz <= 81) && is_kernel_sz_odd && is_g77 ? ConvolutionMethod::INDIRECT : ConvolutionMethod::DIRECT;
                        }

                        // Direct/indirect convolution used for the first layer of the network
                        if(workload_gte_8192 && !is_ifm_ge_16 && !is_unit_stride && is_ofm_lt_64)
                        {
                            // For the first convolution layer of a model, the question is whether the execution time of
                            // im2col + GEMM is lower than that of direct convolution. Since im2col does not depend on the OFM,
                            // when the OFM is large enough the contribution of im2col is small and the GEMM approach is preferable.
                            // From internal experiments, the OFM threshold is 64 (is_ofm_lt_64)
                            return preferred_conv_method;
                        }

                        if((is_large_kernel_sz || is_m_one) && workload_gte_8192 && is_ifm_ge_16)
                        {
                            return preferred_conv_method;
                        }

                        // Direct convolution used for the last layer of the network
                        if(is_ofm_lte_8)
                        {
                            return preferred_conv_method;
                        }
                    }
                }

                // Default case
                return ConvolutionMethod::GEMM;
            }

            // Generic case for quantized. Only GeMM
            return ConvolutionMethod::GEMM;
        }
    }
}

void ClConv2d::run(ITensorPack &tensors)
{
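    // Run one-off preparation (e.g. weight transformation); operators typically make this a no-op after the first call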
    prepare(tensors);
    _operator->run(tensors);
}

void ClConv2d::prepare(ITensorPack &tensors)
{
    _operator->prepare(tensors);
}

experimental::MemoryRequirements ClConv2d::workspace() const
{
    return _aux_mem;
}
} // namespace opencl
} // namespace arm_compute