/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
23 */ 24 #ifndef ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H 25 #define ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H 26 27 #include "arm_compute/runtime/IFunction.h" 28 29 #include "arm_compute/core/CL/CLKernelLibrary.h" 30 #include "arm_compute/core/Types.h" 31 #include "arm_compute/runtime/CL/CLTensor.h" 32 #include "arm_compute/runtime/CL/functions/CLActivationLayer.h" 33 #include "arm_compute/runtime/CL/functions/CLGEMM.h" 34 #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h" 35 #include "arm_compute/runtime/IMemoryManager.h" 36 #include "arm_compute/runtime/ITransformWeights.h" 37 #include "arm_compute/runtime/IWeightsManager.h" 38 #include "arm_compute/runtime/MemoryGroup.h" 39 40 #include <memory> 41 42 namespace arm_compute 43 { 44 class CLCol2ImKernel; 45 class CLIm2ColKernel; 46 class CLWeightsReshapeKernel; 47 class ICLTensor; 48 49 /** Function to reshape and transpose the weights. This function calls the following kernels: 50 * -# @ref CLWeightsReshapeKernel 51 */ 52 class CLConvolutionLayerReshapeWeights : public IFunction 53 { 54 public: 55 /** Constructor */ 56 CLConvolutionLayerReshapeWeights(); 57 /** Prevent instances of this class from being copied */ 58 CLConvolutionLayerReshapeWeights(const CLConvolutionLayerReshapeWeights &) = delete; 59 /** Prevent instances of this class from being copied */ 60 CLConvolutionLayerReshapeWeights &operator=(const CLConvolutionLayerReshapeWeights &) = delete; 61 /** Default move constructor */ 62 CLConvolutionLayerReshapeWeights(CLConvolutionLayerReshapeWeights &&) = default; 63 /** Default move assignment operator */ 64 CLConvolutionLayerReshapeWeights &operator=(CLConvolutionLayerReshapeWeights &&) = default; 65 /** Default destructor */ 66 ~CLConvolutionLayerReshapeWeights(); 67 /** Set the input and output tensors. 68 * 69 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 
70 * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32. 71 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights. 72 * @param[out] output Destination tensor. Data types supported: Same as @p weights. 73 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout 74 */ 75 void configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1); 76 /** Set the input and output tensors. 77 * 78 * @param[in] compile_context The compile context to be used. 79 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 80 * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32. 81 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights. 82 * @param[out] output Destination tensor. Data types supported: Same as @p weights. 83 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout 84 */ 85 void configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1); 86 /** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayerReshapeWeights 87 * 88 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 89 * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32. 90 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights. 91 * @param[in] output Destination tensor. Data types supported: Same as @p weights. 
92 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout 93 * 94 * @return a status 95 */ 96 static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1); 97 // Inherited methods overridden: 98 void run() override; 99 100 private: 101 std::unique_ptr<CLWeightsReshapeKernel> _weights_reshape_kernel; 102 }; 103 104 namespace weights_transformations 105 { 106 /** Basic function to manage the reshape weights generated from @ref CLConvolutionLayerReshapeWeights */ 107 class CLConvolutionLayerReshapeWeightsTransform : public ITransformWeights 108 { 109 public: 110 /** Configures the @ref CLConvolutionLayerReshapeWeights function 111 * 112 * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32. 113 * @param[in] biases Biases tensor. Data type supported: same as @p input, S32 if @p input is quantized. 114 * @param[in] num_groups Number of groups when performing a grouped convolution. 115 */ configure(const ICLTensor * input,const ICLTensor * biases,unsigned int num_groups)116 void configure(const ICLTensor *input, const ICLTensor *biases, unsigned int num_groups) 117 { 118 configure(CLKernelLibrary::get().get_compile_context(), input, biases, num_groups); 119 } 120 /** Configures the @ref CLConvolutionLayerReshapeWeights function 121 * 122 * @param[in] compile_context The compile context to be used. 123 * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/F16/F32. 124 * @param[in] biases Biases tensor. Data type supported: same as @p input, S32 if @p input is quantized. 125 * @param[in] num_groups Number of groups when performing a grouped convolution. 
126 */ configure(const CLCompileContext & compile_context,const ICLTensor * input,const ICLTensor * biases,unsigned int num_groups)127 void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, unsigned int num_groups) 128 { 129 _bias_bit = (biases != nullptr) ? 1 : 0; 130 _num_groups = num_groups; 131 _func.configure(compile_context, input, biases, &_output, num_groups); 132 } 133 134 //Inherited method override run()135 void run() override 136 { 137 _output.allocator()->allocate(); 138 _func.run(); 139 _reshape_run = true; 140 } 141 142 //Inherited method override get_weights()143 ICLTensor *get_weights() override 144 { 145 return &_output; 146 } 147 148 //Inherited method override release()149 void release() override 150 { 151 _output.allocator()->free(); 152 } 153 154 //Inherited method override uid()155 uint32_t uid() override 156 { 157 return ((0x9) | (_bias_bit << 7) | (_num_groups << 8)); 158 } 159 160 private: 161 CLTensor _output{}; 162 CLConvolutionLayerReshapeWeights _func{}; 163 int32_t _bias_bit{ 0 }; 164 unsigned int _num_groups{ 0 }; 165 }; 166 } // namespace weights_transformations 167 168 /** Basic function to compute the convolution layer. This function calls the following OpenCL kernels/functions: 169 * 170 * -# @ref CLIm2ColKernel 171 * -# @ref CLGEMM (if the data type is FP32 or FP16) 172 * -# @ref CLGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8/QASYMM8_SIGNED) 173 * -# @ref CLGEMMLowpOutputStage with QUANTIZE_DOWN_FIXEDPOINT type of quantization (if the data type is QASYMM8/QASYMM8_SIGNED) 174 * -# @ref CLCol2ImKernel (if NCHW data layout) 175 */ 176 class CLGEMMConvolutionLayer : public IFunction 177 { 178 public: 179 /** Constructor 180 * 181 * @param[in] memory_manager (Optional) Memory manager. 182 * @param[in] weights_manager (Optional) Weights manager. 
183 */ 184 CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr); 185 /** Prevent instances of this class from being copied (As this class contains pointers) */ 186 CLGEMMConvolutionLayer(const CLGEMMConvolutionLayer &) = delete; 187 /** Default move constructor */ 188 CLGEMMConvolutionLayer(CLGEMMConvolutionLayer &&) = default; 189 /** Prevent instances of this class from being copied (As this class contains pointers) */ 190 CLGEMMConvolutionLayer &operator=(const CLGEMMConvolutionLayer &) = delete; 191 /** Default move assignment operator */ 192 CLGEMMConvolutionLayer &operator=(CLGEMMConvolutionLayer &&) = default; 193 /**Default destructor */ 194 ~CLGEMMConvolutionLayer(); 195 /** Set the input and output tensors. 196 * 197 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], 198 * while every optional dimension from 4 and above represent a batch of inputs. 199 * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 200 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 201 * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED. 202 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 203 * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type. 204 * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. 205 * Data types supported: Same as @p input. 206 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. 207 * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. 
If this is not part of the fully connected layer the weights 208 * tensor has also been transposed with CLGEMMReshapeRHSMatrixKernel. Data type supported: Same as @p input. 209 * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 210 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 211 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout 212 */ 213 void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(), 214 const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1); 215 /** Set the input and output tensors. 216 * 217 * @param[in] compile_context The compile context to be used. 218 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], 219 * while every optional dimension from 4 and above represent a batch of inputs. 220 * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 221 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 222 * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED. 223 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 224 * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type. 225 * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. 226 * Data types supported: Same as @p input. 
227 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. 228 * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights 229 * tensor has also been transposed with CLGEMMReshapeRHSMatrixKernel. Data type supported: Same as @p input. 230 * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 231 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 232 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout 233 */ 234 void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, 235 const WeightsInfo &weights_info = WeightsInfo(), 236 const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1); 237 /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer. 238 * 239 * @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM], 240 * while every optional dimension from 4 and above represent a batch of inputs. 241 * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 242 * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 243 * Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED. 244 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 245 * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type. 
246 * @param[out] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. 247 * Data types supported: Same as @p input. 248 * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. 249 * @param[in] weights_info Specifies if the weights tensor has been reshaped with CLWeightsReshapeKernel. If this is not part of the fully connected layer the weights 250 * tensor has also been transposed with CLGEMMReshapeRHSMatrixKernel. Data type supported: Same as @p input. 251 * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 252 * @param[in] act_info (Optional) Activation layer information in case of a fused activation. 253 * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for NCHW data layout 254 * 255 * @return a status 256 */ 257 static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, 258 const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), unsigned int num_groups = 1); 259 260 // Inherited methods overridden: 261 void run() override; 262 void prepare() override; 263 264 private: 265 /** Configures the appropriate matrix multiply routine 266 * 267 * @param[in] compile_context The compile context to be used. 268 * @param[in] input Input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 269 * @param[in] weights Weights tensor. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or 270 * QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED. 271 * @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 
272 * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type. 273 * @param[in, out] output Output tensor. Data types supported: same as @p input. 274 * @param[in] gemmlowp_output_stage GEMMLowp output stage info 275 * @param[in] gemm_3d_depth Depth of GEMM 3D 276 * @param[in] act_info Activation to apply after the matrix multiplication 277 */ 278 void configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, 279 const GEMMLowpOutputStageInfo &gemmlowp_output_stage, 280 int gemm_3d_depth, const ActivationLayerInfo &act_info); 281 /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines 282 * 283 * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. 284 * @param[in] weights Weights tensor info. Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8 or 285 * QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8_SIGNED. 286 * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 287 * Data type supported: Should match @p input data type, except for input of quantized type where biases should be of S32 type. 288 * @param[in] output Output tensor info. Data types supported: same as @p input. 289 * @param[in] gemmlowp_output_stage GEMMLowp output stage info 290 * @param[in] gemm_3d_depth Depth of GEMM 3D 291 * @param[in] skip_im2col Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout. 
292 * @param[in] act_info Activation to apply after the matrix multiplication 293 * 294 * @return a status 295 */ 296 static Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const GEMMLowpOutputStageInfo &gemmlowp_output_stage, 297 int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info); 298 299 private: 300 MemoryGroup _memory_group; 301 IWeightsManager *_weights_manager; 302 CLConvolutionLayerReshapeWeights _reshape_weights; 303 weights_transformations::CLConvolutionLayerReshapeWeightsTransform _reshape_weights_managed; 304 std::unique_ptr<CLIm2ColKernel> _im2col_kernel; 305 CLGEMM _mm_gemm; 306 CLGEMMLowpMatrixMultiplyCore _mm_gemmlowp; 307 std::unique_ptr<CLCol2ImKernel> _col2im_kernel; 308 CLActivationLayer _activationlayer_function; 309 310 const ICLTensor *_original_weights; 311 312 CLTensor _im2col_output; 313 CLTensor _weights_reshaped; 314 CLTensor _gemm_output; 315 316 bool _skip_im2col; 317 bool _skip_col2im; 318 bool _is_quantized; 319 bool _fuse_activation; 320 bool _is_prepared; 321 }; 322 } // namespace arm_compute 323 #endif /* ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H */ 324