/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H
#define ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H

#include "src/core/CL/ICLKernel.h"

namespace arm_compute
{
/** OpenCL kernel to perform reshaping on the weights used by convolution and locally connected layers
 *
 * Rearranges each 3-dimensional kernel into a single row, leading to a matrix with linearized kernels.
 * In combination with the @ref CLIm2ColKernel, it can transform a convolution into a matrix multiplication.
 *
 * For example, assuming a 3D weight kernel with 3x3 dimensions and a depth of 2, we have:
 * @f[
 * \left( \begin{array}{ccc}
 * a000 & a001 & a002 \\
 * a010 & a011 & a012 \\
 * a020 & a021 & a022 \\
 * \end{array} \right)
 * \left( \begin{array}{ccc}
 * a100 & a101 & a102 \\
 * a110 & a111 & a112 \\
 * a120 & a121 & a122 \\
 * \end{array} \right)
 * \rightarrow
 * \left( \begin{array}{cccccccccccccccccc}
 * a000 & a001 & a002 & a010 & a011 & a012 & a020 & a021 & a022 & a100 & a101 & a102 & a110 & a111 & a112 & a120 & a121 & a122 \\
 * \end{array} \right)
 * @f]
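 *
 * A minimal configuration sketch is shown below. It is illustrative only: the tensor shapes, the data type,
 * the compute_weights_reshaped_shape() shape-calculator helper and the use of CLScheduler for enqueuing are
 * assumptions taken from the wider library, not guarantees made by this kernel's interface.
 * @code
 * using namespace arm_compute;
 *
 * CLTensor weights;  // [kernel_x, kernel_y, IFM, OFM] = [3, 3, 2, 4]
 * CLTensor biases;   // [OFM] = [4]
 * CLTensor reshaped; // 2D output holding one linearized kernel (plus appended bias) per OFM
 *
 * weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32));
 * biases.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::F32));
 * reshaped.allocator()->init(TensorInfo(misc::shape_calculator::compute_weights_reshaped_shape(*weights.info(), true), 1, DataType::F32));
 *
 * CLWeightsReshapeKernel reshape_kernel;
 * reshape_kernel.configure(&weights, &biases, &reshaped);
 *
 * // ... allocate and fill the tensors, then enqueue the kernel:
 * CLScheduler::get().enqueue(reshape_kernel);
 * @endcode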
 */
class CLWeightsReshapeKernel : public ICLKernel
{
public:
    /** Constructor */
    CLWeightsReshapeKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLWeightsReshapeKernel(const CLWeightsReshapeKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLWeightsReshapeKernel &operator=(const CLWeightsReshapeKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLWeightsReshapeKernel(CLWeightsReshapeKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLWeightsReshapeKernel &operator=(CLWeightsReshapeKernel &&) = default;
    /** Default destructor */
    ~CLWeightsReshapeKernel() = default;
    /** Set the input and output of the kernel.
     *
     * @param[in]  input      The input tensor to convert. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
     *                        and a 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
     * @param[in]  biases     The shared biases tensor to append. Bias is a 1D tensor with dimensions [OFM] if shared and a 2D tensor with
     *                        dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32; for quantized types this must be nullptr.
     *                        @warning Appending biases to the reshaped weights matrix is not supported for quantized asymmetric types.
     * @param[out] output     The output tensor. Should be a 2D tensor if there are no groups and the weights are not shared; a 3D tensor otherwise.
     *                        Data types supported: Same as @p input
     * @param[in]  num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for the NCHW data layout,
     *                        and the number of weights must be a multiple of it.
     */
    void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
    /** Set the input and output of the kernel.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           The input tensor to convert. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
     *                             and a 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
     * @param[in]  biases          The shared biases tensor to append. Bias is a 1D tensor with dimensions [OFM] if shared and a 2D tensor with
     *                             dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32; for quantized types this must be nullptr.
     *                             @warning Appending biases to the reshaped weights matrix is not supported for quantized asymmetric types.
     * @param[out] output          The output tensor. Should be a 2D tensor if there are no groups and the weights are not shared; a 3D tensor otherwise.
     *                             Data types supported: Same as @p input
     * @param[in]  num_groups      (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for the NCHW data layout,
     *                             and the number of weights must be a multiple of it.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLWeightsReshapeKernel
     *
     * @param[in] input      The input tensor to convert. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
     *                       and a 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: All
     * @param[in] biases     The shared biases tensor to append. Bias is a 1D tensor with dimensions [OFM] if shared and a 2D tensor with
     *                       dimensions [OFM, num_patches] if unshared. Data types supported: F16/F32; for quantized types this must be nullptr.
     *                       @warning Appending biases to the reshaped weights matrix is not supported for quantized asymmetric types.
     * @param[in] output     The output tensor. Should be a 2D tensor if there are no groups and the weights are not shared; a 3D tensor otherwise.
     *                       Data types supported: Same as @p input
     * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is only supported for the NCHW data layout,
     *                       and the number of weights must be a multiple of it.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups = 1);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;
    const ICLTensor *_biases;
    ICLTensor       *_output;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLWEIGHTSRESHAPEKERNEL_H */