/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H
#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H

#include "src/core/CL/ICLKernel.h"

#include "arm_compute/core/KernelDescriptors.h"

namespace arm_compute
{
class ICLTensor;

/** Interface for the kernel to run an MxN depthwise convolution, where M and N are respectively the number of rows and columns of the filter.
    This kernel assumes that the weights tensor is NOT reshaped (Native version). */
class CLDepthwiseConvolutionLayerNativeKernel : public ICLKernel
{
public:
    /** Default Constructor */
    CLDepthwiseConvolutionLayerNativeKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLDepthwiseConvolutionLayerNativeKernel(const CLDepthwiseConvolutionLayerNativeKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLDepthwiseConvolutionLayerNativeKernel &operator=(const CLDepthwiseConvolutionLayerNativeKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLDepthwiseConvolutionLayerNativeKernel(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLDepthwiseConvolutionLayerNativeKernel &operator=(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
    /** Initialize the function's source, destination and parameters
     *
     * @param[in]  input              Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [IFM, N, M].
     *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                                Data type supported: Same as @p input, or S32 when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
     * @param[in]  dwc_weights_info   Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
     * @param[in]  dwc_info           Depthwise convolution layer info
     * @param[in]  conv_info          Padding and stride information to use for the convolution.
     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
     *                                the number of shifts must be equal to the number of filters (IFM). Supported data types: S32
     */
    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
                   const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
                   const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
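    /* A minimal usage sketch (illustrative only, not part of the API declared
     * here): `input`, `weights`, `biases` and `output` are assumed to be
     * allocated NHWC CLTensor objects, the descriptor values are placeholder
     * choices, and the CLScheduler is assumed to have been initialised.
     *
     *   DWCWeightsKernelInfo dwc_weights_info{};
     *   dwc_weights_info.n0 = 4; // output elements processed by each thread
     *   DWCKernelInfo dwc_info{};
     *   dwc_info.activation_info = ActivationLayerInfo(); // no fused activation
     *   PadStrideInfo conv_info(1, 1, 0, 0); // stride 1x1, no padding
     *
     *   CLDepthwiseConvolutionLayerNativeKernel dwc_kernel;
     *   dwc_kernel.configure(&input, &weights, &biases, &output, dwc_weights_info, dwc_info, conv_info);
     *   CLScheduler::get().enqueue(dwc_kernel); // runs over the configured window
     */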
    /** Initialize the function's source, destination and parameters
     *
     * @param[in]  compile_context    The compile context to be used.
     * @param[in]  input              Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [IFM, N, M].
     *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                                Data type supported: Same as @p input, or S32 when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
     * @param[in]  dwc_weights_info   Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
     * @param[in]  dwc_info           Depthwise convolution layer info
     * @param[in]  conv_info          Padding and stride information to use for the convolution.
     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
     *                                the number of shifts must be equal to the number of filters (IFM). Supported data types: S32
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
                   const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
                   const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
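    /* Quantized per-channel sketch (illustrative): for a QASYMM8 input with
     * QSYMM8_PER_CHANNEL weights, the requantization parameters are passed as
     * two extra 1D S32 tensors of length IFM. `output_multipliers` and
     * `output_shifts` are assumed to have been computed by the caller from the
     * input/weights/output quantization scales.
     *
     *   // depth_multiplier = 1 and dilation = Size2D(1U, 1U) are the defaults,
     *   // written out here only because the trailing arguments follow them
     *   dwc_kernel.configure(&input, &weights, &biases, &output, dwc_weights_info,
     *                        dwc_info, conv_info, 1, Size2D(1U, 1U),
     *                        &output_multipliers, &output_shifts);
     */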
    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
     *
     * @param[in] input              Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
     * @param[in] weights            Weights tensor info. A 3D tensor with dimensions [IFM, N, M].
     *                               Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
     * @param[in] biases             Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                               Data type supported: Same as @p input, or S32 when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[in] output             Destination tensor info. Data type supported: Same as @p input.
     * @param[in] dwc_weights_info   Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
     * @param[in] dwc_info           Depthwise convolution layer info
     * @param[in] conv_info          Padding and stride information to use for the convolution.
     * @param[in] depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in] dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in] output_multipliers (Optional) Output multipliers tensor info for quantized computations. In case of per-channel quantization,
     *                               the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
     * @param[in] output_shifts      (Optional) Output shifts tensor info for quantized computations. In case of per-channel quantization,
     *                               the number of shifts must be equal to the number of filters (IFM). Supported data types: S32
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
                           const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
                           const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
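    /* Typical validate-before-configure sketch (illustrative; the ITensorInfo
     * pointers are assumed to come from already-initialised tensors, e.g. via
     * CLTensor::info()):
     *
     *   Status status = CLDepthwiseConvolutionLayerNativeKernel::validate(
     *       input.info(), weights.info(), biases.info(), output.info(),
     *       dwc_weights_info, dwc_info, conv_info);
     *   ARM_COMPUTE_ERROR_THROW_ON(status);
     */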

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;
    const ICLTensor *_weights;
    const ICLTensor *_biases;
    ICLTensor       *_output;
    unsigned int     _depth_multiplier;
    const ICLTensor *_output_multipliers;
    const ICLTensor *_output_shifts;
    bool             _is_quantized;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H */