/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H
#define ARM_COMPUTE_NEDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H

#include "arm_compute/core/utils/misc/Traits.h"
#include "src/core/NEON/INEKernel.h"
#include "support/Requires.h"

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#include <arm_neon.h>
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

namespace arm_compute
{
// Forward declarations
class ITensor;
/** Interface for the kernel to run a native depthwise convolution on a tensor. */
class NEDepthwiseConvolutionLayerNativeKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEDepthwiseConvolutionLayerNativeKernel";
    }
    /** Default constructor */
    NEDepthwiseConvolutionLayerNativeKernel();
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEDepthwiseConvolutionLayerNativeKernel(const NEDepthwiseConvolutionLayerNativeKernel &) = delete;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEDepthwiseConvolutionLayerNativeKernel &operator=(const NEDepthwiseConvolutionLayerNativeKernel &) = delete;
    /** Default move constructor */
    NEDepthwiseConvolutionLayerNativeKernel(NEDepthwiseConvolutionLayerNativeKernel &&) = default;
    /** Default move assignment operator */
    NEDepthwiseConvolutionLayerNativeKernel &operator=(NEDepthwiseConvolutionLayerNativeKernel &&) = default;
    /** Default destructor */
    ~NEDepthwiseConvolutionLayerNativeKernel() = default;
    /** Initialize the function's source, destination and parameters.
     *
     * @note Supported data layout: NHWC
     *
     * @param[in]  input            Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights          Weights tensor. This is a 3D tensor with dimensions [IFM, W, H].
     *                              Data type supported: same as @p input, or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[in]  biases           Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                              Data type supported: same as @p input, or S32 when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[out] output           Destination tensor. Data type supported: same as @p input.
     * @param[in]  conv_info        Padding and stride information to use for the convolution.
     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
                   const Size2D &dilation = Size2D(1U, 1U));
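    /* A minimal usage sketch (illustration only, not part of the library's own
     * documentation): configure the kernel once, then dispatch it through the
     * scheduler, following the pattern used elsewhere in the library. The
     * tensors src, weights, bias and dst are hypothetical and assumed to be
     * already allocated NHWC tensors of a supported data type.
     *
     *   NEDepthwiseConvolutionLayerNativeKernel dwc_kernel;
     *   dwc_kernel.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0));
     *   NEScheduler::get().schedule(&dwc_kernel, Window::DimY);
     */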
    /** Static function to check if given info will lead to a valid configuration of @ref NEDepthwiseConvolutionLayerNativeKernel
     *
     * @note Supported data layout: NHWC
     *
     * @param[in] input            Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in] weights          Weights tensor info. This is a 3D tensor with dimensions [IFM, W, H].
     *                             Data type supported: same as @p input, or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[in] biases           Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
     *                             Data type supported: same as @p input, or S32 when @p input is QASYMM8/QASYMM8_SIGNED.
     * @param[in] output           Destination tensor info. Data type supported: same as @p input.
     * @param[in] conv_info        Padding and stride information to use for the convolution.
     * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
     * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1,
                           const Size2D &dilation = Size2D(1U, 1U));
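    /* A sketch of checking a configuration up front, before any tensor memory
     * is allocated (illustration only; the ITensorInfo objects src_info,
     * weights_info, bias_info and dst_info are hypothetical):
     *
     *   Status s = NEDepthwiseConvolutionLayerNativeKernel::validate(
     *       &src_info, &weights_info, &bias_info, &dst_info, PadStrideInfo(1, 1, 0, 0));
     *   ARM_COMPUTE_ERROR_THROW_ON(s);
     */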

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    template <typename T>
    using FloatEnabler = typename std::enable_if<arm_compute::utils::traits::is_floating_point<T>::value, int>::type;

    template <typename T, typename TW, FloatEnabler<T> = 0>
    void run_depthwise(const Window &window, bool has_biases);

    template <typename T>
    using Quantized8bitEnabler = typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int>::type;

    template <typename T, typename TW, Quantized8bitEnabler<T> = 0>
    void run_depthwise(const Window &window, bool has_biases);
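    // Note on the overload selection above (explanatory, not from the original
    // sources): FloatEnabler<T> is only well-formed for floating-point T, and
    // Quantized8bitEnabler<T> only for uint8_t/int8_t, so instantiating e.g.
    // run_depthwise<float, float> resolves to the first overload while
    // run_depthwise<uint8_t, int8_t> resolves to the second; for any other T,
    // both candidates are removed by SFINAE and the call fails to compile.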

    /** Common signature for all the specialised depthwise convolution native functions
     *
     * @param[in] window     Region on which to execute the kernel.
     * @param[in] has_biases True if biases are to be added to the output.
     */
    using DepthwiseFunctionPtr = void (NEDepthwiseConvolutionLayerNativeKernel::*)(const Window &window, bool has_biases);

    DepthwiseFunctionPtr _func;
    const ITensor       *_input;
    const ITensor       *_weights;
    const ITensor       *_biases;
    ITensor             *_output;
    PadStrideInfo        _conv_info;
    unsigned int         _depth_multiplier;
    Size2D               _dilation;
    std::vector<int>     _output_multiplier;
    std::vector<int>     _output_shift;
    bool                 _has_biases;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H */