/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H
#define ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NEFFT2D.h"
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
#include "arm_compute/runtime/NEON/functions/NEReverse.h"
#include "arm_compute/runtime/NEON/functions/NESlice.h"

namespace arm_compute
{
// Forward declarations
class ITensor;

/** Basic function to execute FFT-based convolution on CPU. This function calls the following functions/kernels:
 *
 *  -# @ref NEPermute                        Permute input if NHWC (only NCHW is supported).
 *  -# @ref NEPadLayer                       Pad input.
 *  -# @ref NEFFT2D                          Forward transform to the frequency domain.
 *  -# @ref NEComplexPixelWiseMultiplication Complex element-wise product of the input and the weights.
 *  -# @ref NEReductionOperation             Reduction across channels.
 *  -# @ref NEFFT2D                          Inverse transform back to the spatial domain.
 *  -# @ref NEStridedSlice                   Extract valid output.
 *  -# @ref NEArithmeticAddition             Add bias.
 *  -# @ref NEActivationLayer                Perform activation.
 *  -# @ref NEPermute                        Permute output if NHWC (only NCHW is supported).
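 *
 * The pipeline above relies on the convolution theorem: convolving two signals is
 * equivalent to an element-wise complex product of their Fourier transforms.
 * Conceptually (an explanatory sketch, not the exact internal call sequence):
 *
 *   output = IFFT2D( sum_over_IFM( FFT2D(pad(input)) * FFT2D(pad(flip(weights))) ) )
 *
 * The weights are flipped (hence the @ref NEReverse member below) because CNN
 * "convolution" is actually cross-correlation, and corr(x, w) == conv(x, flip(w)).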
 */
class NEFFTConvolutionLayer : public IFunction
{
public:
    /** Default constructor */
    NEFFTConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEFFTConvolutionLayer(const NEFFTConvolutionLayer &) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEFFTConvolutionLayer(NEFFTConvolutionLayer &&) = delete;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEFFTConvolutionLayer &operator=(const NEFFTConvolutionLayer &) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEFFTConvolutionLayer &operator=(NEFFTConvolutionLayer &&) = delete;
    /** Default destructor */
    ~NEFFTConvolutionLayer();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src    |dst    |
     * |:------|:------|
     * |F32    |F32    |
     *
     * @note This function supports only square kernel sizes and unit strides, for both the NCHW and NHWC data layouts.
     *
     * @param[in]  input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: F32.
     * @param[in]  weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases           Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. Unused for the CPU backend.
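     *
     * A minimal usage sketch (illustrative only; tensor shape setup and allocation are
     * assumed to happen elsewhere, and the padding values presume a 5x5 kernel):
     * @code
     * NEFFTConvolutionLayer conv;
     * Tensor src, weights, biases, dst; // F32 tensors, initialised beforehand
     * // Unit strides are required; pad 2,2 preserves the output size for a 5x5 kernel.
     * conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 2, 2));
     * conv.run();
     * @endcode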
     */
    void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
    /** Static function to check if the given info will lead to a valid configuration of @ref NEFFTConvolutionLayer
     *
     * @note This function supports only square kernel sizes and unit strides, for both the NCHW and NHWC data layouts.
     *
     * @param[in] input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represents a batch of inputs.
     *                             Data types supported: F32.
     * @param[in] weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in] biases           Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. Unused for the CPU backend.
     *
     * @return a status
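     *
     * A sketch of the usual validate-then-configure pattern (the tensor and
     * @ref PadStrideInfo variables are illustrative assumptions, not part of this API):
     * @code
     * const Status status = NEFFTConvolutionLayer::validate(src.info(), weights.info(),
     *                                                       biases.info(), dst.info(), conv_info);
     * if(!bool(status))
     * {
     *     // Invalid configuration; inspect status.error_description()
     * }
     * @endcode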
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);

    // Inherited methods overridden:
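    // Note: following the common Compute Library pattern, run() triggers prepare() on
    // its first invocation; prepare() performs the one-off weight transformations
    // (flip, pad, forward FFT) suggested by the members below.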
    void run() override;
    void prepare() override;

private:
    MemoryGroup                      _memory_group;
    NEReverse                        _flip_weights_func;
    NEPermute                        _permute_input_func;
    NEPermute                        _permute_output_func;
    NEPermute                        _permute_weights_func;
    NEPermute                        _permute_bias_func;
    NEPadLayer                       _pad_input_func;
    NEPadLayer                       _pad_weights_func;
    NEFFT2D                          _transform_input_func;
    std::unique_ptr<NEFFT2D>         _transform_weights_func;
    NEFFT2D                          _itransform_output_func;
    NEComplexPixelWiseMultiplication _prod_func;
    NEReductionOperation             _reduce_func;
    NESlice                          _extract_output_func;
    NEArithmeticAddition             _bias_add_func;
    NEActivationLayer                _activation_layer_func;

    Tensor _permuted_input;
    Tensor _permuted_weights;
    Tensor _permuted_bias;
    Tensor _permuted_output;
    Tensor _padded_input;
    Tensor _padded_weights;
    Tensor _flip_axis;
    Tensor _flipped_weights;
    Tensor _transformed_input;
    Tensor _transformed_weights;
    Tensor _input_weights_product;
    Tensor _output_product;
    Tensor _output_reduced;
    Tensor _itransformed_output;
    Tensor _reshaped_output;
    Tensor _bias_output;

    const ITensor *_original_weights;
    const ITensor *_original_bias;
    bool           _is_activationlayer_enabled;
    bool           _needs_permute;
    bool           _has_bias;
    bool           _is_prepared;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEFFTCONVOLUTIONLAYER_H */