/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H
#define ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/INEOperator.h"

#include <memory> // for std::unique_ptr used by the Impl members below

namespace arm_compute
{
class ITensor;
class ITensorInfo;

namespace experimental
{
/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
class NEPixelWiseMultiplication : public INEOperator
{
public:
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     *                                                       Support: Broadcast? Scale=1/255?
     *   - (U8,U8)                         -> U8, S16                 N          Y
     *   - (U8,S16)                        -> S16                     N          Y
     *   - (S16,U8)                        -> S16                     N          Y
     *   - (S16,S16)                       -> S16                     N          Y
     *   - (S32,S32)                       -> S32                     Y          N
     *   - (F16,F16)                       -> F16                     N          Y
     *   - (F32,F32)                       -> F32                     Y          Y
     *   - (QASYMM8,QASYMM8)               -> QASYMM8                 Y          Y
     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED          Y          Y
     *   - (QSYMM16,QSYMM16)               -> QSYMM16, S32            N          Y
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in, out] input1          First input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          Second input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
     * @param[in]      scale           Scale to apply after multiplication.
     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                                 If @p input1, @p input2 and @p output are all of data type S32, scale cannot be 1/255.
     * @param[in]      overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs has a quantized data type.
     * @param[in]      rounding_policy Rounding policy.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     *                                                       Support: Broadcast? Scale=1/255?
     *   - (U8,U8)                         -> U8, S16                 N          Y
     *   - (U8,S16)                        -> S16                     N          Y
     *   - (S16,U8)                        -> S16                     N          Y
     *   - (S16,S16)                       -> S16                     N          Y
     *   - (S32,S32)                       -> S32                     Y          N
     *   - (F16,F16)                       -> F16                     N          Y
     *   - (F32,F32)                       -> F32                     Y          Y
     *   - (QASYMM8,QASYMM8)               -> QASYMM8                 Y          Y
     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED          Y          Y
     *   - (QSYMM16,QSYMM16)               -> QSYMM16, S32            N          Y
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in] input1          First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[in] input2          Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[in] output          Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
     * @param[in] scale           Scale to apply after multiplication.
     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                            If @p input1, @p input2 and @p output are all of data type S32, scale cannot be 1/255.
     * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs has a quantized data type.
     * @param[in] rounding_policy Rounding policy.
     * @param[in] act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());
};
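
/* Example (not part of the library API): a minimal sketch of validating and configuring
 * the experimental operator at the ITensorInfo level. The shapes, data type and scale
 * below are illustrative assumptions; execution goes through the experimental operator
 * run interface or, more commonly, through the runtime arm_compute::NEPixelWiseMultiplication
 * function declared further down in this header.
 *
 *     #include "arm_compute/core/TensorInfo.h"
 *     #include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
 *
 *     using namespace arm_compute;
 *
 *     TensorInfo src0(TensorShape(16U, 16U), 1, DataType::F32);
 *     TensorInfo src1(TensorShape(16U, 16U), 1, DataType::F32);
 *     TensorInfo dst(TensorShape(16U, 16U), 1, DataType::F32);
 *
 *     // Check the configuration first; scale 1.f is 1/2^0, so RoundingPolicy::TO_ZERO applies.
 *     const Status status = experimental::NEPixelWiseMultiplication::validate(&src0, &src1, &dst, 1.f,
 *                                                                             ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
 *     if(status.error_code() == ErrorCode::OK)
 *     {
 *         experimental::NEPixelWiseMultiplication mul_op;
 *         mul_op.configure(&src0, &src1, &dst, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
 *     }
 */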

/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
class NEComplexPixelWiseMultiplication : public INEOperator
{
public:
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
     *
     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
};
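
/* Example (not part of the library API): a minimal, assumed sketch of validating the
 * experimental complex multiplication at the ITensorInfo level. Complex tensors are
 * modelled as 2-channel F32 tensors; the shapes below are illustrative.
 *
 *     using namespace arm_compute;
 *
 *     TensorInfo a(TensorShape(8U, 8U), 2, DataType::F32);   // 2 channels: real and imaginary parts
 *     TensorInfo b(TensorShape(8U, 8U), 2, DataType::F32);
 *     TensorInfo out(TensorShape(8U, 8U), 2, DataType::F32);
 *
 *     const Status status = experimental::NEComplexPixelWiseMultiplication::validate(&a, &b, &out);
 *     if(status.error_code() == ErrorCode::OK)
 *     {
 *         experimental::NEComplexPixelWiseMultiplication cmul_op;
 *         cmul_op.configure(&a, &b, &out);
 *     }
 */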
} // namespace experimental

/** Basic function to run @ref NEPixelWiseMultiplicationKernel */
class NEPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    NEPixelWiseMultiplication();
    /** Default Destructor */
    ~NEPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEPixelWiseMultiplication(const NEPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    NEPixelWiseMultiplication(NEPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEPixelWiseMultiplication &operator=(const NEPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    NEPixelWiseMultiplication &operator=(NEPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in, out] input1          An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2          An input tensor. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if @p input1 is QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output          Output tensor. Data types supported:
     *                                 - U8, only if both inputs are U8.
     *                                 - QASYMM8, only if both inputs are QASYMM8.
     *                                 - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
     *                                 - S16.
     *                                 - QSYMM16, only if both inputs are QSYMM16.
     *                                 - S32, only if both inputs are S32 or both are QSYMM16.
     *                                 - F16, only if @p input1 is F16.
     *                                 - F32, only if both inputs are F32.
     * @param[in]      scale           Scale to apply after multiplication.
     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                                 If @p input1, @p input2 and @p output are all of data type S32, scale cannot be 1/255.
     * @param[in]      overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs has a quantized data type.
     * @param[in]      rounding_policy Rounding policy.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(const ITensor *input1, const ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in] input1          An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     * @param[in] input2          An input tensor info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if both inputs are QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
     * @param[in] output          Output tensor info. Data types supported:
     *                            - U8, only if both inputs are U8.
     *                            - QASYMM8, only if both inputs are QASYMM8.
     *                            - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
     *                            - S16.
     *                            - QSYMM16, only if both inputs are QSYMM16.
     *                            - S32, only if both inputs are S32 or both are QSYMM16.
     *                            - F16, only if @p input1 is F16.
     *                            - F32, only if both inputs are F32.
     * @param[in] scale           Scale to apply after multiplication.
     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                            If @p input1, @p input2 and @p output are all of data type S32, scale cannot be 1/255.
     * @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs has a quantized data type.
     * @param[in] rounding_policy Rounding policy.
     * @param[in] act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
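
/* Example (not part of the library API): a minimal usage sketch of the runtime function,
 * assuming two 16x16 F32 tensors multiplied element-wise with scale 1. The tensor shapes,
 * variable names and the fill step are illustrative assumptions.
 *
 *     #include "arm_compute/runtime/Tensor.h"
 *     #include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
 *
 *     using namespace arm_compute;
 *
 *     Tensor input1, input2, output;
 *     input1.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *     input2.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *     output.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *
 *     NEPixelWiseMultiplication mul;
 *     // Scale 1.f (i.e. 1/2^0) requires RoundingPolicy::TO_ZERO; see the notes above.
 *     mul.configure(&input1, &input2, &output, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
 *
 *     input1.allocator()->allocate();
 *     input2.allocator()->allocate();
 *     output.allocator()->allocate();
 *
 *     // ... fill input1 and input2 with data ...
 *
 *     mul.run(); // output[i] = input1[i] * input2[i] * 1.f
 */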

/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
class NEComplexPixelWiseMultiplication : public IFunction
{
public:
    /** Default Constructor */
    NEComplexPixelWiseMultiplication();
    /** Default Destructor */
    ~NEComplexPixelWiseMultiplication();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEComplexPixelWiseMultiplication(const NEComplexPixelWiseMultiplication &) = delete;
    /** Default move constructor */
    NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEComplexPixelWiseMultiplication &operator=(const NEComplexPixelWiseMultiplication &) = delete;
    /** Default move assignment operator */
    NEComplexPixelWiseMultiplication &operator=(NEComplexPixelWiseMultiplication &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1   An input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2   An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     output   The output tensor. Data types supported: same as @p input1. Number of channels: same as @p input1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEComplexPixelWiseMultiplication
     *
     * @param[in] input1   An input tensor info. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     * @param[in] input2   An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] output   The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
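
/* Example (not part of the library API): a minimal usage sketch for runtime complex
 * multiplication, assuming 2-channel F32 tensors (real and imaginary parts as channels).
 * Shapes and variable names are illustrative.
 *
 *     #include "arm_compute/runtime/Tensor.h"
 *
 *     using namespace arm_compute;
 *
 *     Tensor lhs, rhs, dst;
 *     lhs.allocator()->init(TensorInfo(TensorShape(8U, 8U), 2, DataType::F32));
 *     rhs.allocator()->init(TensorInfo(TensorShape(8U, 8U), 2, DataType::F32));
 *     dst.allocator()->init(TensorInfo(TensorShape(8U, 8U), 2, DataType::F32));
 *
 *     NEComplexPixelWiseMultiplication cmul;
 *     cmul.configure(&lhs, &rhs, &dst);
 *
 *     lhs.allocator()->allocate();
 *     rhs.allocator()->allocate();
 *     dst.allocator()->allocate();
 *
 *     // ... fill lhs and rhs with complex data ...
 *     cmul.run();
 */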
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEPIXELWISEMULTIPLICATION_H */