/*
 * Copyright (c) 2016-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_MUL_H
#define ARM_COMPUTE_CPU_MUL_H

#include "arm_compute/core/ITensorInfo.h"
#include "src/cpu/ICpuOperator.h"

namespace arm_compute
{
namespace cpu
{
/** Basic function to run @ref kernels::CpuMulKernel */
class CpuMul : public ICpuOperator
{
public:
    /** Initialise the kernel's inputs, dst and conversion policy.
     *
     * @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
     *       For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
     *
     * @param[in, out] src1            First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/S32/QSYMM16/F16/F32
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] src2            Second input tensor info. Data types supported: U8, QASYMM8 (only if @p src1 is QASYMM8), QASYMM8_SIGNED (only if @p src1 is QASYMM8_SIGNED), S16, S32, QSYMM16 (only if @p src1 is QSYMM16), F16 (only if @p src1 is F16), F32 (only if @p src1 is F32).
     *                                 This input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     dst             Destination tensor info. Data types supported:
     *                                 - U8, only if both inputs are U8.
     *                                 - QASYMM8, only if both inputs are QASYMM8.
     *                                 - QASYMM8_SIGNED, only if @p src1 is QASYMM8_SIGNED.
     *                                 - S16.
     *                                 - QSYMM16, only if both inputs are QSYMM16.
     *                                 - S32, only if both inputs are S32 or both are QSYMM16.
     *                                 - F16, only if @p src1 is F16.
     *                                 - F32, only if both inputs are F32.
     * @param[in]      scale           Scale to apply after multiplication.
     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
     *                                 If @p src1, @p src2 and @p dst are all of data type S32, scale cannot be 1/255.
     * @param[in]      overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if any of the inputs is of quantized data type.
     * @param[in]      rounding_policy Rounding policy.
     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref CpuMul::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
                           const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
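/* Minimal usage sketch for CpuMul, kept as a comment so the header stays unchanged for the compiler.
 * It is illustrative only: it assumes the internal operator pattern (configure with ITensorInfo pointers,
 * run with an ITensorPack keyed by TensorType::ACL_SRC_0 / ACL_SRC_1 / ACL_DST) together with the public
 * Tensor/TensorInfo helpers; the shapes, scale and policies below are arbitrary example values.
 *
 * @code
 * TensorInfo src1_info(TensorShape(16U, 16U), 1, DataType::F32);
 * TensorInfo src2_info(TensorShape(16U, 16U), 1, DataType::F32);
 * TensorInfo dst_info(TensorShape(16U, 16U), 1, DataType::F32);
 *
 * CpuMul mul;
 * // Validate first; scale 1.f with SATURATE / TO_ZERO is a typical choice for float inputs.
 * Status st = CpuMul::validate(&src1_info, &src2_info, &dst_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
 * mul.configure(&src1_info, &src2_info, &dst_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
 *
 * // At run time the actual tensors are supplied through a tensor pack.
 * Tensor src1, src2, dst;
 * src1.allocator()->init(src1_info);
 * src2.allocator()->init(src2_info);
 * dst.allocator()->init(dst_info);
 * src1.allocator()->allocate();
 * src2.allocator()->allocate();
 * dst.allocator()->allocate();
 *
 * ITensorPack pack;
 * pack.add_tensor(TensorType::ACL_SRC_0, &src1);
 * pack.add_tensor(TensorType::ACL_SRC_1, &src2);
 * pack.add_tensor(TensorType::ACL_DST, &dst);
 * mul.run(pack);
 * @endcode
 */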
/** Basic function to run @ref kernels::CpuComplexMulKernel */
class CpuComplexMul : public ICpuOperator
{
public:
    /** Initialise the kernel's inputs and dst.
     *
     * @param[in, out] src1     First input tensor. Data types supported: F32. Number of channels supported: 2 (complex tensor).
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] src2     Second input tensor. Data types supported: same as @p src1. Number of channels supported: same as @p src1.
     *                          The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out]     dst      The dst tensor. Data types supported: same as @p src1. Number of channels: same as @p src1.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
     */
    void configure(ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to @ref CpuComplexMul::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_MUL_H */