/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H
#define ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H

#include "src/core/NEON/INEKernel.h"

namespace arm_compute
{
class ITensor;

/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
 *
 * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add bias to the final result if the bias tensor is not a nullptr
 *  -# Add offset terms to the final result
 *  -# Multiply each entry of the result by result_mult_int
 *  -# Shift the int32 accumulator by result_shift
 *  -# Clamp the value between the specified min and max bounds
 *  -# Clamp the resulting int32 values:
 *     - to the [0..255] range and cast to QASYMM8.
 *     - to the [-128..127] range and cast to QASYMM8_SIGNED.
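 *
 * As a minimal scalar sketch of the above (illustration only: the kernel itself is vectorised, and it is
 * assumed here that the @ref GEMMLowpOutputStageInfo fields gemmlowp_offset, gemmlowp_multiplier and
 * gemmlowp_shift carry the offset, result_mult_int and result_shift terms; in, bias_val and out are
 * placeholder names):
 *
 * @code
 * int32_t acc = in;                                      // int32 GEMMLowp accumulator
 * if(bias != nullptr)
 * {
 *     acc += bias_val;                                   // shared bias value for this output column
 * }
 * acc += output_stage->gemmlowp_offset;                  // add offset terms
 * acc *= output_stage->gemmlowp_multiplier;              // multiply by result_mult_int
 * acc >>= output_stage->gemmlowp_shift;                  // shift by result_shift
 * acc = std::max(acc, output_stage->gemmlowp_min_bound); // clamp between the min and max bounds
 * acc = std::min(acc, output_stage->gemmlowp_max_bound); // (used when a bounded ReLU is requested)
 * // Saturate to the output type range and cast, e.g. for QASYMM8:
 * const auto out = static_cast<uint8_t>(std::min<int32_t>(std::max<int32_t>(acc, 0), 255));
 * @endcode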
 */
class NEGEMMLowpQuantizeDownInt32ScaleKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEGEMMLowpQuantizeDownInt32ScaleKernel";
    }
    /** Constructor */
    NEGEMMLowpQuantizeDownInt32ScaleKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpQuantizeDownInt32ScaleKernel(const NEGEMMLowpQuantizeDownInt32ScaleKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpQuantizeDownInt32ScaleKernel &operator=(const NEGEMMLowpQuantizeDownInt32ScaleKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEGEMMLowpQuantizeDownInt32ScaleKernel(NEGEMMLowpQuantizeDownInt32ScaleKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEGEMMLowpQuantizeDownInt32ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ScaleKernel &&) = default;
    /** Default destructor */
    ~NEGEMMLowpQuantizeDownInt32ScaleKernel() = default;
    /** Initialise the kernel's input and output.
     *
     * @param[in]  input        Input tensor. Data type supported: S32
     * @param[in]  bias         Biases tensor. Only shared biases are supported; it can be a nullptr if the biases addition is not required.
     *                          Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output       Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  output_stage GEMMLowp output stage metadata.
     */
    void configure(const ITensor *input, const ITensor *bias, ITensor *output, const GEMMLowpOutputStageInfo *output_stage);
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ScaleKernel
     *
     * @param[in] input        Input tensor. Data type supported: S32
     * @param[in] bias         Biases tensor. Only shared biases are supported; it can be a nullptr if the biases addition is not required.
     *                         Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output       Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in] output_stage GEMMLowp output stage metadata.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    /** Template function to run the NEGEMMLowpQuantizeDownInt32ScaleKernel
     *
     * @param[in] window Region on which to execute the kernel (Must be a valid region of the window returned by window()).
     */
    template <typename T>
    void run(const Window &window);

    /** Common signature for all the specialised NEGEMMLowpQuantizeDownInt32ScaleKernel functions
     *
     * @param[in] window Region on which to execute the kernel.
     */
    using QuantizeDownFunctionPtr = void (NEGEMMLowpQuantizeDownInt32ScaleKernel::*)(const Window &window);

    QuantizeDownFunctionPtr        _func;
    const ITensor                 *_input;
    const ITensor                 *_bias;
    ITensor                       *_output;
    const GEMMLowpOutputStageInfo *_output_stage;
    bool                           _is_bounded_relu;
};
} // namespace arm_compute

#endif /* ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H */