/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H
#define ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H

#include "src/core/CL/ICLKernel.h"

namespace arm_compute
{
class ICLTensor;

/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
 *
 * This kernel takes a final int32 accumulator value (the output of the matrix multiplication) and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add offset terms to the final result
 *  -# Multiply each entry of the result by result_mult_int
 *  -# Add bias to the final result if the bias tensor is not a nullptr
 *  -# Shift the int32 accumulator by result_shift
 *  -# Clamp the value between the specified min and max bounds
 *  -# Saturate the resulting int32 values:
 *     - to the [0..255] range and cast to QASYMM8, or
 *     - to the [-128..127] range and cast to QASYMM8_SIGNED.
 *
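 * A minimal scalar sketch of the per-element arithmetic above (illustrative
 * only: the kernel itself performs this in OpenCL, and the
 * GEMMLowpOutputStageInfo field names used here are assumptions):
 * @code
 * int32_t acc = in_value + output_stage->gemmlowp_offset;  // add offset term
 * acc *= output_stage->gemmlowp_multiplier;                // multiply by result_mult_int
 * if(bias != nullptr)
 * {
 *     acc += bias_value;                                   // optional bias addition
 * }
 * acc >>= output_stage->gemmlowp_shift;                    // shift by result_shift
 * acc = std::min(std::max(acc, output_stage->gemmlowp_min_bound), output_stage->gemmlowp_max_bound);
 * const auto out = static_cast<uint8_t>(std::min(std::max(acc, 0), 255)); // saturating cast to QASYMM8
 * @endcode
 *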
 */
class CLGEMMLowpQuantizeDownInt32ScaleKernel : public ICLKernel
{
public:
    /** Constructor */
    CLGEMMLowpQuantizeDownInt32ScaleKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMLowpQuantizeDownInt32ScaleKernel(const CLGEMMLowpQuantizeDownInt32ScaleKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMLowpQuantizeDownInt32ScaleKernel &operator=(const CLGEMMLowpQuantizeDownInt32ScaleKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLGEMMLowpQuantizeDownInt32ScaleKernel(CLGEMMLowpQuantizeDownInt32ScaleKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLGEMMLowpQuantizeDownInt32ScaleKernel &operator=(CLGEMMLowpQuantizeDownInt32ScaleKernel &&) = default;
    /** Initialise the kernel's input and output.
     *
     * @param[in]  input        Input tensor. Data type supported: S32
     * @param[in]  bias         Biases tensor. Only shared biases are supported, and it can be a nullptr if bias addition is not required.
     *                          Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output       Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  output_stage GEMMLowp output stage metadata.
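     *
     * A usage sketch (tensor creation and allocation elided; the
     * GEMMLowpOutputStageInfo values shown are assumptions for illustration):
     * @code
     * GEMMLowpOutputStageInfo info{};
     * info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN;
     * info.gemmlowp_offset     = 2;
     * info.gemmlowp_multiplier = 3;
     * info.gemmlowp_shift      = 1;
     * info.gemmlowp_min_bound  = 0;
     * info.gemmlowp_max_bound  = 255;
     * info.output_data_type    = DataType::QASYMM8;
     *
     * CLGEMMLowpQuantizeDownInt32ScaleKernel k;
     * k.configure(&src, nullptr, &dst, &info); // src: S32, dst: QASYMM8
     * @endcode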
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage);
    /** Initialise the kernel's input and output.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases are supported, and it can be a nullptr if bias addition is not required.
     *                             Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  output_stage    GEMMLowp output stage metadata.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *output_stage);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleKernel
     *
     * @param[in] input        Input tensor. Data type supported: S32
     * @param[in] bias         Biases tensor. Only shared biases are supported, and it can be a nullptr if bias addition is not required.
     *                         Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output       Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in] output_stage GEMMLowp output stage metadata.
     *
     * @return a status
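     *
     * The usual pattern is to validate before configuring, e.g. (a sketch;
     * src, dst and info are assumed to be already-initialised objects):
     * @code
     * Status st = CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(src.info(), nullptr, dst.info(), &info);
     * // Only proceed to configure() if st reports ErrorCode::OK
     * @endcode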
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;
    const ICLTensor *_bias;
    ICLTensor       *_output;
};
} // namespace arm_compute

#endif /* ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEKERNEL_H */