/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEGEMMLOWPOUTPUTSTAGE_H
#define ARM_COMPUTE_NEGEMMLOWPOUTPUTSTAGE_H

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"

#include <memory>

/** This file contains all available output stages for GEMMLowp.
 *
 *  In gemmlowp, the "output stage" is the process that takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyCore)
 *  and processes it to obtain the final quantized value (e.g. QASYMM8/QASYMM8_SIGNED/QSYMM16).
 *
 *  More information about the GEMMLowp output stage can be found at https://github.com/google/gemmlowp/blob/master/doc/output.md
 */

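/* Illustrative sketch only (not part of this header): a self-contained example of the
 * gemmlowp-style "quantize down by fixed point" arithmetic that an output stage typically
 * applies to each int32 accumulator. All names below are placeholders chosen for this
 * illustration, not Arm Compute Library symbols, and the saturation corner case of the
 * doubling high multiply is omitted for brevity.
 *
 *   #include <algorithm>
 *   #include <cstdint>
 *
 *   inline uint8_t quantize_down_sketch(int32_t acc, int32_t fixedpoint_multiplier, int right_shift, int32_t offset)
 *   {
 *       // High 32 bits of 2 * acc * fixedpoint_multiplier, rounded to nearest
 *       // (gemmlowp's SaturatingRoundingDoublingHighMul)
 *       const int64_t ab    = static_cast<int64_t>(acc) * fixedpoint_multiplier;
 *       const int64_t nudge = ab >= 0 ? (int64_t(1) << 30) : 1 - (int64_t(1) << 30);
 *       const int32_t mul   = static_cast<int32_t>((ab + nudge) / (int64_t(1) << 31));
 *       // Rounding divide by 2^right_shift (gemmlowp's RoundingDivideByPOT)
 *       const int32_t mask      = static_cast<int32_t>((int64_t(1) << right_shift) - 1);
 *       const int32_t remainder = mul & mask;
 *       const int32_t threshold = (mask >> 1) + (mul < 0 ? 1 : 0);
 *       const int32_t shifted   = (mul >> right_shift) + (remainder > threshold ? 1 : 0);
 *       // Add the output zero-point and clamp to the unsigned 8-bit range
 *       return static_cast<uint8_t>(std::max(0, std::min(255, shifted + offset)));
 *   }
 */
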
namespace arm_compute
{
class ITensor;
class ITensorInfo;
/** Basic function to execute GEMMLowpQuantizeDown kernels.
 *
 *  This function calls the following operators:
 *
 * -# @ref cpu::CpuGemmLowpOutputStage
 */
class NEGEMMLowpOutputStage : public IFunction
{
public:
    /** Constructor */
    NEGEMMLowpOutputStage();
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEGEMMLowpOutputStage(const NEGEMMLowpOutputStage &) = delete;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEGEMMLowpOutputStage &operator=(const NEGEMMLowpOutputStage &) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEGEMMLowpOutputStage(NEGEMMLowpOutputStage &&) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEGEMMLowpOutputStage &operator=(NEGEMMLowpOutputStage &&) = delete;
    /** Default destructor */
    ~NEGEMMLowpOutputStage();
    /** Initialise the kernel's inputs and output
     *
     * Valid data layouts:
     * - All
     *
     * Valid data type configurations:
     * |src0           |src1          |dst           |
     * |:--------------|:-------------|:-------------|
     * |S32            |S32           |QASYMM8       |
     * |S32            |S32           |QASYMM8_SIGNED|
     * |S32            |S32           |QSYMM16       |
     *
     * @param[in]  input  Input tensor. Data type supported: S32
     * @param[in]  bias   Biases tensor. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
     *                    Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM16
     * @param[in]  info   GEMMLowp output stage metadata.
     */
    void configure(const ITensor *input, const ITensor *bias, ITensor *output, const GEMMLowpOutputStageInfo &info);
    /** Static function to check if the given info will lead to a valid configuration of @ref NEGEMMLowpOutputStage
     *
     * @param[in] input  Input tensor info. It is the output of the @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor info. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
     *                   Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM16
     * @param[in] info   GEMMLowp output stage metadata.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo &info);

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
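
/* Minimal usage sketch (illustrative only): `src`, `bias` and `dst` are assumed to be
 * already-allocated tensors matching one of the data type configurations listed above,
 * and `info` a populated GEMMLowpOutputStageInfo (see arm_compute/core/Types.h):
 *
 *   NEGEMMLowpOutputStage output_stage;
 *   Status status = NEGEMMLowpOutputStage::validate(src.info(), bias.info(), dst.info(), info);
 *   // ... check `status` before proceeding ...
 *   output_stage.configure(&src, &bias, &dst, info);
 *   output_stage.run();
 */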
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEGEMMLOWPOUTPUTSTAGE_H */