• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2017-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
25 #define ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
26 
27 #include "src/core/CL/ICLKernel.h"
28 
29 namespace arm_compute
30 {
31 class ICLTensor;
32 
33 /** OpenCL kernel used to add the offset contribution after the matrix multiplication. The computation is performed in-place
34  *
35  * This kernel takes a final int32 accumulator value (the output of the matrix multiplication),
36  * and adds to it the offset contribution of matrix A and matrix B in-place.
37  *
38  * The final result is:
39  *
40  * mm_result[i][k] = mm_result[i][k] +
41  *                   (vector_sum_col[k] * a_offset) +
42  *                   (vector_sum_row[i] * b_offset) +
43  *                   (a_offset * b_offset * k)
44  *
45  */
class CLGEMMLowpOffsetContributionKernel : public ICLKernel
{
public:
    /** Default constructor: initialises all tensor pointers to nullptr */
    CLGEMMLowpOffsetContributionKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers)*/
    CLGEMMLowpOffsetContributionKernel(const CLGEMMLowpOffsetContributionKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers)*/
    CLGEMMLowpOffsetContributionKernel &operator=(const CLGEMMLowpOffsetContributionKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLGEMMLowpOffsetContributionKernel(CLGEMMLowpOffsetContributionKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLGEMMLowpOffsetContributionKernel &operator=(CLGEMMLowpOffsetContributionKernel &&) = default;
    /** Initialise the kernel's input and output.
     *
     * @param[in, out] mm_result      Input tensor containing the result of the matrix multiplication. The offset
     *                                contribution is added to it in-place. Data type supported: S32
     * @param[in]      vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
     *                                Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
     * @param[in]      vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
     *                                Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
     * @param[in]      bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                                Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p mm_result.
     * @param[in]      k              Number of matrix A columns or Matrix B rows (used for the constant
     *                                a_offset * b_offset * k term of the contribution)
     * @param[in]      a_offset       Offset to be added to each element of the matrix A.
     * @param[in]      b_offset       Offset to be added to each element of the matrix B.
     */
    void configure(ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset, int32_t b_offset);
    /** Initialise the kernel's input and output.
     *
     * Same as the overload above, but compiles the OpenCL kernel with an explicit compile context.
     *
     * @param[in]      compile_context The compile context to be used.
     * @param[in, out] mm_result       Input tensor containing the result of the matrix multiplication. The offset
     *                                 contribution is added to it in-place. Data type supported: S32
     * @param[in]      vector_sum_col  Input row-vector of sums of all the entries in each column of matrix B.
     *                                 Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
     * @param[in]      vector_sum_row  Input row-vector of sums of all the entries in each row of matrix A.
     *                                 Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
     * @param[in]      bias            Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                                 Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p mm_result.
     * @param[in]      k               Number of matrix A columns or Matrix B rows (used for the constant
     *                                 a_offset * b_offset * k term of the contribution)
     * @param[in]      a_offset        Offset to be added to each element of the matrix A.
     * @param[in]      b_offset        Offset to be added to each element of the matrix B.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, int32_t k, int32_t a_offset,
                   int32_t b_offset);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOffsetContributionKernel
     *
     * Note: unlike configure(), no @p k parameter is taken here.
     *
     * @param[in] mm_result      Input tensor containing the result of @ref CLGEMMLowpOffsetContributionKernel. Data type supported: S32
     * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of matrix B.
     *                           Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
     * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of matrix A.
     *                           Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p mm_result
     * @param[in] bias           Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                           Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p mm_result.
     * @param[in] a_offset       Offset to be added to each element of the matrix A.
     * @param[in] b_offset       Offset to be added to each element of the matrix B.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, int32_t a_offset, int32_t b_offset);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    // Non-owning pointers, set by configure(). Nullability follows the configure() contract above.
    const ICLTensor *_vector_sum_col; // Column sums of matrix B (nullptr when a_offset == 0)
    const ICLTensor *_vector_sum_row; // Row sums of matrix A (nullptr when b_offset == 0)
    ICLTensor       *_mm_result;      // S32 accumulator updated in-place by run()
    const ICLTensor *_bias;           // Optional shared biases (nullptr when not required)
};
114 } // namespace arm_compute
115 
116 #endif /* ARM_COMPUTE_CLGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H */
117