/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "GEMM.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
    // Create reference
    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };

    // Compute reference
    const int M = a.shape().y();
    const int N = b.shape().x();
    const int K = a.shape().x();
    const int D = a.shape().z(); // Number of matrices in a batch
    const int W = a.shape()[3];  // Number of batched GEMMs (Winograd case)

    const int a_stride_z = K * M;
    const int a_stride_w = K * M * D;

    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;     // Do not slide matrix B along the 3rd dimension if matrix B has fewer than 3 dimensions
    int       b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide matrix B along the 4th dimension if matrix B has fewer than 4 dimensions
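
    // Indexing sketch (illustrative, but consistent with the row-major accesses in the loops below):
    // element a(k, row, depth, w) sits at linear offset
    //     k + row * K + depth * a_stride_z + w * a_stride_w
    // and b and c are addressed the same way with their own strides.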

    // Note: There are 3 GEMM types: batched GEMM, multi GEMM, and batches of multi GEMMs. The third dimension of tensor b is overloaded when b has exactly 3 dimensions:
    // it can hold either the number of batches or the number of multis. Batched-GEMM computation is detected only when the third dimension of the "a" and "c" tensors is 1 and their number of dimensions is 4
    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
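    // For example (hypothetical shapes, written as [x, y, z, w]): a = [K, M, 1, W] with
    // b = [N, K, W] and c = [N, M, 1, W] is detected as a batched GEMM, while 3-dimensional
    // a = [K, M, D], b = [N, K, D], c = [N, M, D] is a multi GEMM.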

    // Batched-GEMM
    if(is_batched_gemm)
    {
        b_stride_w = b_stride_z;
    }

    const int c_stride_z = N * M;
    const int c_stride_w = N * M * D;

#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
    #pragma omp parallel for collapse(2)
#endif /* _OPENMP */
    for(int w = 0; w < W; ++w)
    {
        for(int depth = 0; depth < D; ++depth)
        {
            const int base_addr_a = depth * a_stride_z + w * a_stride_w;
            const int base_addr_b = depth * b_stride_z + w * b_stride_w;
            const int base_addr_c = depth * c_stride_z + w * c_stride_w;

            for(int row = 0; row < M; ++row)
            {
                for(int col = 0; col < N; ++col)
                {
                    T acc(0);

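                    // Dot product of row 'row' of the current A matrix with column 'col'
                    // of the current B matrix (both indexed with row-major strides)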
                    for(int k = 0; k < K; ++k)
                    {
                        acc += a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N];
                    }

                    // Finalize the result: alpha * A * B + beta * C
                    dst[base_addr_c + col + row * N] = alpha * acc + beta * c[base_addr_c + col + row * N];
                }
            }
        }
    }

    return dst;
}

template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
    // GEMM mixed-precision combines F32 accumulators with F16 multiplications
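    // (each product is computed in T - F16 for the instantiation below - then accumulated
    // into a 32-bit float, with a single cast back to T when the result is written out)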
    // Create reference
    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };

    // Compute reference
    const int M = a.shape().y();
    const int N = b.shape().x();
    const int K = a.shape().x();
    const int D = a.shape().z(); // Number of matrices in a batch
    const int W = a.shape()[3];  // Number of batched GEMMs (Winograd case)

    const int a_stride_z = K * M;
    const int a_stride_w = K * M * D;

    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;     // Do not slide matrix B along the 3rd dimension if matrix B has fewer than 3 dimensions
    int       b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide matrix B along the 4th dimension if matrix B has fewer than 4 dimensions

    // Note: There are 3 GEMM types: batched GEMM, multi GEMM, and batches of multi GEMMs. The third dimension of tensor b is overloaded when b has exactly 3 dimensions:
    // it can hold either the number of batches or the number of multis. Batched-GEMM computation is detected only when the third dimension of the "a" and "c" tensors is 1 and their number of dimensions is 4
    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;

    // Batched-GEMM
    if(is_batched_gemm)
    {
        b_stride_w = b_stride_z;
    }

    const int c_stride_z = N * M;
    const int c_stride_w = N * M * D;

#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
    #pragma omp parallel for collapse(2)
#endif /* _OPENMP */
    for(int w = 0; w < W; ++w)
    {
        for(int depth = 0; depth < D; ++depth)
        {
            const int base_addr_a = depth * a_stride_z + w * a_stride_w;
            const int base_addr_b = depth * b_stride_z + w * b_stride_w;
            const int base_addr_c = depth * c_stride_z + w * c_stride_w;

            for(int row = 0; row < M; ++row)
            {
                for(int col = 0; col < N; ++col)
                {
                    float acc(0);

                    for(int k = 0; k < K; ++k)
                    {
                        acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
                    }

                    // Finalize the result: alpha * A * B + beta * C
                    dst[base_addr_c + col + row * N] = static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
                }
            }
        }
    }

    return dst;
}

template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
template SimpleTensor<half> gemm_mixed_precision(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
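
// A minimal usage sketch (hypothetical shapes; TensorShape is written as (x, y), so x comes first):
//   SimpleTensor<float> a{ TensorShape(8U, 4U), DataType::F32 };   // A: M=4 rows, K=8 columns
//   SimpleTensor<float> b{ TensorShape(16U, 8U), DataType::F32 };  // B: K=8 rows, N=16 columns
//   SimpleTensor<float> c{ TensorShape(16U, 4U), DataType::F32 };  // C: M=4 rows, N=16 columns
//   ... fill a, b, c ...
//   SimpleTensor<float> dst = gemm(a, b, c, 1.f, 0.f);             // dst = 1 * A*B + 0 * C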
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute