/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_gemm.hpp"
#include "gemm_common.hpp"
#include "gemm_hybrid.hpp"
#include "gemm_hybrid_indirect.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
#include "gemm_interleaved_pretransposed_2d.hpp"
#include "gemv_batched.hpp"
#include "gemv_pretransposed.hpp"

#include "kernels/a32_sgemm_8x6.hpp"
#include "kernels/a64_gemv_fp32_mla_32.hpp"
#include "kernels/a64_hybrid_fp32_mla_6x16.hpp"
#include "kernels/a64_hybrid_fp32_mla_8x4.hpp"
#include "kernels/a64_sgemm_8x12.hpp"
#include "kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp"
#include "kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp"

#include "kernels/sve_gemv_fp32_mla_8VL.hpp"
#include "kernels/sve_hybrid_fp32_mla_6x4VL.hpp"
#include "kernels/sve_hybrid_fp32_mla_8x1VL.hpp"
#include "kernels/sve_interleaved_fp32_mla_8x3VL.hpp"
#include "kernels/sve_interleaved_fp32_mmla_8x3VL.hpp"
#include "kernels/sve_smallK_hybrid_fp32_mla_8x1VL.hpp"
namespace arm_gemm {

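/*
 * Selection table for FP32 GEMM. Each entry supplies a GemmMethod tag, a
 * kernel name, an optional "is supported" predicate, an optional "is
 * recommended" predicate (or, via with_estimate(), a cycle estimate used
 * to compare candidates), and a factory lambda that instantiates the
 * strategy. The selection logic in gemm_implementation.hpp scans the list
 * in order, so entries appear roughly from most to least preferred; the
 * anonymous DEFAULT entry at the end terminates the list.
 */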
static const GemmImplementation<float, float> gemm_fp32_methods[] =
{
// GEMV cases - starting with 'gemv_batched' wrapper to turn batched GEMV into GEMM.
{
    GemmMethod::GEMV_BATCHED,
    "gemv_batched",
    [](const GemmArgs &args) { return args._Msize==1 && args._nbatches>1 && !args._indirect_input; },
    nullptr,
    [](const GemmArgs &args) { return new GemvBatched<float, float>(args); }
},
#ifdef __aarch64__
#ifdef __ARM_FEATURE_SVE
{
    GemmMethod::GEMM_HYBRID,
    "sve_gemv_fp32_mla_8VL",
    [](const GemmArgs &args) { return args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
    nullptr,
    [](const GemmArgs &args) { return new GemvPretransposed<cls_sve_gemv_fp32_mla_8VL, float, float>(args); }
},
#endif
{
    GemmMethod::GEMM_HYBRID,
    "a64_gemv_fp32_mla_32",
    [](const GemmArgs &args) { return args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
    nullptr,
    [](const GemmArgs &args) { return new GemvPretransposed<cls_a64_gemv_fp32_mla_32, float, float>(args); }
},

// MMLA next due to higher throughput (SVE only)
#if defined(__ARM_FEATURE_SVE) && defined(MMLA_FP32)
{
    GemmMethod::GEMM_INTERLEAVED,
    "sve_interleaved_fp32_mmla_8x3VL",
    [](const GemmArgs &args) { return (args._Ksize>4); },
    nullptr,
    [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mmla_8x3VL, float, float>(args); }
},
#endif // __ARM_FEATURE_SVE && MMLA_FP32

#ifdef __ARM_FEATURE_SVE
// SVE smallK / hybrid methods
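// (The smallK kernels appear to handle the whole, small K dimension in a
// single pass rather than blocking over K, hence the hard _Ksize caps and
// the !_indirect_input requirement in the predicates below.)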
{
    GemmMethod::GEMM_HYBRID,
    "sve_smallK_hybrid_fp32_mla_8x1VL",
    [](const GemmArgs &args) { return args._Ksize <= 24 && !args._indirect_input; },
    nullptr,
    [](const GemmArgs &args) { return new GemmHybrid<cls_sve_smallK_hybrid_fp32_mla_8x1VL, float, float>(args); }
},
{
    GemmMethod::GEMM_HYBRID,
    "sve_hybrid_fp32_mla_8x1VL",
    nullptr,
    [](const GemmArgs &args) { return (args._Nsize < 12); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_8x1VL, float, float>(args); }
},
{
    GemmMethod::GEMM_HYBRID,
    "sve_hybrid_fp32_mla_6x4VL",
    nullptr,
    [](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>(args); }
},
#endif // __ARM_FEATURE_SVE

// NEON hybrid methods
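// (Hybrid kernels read the A matrix directly and need only B to be
// pretransposed, in contrast to the fully interleaved methods further down.)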
{
    GemmMethod::GEMM_HYBRID,
    "a64_smallK_hybrid_fp32_mla_8x4",
    [](const GemmArgs &args) { return args._Ksize <= 8 && (args._Nsize % 4)==0 && !args._indirect_input; },
    nullptr,
    [](const GemmArgs &args) { return new GemmHybrid<cls_a64_smallK_hybrid_fp32_mla_8x4, float, float>(args); }
},
{
    GemmMethod::GEMM_HYBRID,
    "a64_smallK_hybrid_fp32_mla_6x4",
    [](const GemmArgs &args) { return (args._Ksize > 8 && args._Ksize <= 16) && (args._Nsize % 4)==0 && !args._indirect_input; },
    nullptr,
    [](const GemmArgs &args) { return new GemmHybrid<cls_a64_smallK_hybrid_fp32_mla_6x4, float, float>(args); }
},
{
    GemmMethod::GEMM_HYBRID,
    "a64_hybrid_fp32_mla_8x4",
    nullptr,
    [](const GemmArgs &args) { return (args._Nsize < 12); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_8x4, float, float>(args); }
},
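// with_estimate() entries fill the "is recommended" slot with a cycle
// estimate derived from the kernel's performance parameters for the CPU in
// args._ci, letting the selector compare candidates quantitatively rather
// than via a yes/no predicate.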
GemmImplementation<float, float>::with_estimate(
    GemmMethod::GEMM_HYBRID,
    "a64_hybrid_fp32_mla_6x16",
    nullptr,
    [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>::estimate_cycles(args, cls_a64_hybrid_fp32_mla_6x16::get_performance_parameters(args._ci)); },
    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>(args); }
),
#ifdef __ARM_FEATURE_SVE
{
    GemmMethod::GEMM_INTERLEAVED,
    "sve_interleaved_fp32_mla_8x3VL",
    [](const GemmArgs &args) { return (args._Ksize>4); },
    nullptr,
    [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>(args); }
},
#endif // __ARM_FEATURE_SVE
GemmImplementation<float, float>::with_estimate(
    GemmMethod::GEMM_INTERLEAVED,
    "a64_sgemm_8x12",
    nullptr,
    [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float>::estimate_cycles(args, cls_a64_sgemm_8x12::get_performance_parameters(args._ci)); },
    [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float>(args); }
),
#endif // __aarch64__

#ifdef __arm__
{
    GemmMethod::GEMM_INTERLEAVED,
    "sgemm_8x6",
    nullptr,
    nullptr,
    [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, float, float>(args); }
},
#endif // __arm__
{
    GemmMethod::DEFAULT,
    "",
    nullptr,
    nullptr,
    nullptr
}
};

/* Templated function to return this list. */
template<>
const GemmImplementation<float, float> *gemm_implementation_list<float, float>() {
    return gemm_fp32_methods;
}

/* Explicitly instantiate the external functions for these types. */
template UniqueGemmCommon<float, float> gemm<float, float, Nothing>(const GemmArgs &args, const Nothing &);
template KernelDescription get_gemm_method<float, float, Nothing>(const GemmArgs &args, const Nothing &);
template std::vector<KernelDescription> get_compatible_kernels<float, float, Nothing>(const GemmArgs &args, const Nothing &);

} // namespace arm_gemm
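
/*
 * Usage sketch (illustrative, not part of this file): given a populated
 * GemmArgs 'args' describing the problem size, batching, threading and CPU
 * info, the entry points instantiated above pick a kernel from
 * gemm_fp32_methods. Construction of 'args' is elided here; the GemmArgs
 * constructor is declared in arm_gemm.hpp.
 *
 *   // Which kernel would be chosen for these arguments?
 *   arm_gemm::KernelDescription kd =
 *       arm_gemm::get_gemm_method<float, float, arm_gemm::Nothing>(args, arm_gemm::Nothing());
 *
 *   // Instantiate the selected GEMM object itself.
 *   arm_gemm::UniqueGemmCommon<float, float> g =
 *       arm_gemm::gemm<float, float, arm_gemm::Nothing>(args, arm_gemm::Nothing());
 */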