// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <random>
#include <vector>

#include <cpuinfo.h>

#include <benchmark/benchmark.h>
#include <fp16/fp16.h>
#include "bench/gemm.h"
#include "bench/utils.h"
#include <xnnpack/AlignedAllocator.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/pack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>

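// Benchmarks one F16 GEMM minmax microkernel: computes C (mc x nc) from
// A (mc x kc) and packed weights W (kc x nc plus per-channel bias),
// processing the output in mr x nr tiles.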
static void GEMMBenchmark(benchmark::State& state,
  xnn_f16_gemm_minmax_ukernel_function gemm,
  size_t mr, size_t nr, size_t kr, size_t sr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("cpuinfo initialization failed");
    return;
  }
  if (!benchmark::utils::CheckNEONFP16ARITH(state)) {
    return;
  }

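  // Problem size: mc/nc/kc (M, N, K) come from the benchmark's range arguments.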
  const size_t mc = state.range(0);
  const size_t nc = state.range(1);
  const size_t kc = state.range(2);

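  // Round N and K up to the microkernel's tile granularity (nr, kr);
  // the packed weights are stored with this padding.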
  const size_t nc_stride = benchmark::utils::RoundUp(nc, nr);
  const size_t kc_stride = benchmark::utils::RoundUp(kc, kr);

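  // Random FP32 values converted to IEEE FP16 bit patterns (stored as uint16_t).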
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto f32rng = std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
  auto f16rng = std::bind(fp16_ieee_from_fp32_value, f32rng);

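  // a: input matrix, k: unpacked weights, b: bias; all filled with random FP16 values.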
  std::vector<uint16_t> a(mc * kc);
  std::generate(a.begin(), a.end(), std::ref(f16rng));
  std::vector<uint16_t> k(nc * kc);
  std::generate(k.begin(), k.end(), std::ref(f16rng));
  std::vector<uint16_t> b(nc);
  std::generate(b.begin(), b.end(), std::ref(f16rng));

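  // Allocate enough copies of the weight and output buffers that rotating
  // through them evicts both from every cache level between iterations.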
  const size_t w_elements = nc_stride * kc_stride + nc_stride;
  const size_t c_elements = mc * nc;
  const size_t num_buffers = 1 +
    benchmark::utils::DivideRoundUp<size_t>(benchmark::utils::GetMaxCacheSize(),
      sizeof(uint16_t) * (w_elements + c_elements));

  std::vector<uint16_t, AlignedAllocator<uint16_t, 32>> w(w_elements * num_buffers);
  std::fill(w.begin(), w.end(), 0);
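  // Pack weights and bias into the GOI (groups, output channels, input
  // channels) layout expected by the microkernel.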
  xnn_pack_f16_gemm_goi_w(1 /* groups */, nc, kc, nr, kr, sr, k.data(), b.data(), w.data(), nullptr);
  std::vector<uint16_t> c(c_elements * num_buffers);
  std::fill(c.begin(), c.end(), UINT16_C(0x7E00) /* NaN */);

  // Prepare minmax parameters: scale = 1.0 with a (-inf, +inf) clamping range,
  // i.e. output clamping is effectively disabled.
  xnn_f16_scaleminmax_params params = xnn_init_f16_scaleminmax_params(
    UINT16_C(0x3C00)  /* 1.0 */, UINT16_C(0x7C00)  /* inf */, UINT16_C(0xFC00)  /* -inf */);

  size_t buffer_index = 0;
  for (auto _ : state) {
    // Use circular buffers (exceeding cache size) and prefetch to control cache state:
    // - A is always in L1 cache (if it fits; otherwise L2, L3, etc.)
    // - W is not in cache (at any level)
    // - C is not in cache (at any level)
    state.PauseTiming();
    benchmark::utils::PrefetchToL1(a.data(), a.size() * sizeof(uint16_t));
    buffer_index = (buffer_index + 1) % num_buffers;
    state.ResumeTiming();

    for (uint32_t m = 0; m < mc; m += mr) {
      const uint32_t mb = std::min(mc - m, mr);  // rows in this tile (edge tiles may be partial)
      for (uint32_t n = 0; n < nc; n += nr) {
        const uint32_t nb = std::min(nc - n, nr);  // columns in this tile
        gemm(
          mb, nb, kc * sizeof(uint16_t),
          a.data() + m * kc, kc * sizeof(uint16_t),
          w.data() + (nc_stride * buffer_index + n) * (kc_stride + 1),
          c.data() + (mc * buffer_index + m) * nc + n, nc * sizeof(uint16_t), nr * sizeof(uint16_t),
          &params);
      }
    }
  }

  const uint64_t cpu_frequency = benchmark::utils::GetCurrentCpuFrequency();
  if (cpu_frequency != 0) {
    state.counters["cpufreq"] = cpu_frequency;
  }

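  // Each of the mc*nc*kc multiply-accumulates counts as 2 FLOPs (one multiply, one add).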
  state.counters["FLOPS"] = benchmark::Counter(
    uint64_t(state.iterations()) * 2 * mc * nc * kc, benchmark::Counter::kIsRate);
}

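// Each wrapper below binds one microkernel variant; the name encodes the
// mr x nr output tile (e.g. 4x16) and the load width used in the inner loop
// (ld32/ld64).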
#if XNN_ARCH_ARM64
  static void f16_gemm_1x8__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_1x8__neonfp16arith_ld64, 1, 8, 1, 1);
  }

  static void f16_gemm_4x8__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_4x8__neonfp16arith_ld64, 4, 8, 1, 1);
  }

  static void f16_gemm_6x8__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64, 6, 8, 1, 1);
  }

  static void f16_gemm_8x8__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_8x8__neonfp16arith_ld64, 8, 8, 1, 1);
  }

  static void f16_gemm_1x16__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_1x16__neonfp16arith_ld64, 1, 16, 1, 1);
  }

  static void f16_gemm_4x16__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_4x16__neonfp16arith_ld64, 4, 16, 1, 1);
  }

  static void f16_gemm_6x16__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64, 6, 16, 1, 1);
  }

  static void f16_gemm_8x16__neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64, 8, 16, 1, 1);
  }

  BENCHMARK_GEMM(f16_gemm_1x8__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_4x8__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_6x8__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_8x8__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_1x16__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_4x16__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_6x16__neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_8x16__neonfp16arith_ld64)
#endif  // XNN_ARCH_ARM64

#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
  static void f16_gemm_1x16__aarch64_neonfp16arith_ld32(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_1x16__aarch64_neonfp16arith_ld32, 1, 16, 1, 1);
  }

  static void f16_gemm_4x16__aarch64_neonfp16arith_ld32(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_4x16__aarch64_neonfp16arith_ld32, 4, 16, 1, 1);
  }

  static void f16_gemm_6x16__aarch64_neonfp16arith_ld32(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_ld32, 6, 16, 1, 1);
  }

  static void f16_gemm_1x8__aarch64_neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_1x8__aarch64_neonfp16arith_ld64, 1, 8, 1, 1);
  }

  static void f16_gemm_4x8__aarch64_neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64, 4, 8, 1, 1);
  }

  static void f16_gemm_6x8__aarch64_neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_6x8__aarch64_neonfp16arith_ld64, 6, 8, 1, 1);
  }

  static void f16_gemm_8x8__aarch64_neonfp16arith_ld64(benchmark::State& state, const char* net) {
    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_8x8__aarch64_neonfp16arith_ld64, 8, 8, 1, 1);
  }

  BENCHMARK_GEMM(f16_gemm_1x16__aarch64_neonfp16arith_ld32)
  BENCHMARK_GEMM(f16_gemm_4x16__aarch64_neonfp16arith_ld32)
  BENCHMARK_GEMM(f16_gemm_6x16__aarch64_neonfp16arith_ld32)
  BENCHMARK_GEMM(f16_gemm_1x8__aarch64_neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_4x8__aarch64_neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_6x8__aarch64_neonfp16arith_ld64)
  BENCHMARK_GEMM(f16_gemm_8x8__aarch64_neonfp16arith_ld64)
#endif  // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif