// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <cpuinfo.h>
#include <pthreadpool.h>

#include <benchmark/benchmark.h>
#include <fp16/fp16.h>

#include "bench/utils.h"
#include <xnnpack/aligned-allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>


struct ComputeErrorContext {
  const uint16_t* input;
  const uint16_t* output;
  float* error;
};

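// Computes, for each element in the block, the error of the evaluated output
// relative to a single-precision std::expm1 reference, expressed in ULP of the
// half-precision result (the spacing between adjacent fp16 values around the
// reference magnitude).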
static void ComputeError(
  struct ComputeErrorContext* context,
  size_t start,
  size_t range)
{
  const uint16_t* input = context->input;
  const uint16_t* output = context->output;
  float* error = context->error;
  for (size_t i = start; i < start + range; i++) {
    const float output_ref = std::expm1(fp16_ieee_to_fp32_value(input[i]));
    const float abs_error = std::abs(output_ref - fp16_ieee_to_fp32_value(output[i]));
    const uint16_t output_abs = fp16_ieee_from_fp32_value(std::abs(output_ref));
    const float output_ulp = fp16_ieee_to_fp32_value(output_abs + 1) - fp16_ieee_to_fp32_value(output_abs);
    error[i] = float(abs_error / output_ulp);
  }
}

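// Evaluates a half-precision expm1 implementation over every negative fp16
// input from min_input down to -0.0 and reports the maximum observed error,
// in ULP, as the ULPERROR benchmark counter.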
static void ExpM1Error(
  benchmark::State& state,
  xnn_f16_unary_math_function expm1,
  benchmark::utils::IsaCheckFunction isa_check = nullptr)
{
  if (!cpuinfo_initialize()) {
    state.SkipWithError("failed cpuinfo init");
    return;
  }
  if (isa_check && !isa_check(state)) {
    return;
  }

  // The smallest x for which half-precision expm1(x) is not saturated at -1 (-0x1.0A0p+3h).
  const uint16_t min_input = UINT16_C(0xC828);

  // Number of elements in one block of inputs/outputs.
  // Combining multiple elements in a block reduces function call overhead.
  const size_t block_size = 16384;
  // Number of elements in one parallelization tile. Worker threads process this many elements in each task.
  const size_t tile_size = 64;

  uint32_t num_threads = cpuinfo_get_cores_count();
  #if XNN_ARCH_ARM || XNN_ARCH_ARM64
    // Use all cores except for the least performant cluster
    if (cpuinfo_get_clusters_count() > 1) {
      num_threads -= cpuinfo_get_cluster(cpuinfo_get_clusters_count() - 1)->core_count;
    }
  #endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64

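  // Create a thread pool with the selected number of worker threads;
  // pthreadpool_destroy releases it when the unique_ptr goes out of scope.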
  std::unique_ptr<pthreadpool, decltype(&pthreadpool_destroy)> threadpool(
    pthreadpool_create(num_threads), pthreadpool_destroy);

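  // 64-byte-aligned buffers holding one block of fp16 inputs and outputs,
  // plus the per-element ULP error computed by the worker threads.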
  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> x(block_size);
  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> y(block_size);
  std::vector<float> ulp_error(block_size);
  float max_ulp_error = 0.0f;

  ComputeErrorContext context;
  context.input = x.data();
  context.output = y.data();
  context.error = ulp_error.data();
  for (auto _ : state) {
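    // Walk the negative fp16 bit patterns from min_input down to -0.0,
    // one block of block_size consecutive values at a time.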
    for (uint16_t n = min_input; int16_t(n) < 0; n -= block_size) {
      for (uint16_t i = 0; i < block_size; i++) {
        x[i] = std::max<uint16_t>(n - i, UINT16_C(0x8000));
      }
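      // Pre-fill outputs with NaN so stale results from a previous block
      // cannot be mistaken for valid outputs.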
      std::fill(y.begin(), y.end(), UINT16_C(0x7E00) /* NaN */);

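      // Evaluate the implementation under test on the whole block; the first
      // argument is the buffer size in bytes.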
      expm1(block_size * sizeof(uint16_t), x.data(), y.data());

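      // Compute per-element ULP error for the block, tiled across the thread
      // pool in chunks of tile_size elements.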
      pthreadpool_parallelize_1d_tile_1d(
          threadpool.get(),
          reinterpret_cast<pthreadpool_task_1d_tile_1d_t>(ComputeError),
          static_cast<void*>(&context),
          block_size, tile_size, 0 /* flags */);

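      // Fold this block's per-element errors into the running maximum.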
      max_ulp_error = std::accumulate(ulp_error.cbegin(), ulp_error.cend(), max_ulp_error,
        static_cast<const float& (*)(const float&, const float&)>(std::max<float>));
    }
  }

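  // Report the worst-case ULP error observed across all evaluated inputs.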
  state.counters["ULPERROR"] = benchmark::Counter(max_ulp_error);
}

#if XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)
  BENCHMARK_CAPTURE(ExpM1Error, neonfp16arith_rr1_p3,
                    xnn_math_f16_expm1minus__neonfp16arith_rr1_p3,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
  BENCHMARK_CAPTURE(ExpM1Error, neonfp16arith_rr2_p3,
                    xnn_math_f16_expm1minus__neonfp16arith_rr2_p3,
                    benchmark::utils::CheckNEONFP16ARITH)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ENABLE_ARM_FP16 && (XNN_ARCH_ARM || XNN_ARCH_ARM64)

#if XNN_ARCH_X86 || XNN_ARCH_X86_64
  BENCHMARK_CAPTURE(ExpM1Error, avx2_rr1_p3,
                    xnn_math_f16_expm1minus__avx2_rr1_p3,
                    benchmark::utils::CheckAVX2)
    ->Unit(benchmark::kMillisecond)
    ->Iterations(1);
#endif  // XNN_ARCH_X86 || XNN_ARCH_X86_64

#ifndef XNNPACK_BENCHMARK_NO_MAIN
BENCHMARK_MAIN();
#endif