• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //===-- malloc_benchmark.cpp ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "allocator_config.h"
10 #include "combined.h"
11 #include "common.h"
12 
13 #include "benchmark/benchmark.h"
14 
15 #include <memory>
16 #include <vector>
17 
// Stashes the allocator under test so that PostInitCallback (which takes no
// arguments) can reach back to it and finish GWP-ASan initialization.
void *CurrentAllocator;
PostInitCallback()19 template <typename Config> void PostInitCallback() {
20   reinterpret_cast<scudo::Allocator<Config> *>(CurrentAllocator)->initGwpAsan();
21 }
22 
BM_malloc_free(benchmark::State & State)23 template <typename Config> static void BM_malloc_free(benchmark::State &State) {
24   using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
25   auto Deleter = [](AllocatorT *A) {
26     A->unmapTestOnly();
27     delete A;
28   };
29   std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
30                                                            Deleter);
31   CurrentAllocator = Allocator.get();
32 
33   const size_t NBytes = State.range(0);
34   size_t PageSize = scudo::getPageSizeCached();
35 
36   for (auto _ : State) {
37     void *Ptr = Allocator->allocate(NBytes, scudo::Chunk::Origin::Malloc);
38     auto *Data = reinterpret_cast<uint8_t *>(Ptr);
39     for (size_t I = 0; I < NBytes; I += PageSize)
40       Data[I] = 1;
41     benchmark::DoNotOptimize(Ptr);
42     Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
43   }
44 
45   State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NBytes));
46 }
47 
// Bounds of the allocation-size sweep handed to ->Range() below.
static const size_t MinSize = 8;
static const size_t MaxSize = 128 * 1024;

// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
// cleanly.
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::AndroidConfig)
    ->Range(MinSize, MaxSize);
#if SCUDO_CAN_USE_PRIMARY64
// Guarded: this config presumably needs the 64-bit primary — see the
// SCUDO_CAN_USE_PRIMARY64 condition.
BENCHMARK_TEMPLATE(BM_malloc_free, scudo::FuchsiaConfig)
    ->Range(MinSize, MaxSize);
#endif
59 
60 template <typename Config>
BM_malloc_free_loop(benchmark::State & State)61 static void BM_malloc_free_loop(benchmark::State &State) {
62   using AllocatorT = scudo::Allocator<Config, PostInitCallback<Config>>;
63   auto Deleter = [](AllocatorT *A) {
64     A->unmapTestOnly();
65     delete A;
66   };
67   std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
68                                                            Deleter);
69   CurrentAllocator = Allocator.get();
70 
71   const size_t NumIters = State.range(0);
72   size_t PageSize = scudo::getPageSizeCached();
73   std::vector<void *> Ptrs(NumIters);
74 
75   for (auto _ : State) {
76     size_t SizeLog2 = 0;
77     for (void *&Ptr : Ptrs) {
78       Ptr = Allocator->allocate(1 << SizeLog2, scudo::Chunk::Origin::Malloc);
79       auto *Data = reinterpret_cast<uint8_t *>(Ptr);
80       for (size_t I = 0; I < 1 << SizeLog2; I += PageSize)
81         Data[I] = 1;
82       benchmark::DoNotOptimize(Ptr);
83       SizeLog2 = (SizeLog2 + 1) % 16;
84     }
85     for (void *&Ptr : Ptrs)
86       Allocator->deallocate(Ptr, scudo::Chunk::Origin::Malloc);
87   }
88 
89   State.SetBytesProcessed(uint64_t(State.iterations()) * uint64_t(NumIters) *
90                           8192);
91 }
92 
// Bounds of the batch-size sweep (allocations held live per iteration)
// handed to ->Range() below.
static const size_t MinIters = 8;
static const size_t MaxIters = 32 * 1024;

// FIXME: Add DefaultConfig here once we can tear down the exclusive TSD
// cleanly.
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::AndroidConfig)
    ->Range(MinIters, MaxIters);
#if SCUDO_CAN_USE_PRIMARY64
BENCHMARK_TEMPLATE(BM_malloc_free_loop, scudo::FuchsiaConfig)
    ->Range(MinIters, MaxIters);
#endif

BENCHMARK_MAIN();
106