//! Criterion benchmarks for the `buddy_system_allocator` crate: single-threaded
//! small/large allocations plus two multi-threaded workloads (random sizes and
//! a Hoard-style threadtest), all served by a `LockedHeap` global allocator.
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate ctor;

use alloc::alloc::{GlobalAlloc, Layout};

use std::sync::Arc;
use std::thread;
use std::thread::sleep;
use std::time::Duration;

use buddy_system_allocator::LockedHeap;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
15 
/// Size in bytes of the "small object" benchmark allocation.
const SMALL_SIZE: usize = 8;
/// Size in bytes of the "large object" benchmark allocation (1 MiB).
const LARGE_SIZE: usize = 1024 * 1024; // 1M
/// Alignment in bytes requested for every benchmark allocation.
const ALIGN: usize = 8;
19 
20 /// Alloc small object
21 #[inline]
small_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>)22 pub fn small_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
23     let layout = unsafe { Layout::from_size_align_unchecked(SMALL_SIZE, ALIGN) };
24     unsafe {
25         let addr = heap.alloc(layout);
26         heap.dealloc(addr, layout);
27     }
28 }
29 
30 /// Alloc large object
31 #[inline]
large_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>)32 pub fn large_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
33     let layout = unsafe { Layout::from_size_align_unchecked(LARGE_SIZE, ALIGN) };
34     unsafe {
35         let addr = heap.alloc(layout);
36         heap.dealloc(addr, layout);
37     }
38 }
39 
40 /// Multithreads alloc random sizes of object
41 #[inline]
mutil_thread_random_size<const ORDER: usize>(heap: &'static LockedHeap<ORDER>)42 pub fn mutil_thread_random_size<const ORDER: usize>(heap: &'static LockedHeap<ORDER>) {
43     const THREAD_SIZE: usize = 10;
44 
45     use rand::prelude::*;
46     use rand::{Rng, SeedableRng};
47     use rand_chacha::ChaCha8Rng;
48 
49     let mut threads = Vec::with_capacity(THREAD_SIZE);
50     let alloc = Arc::new(heap);
51     for i in 0..THREAD_SIZE {
52         let prethread_alloc = alloc.clone();
53         let handle = thread::spawn(move || {
54             // generate a random size of object use seed `i` to ensure the fixed
55             // result of each turn
56             let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(i as u64);
57             // generate a random object size in range of [SMALL_SIZE ..= LARGE_SIZE]
58             let layout = unsafe {
59                 Layout::from_size_align_unchecked(rng.gen_range(SMALL_SIZE..=LARGE_SIZE), ALIGN)
60             };
61             let addr = unsafe { prethread_alloc.alloc(layout) };
62 
63             // sleep for a while
64             sleep(Duration::from_nanos((THREAD_SIZE - i) as u64));
65 
66             unsafe { prethread_alloc.dealloc(addr, layout) }
67         });
68         threads.push(handle);
69     }
70     drop(alloc);
71 
72     for t in threads {
73         t.join().unwrap();
74     }
75 }
76 
/// Multithread benchmark inspired by **Hoard** benchmark
///
/// Each of `N_THREADS` threads performs `N_ITERATIONS` rounds of allocating
/// `N_OBJECTS / N_THREADS` small vectors and then dropping them, stressing
/// the allocator's small-object churn under contention.
///
/// Warning: This benchmark generally needs long time to finish
///
/// ----------------------------------------------------------------------
/// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
///       for Shared-Memory Multiprocessors
/// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
///
/// Copyright (c) 1998-2000, The University of Texas at Austin.
///
/// This library is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation, http://www.fsf.org.
///
/// This library is distributed in the hope that it will be useful, but
/// WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
/// Library General Public License for more details.
/// ----------------------------------------------------------------------
///
#[inline]
pub fn thread_test() {
    const N_ITERATIONS: usize = 50;
    const N_OBJECTS: usize = 30000;
    const N_THREADS: usize = 10;
    const OBJECT_SIZE: usize = 1;

    #[derive(Clone)]
    struct Foo {
        pub a: i32,
        pub b: i32,
    }

    let mut threads = Vec::with_capacity(N_THREADS);

    for _i in 0..N_THREADS {
        let handle = thread::spawn(move || {
            // let a = new Foo * [nobjects / nthreads];
            let mut a = Vec::with_capacity(N_OBJECTS / N_THREADS);
            for j in 0..N_ITERATIONS {
                // BUGFIX: drop the previous iteration's objects first. The
                // original never cleared `a`, so it grew to
                // N_ITERATIONS * N_OBJECTS / N_THREADS entries, `a[k]` below
                // touched only stale elements from iteration 0, and the
                // benchmark measured growth instead of alloc/free cycles
                // (the Hoard threadtest frees each round's objects).
                a.clear();
                // inner object:
                // a[i] = new Foo[objSize];
                for k in 0..(N_OBJECTS / N_THREADS) {
                    a.push(vec![
                        Foo {
                            a: k as i32,
                            b: j as i32
                        };
                        OBJECT_SIZE
                    ]);

                    // in order to prevent optimization delete allocation directly
                    // FIXME: don't know whether it works or not
                    a[k][0].a += a[k][0].b;
                }
            }
            // auto drop here
        });
        threads.push(handle);
    }

    for t in threads {
        t.join().unwrap();
    }
}
143 
/// Buddy allocator order (maximum block size is 2^ORDER bytes).
const ORDER: usize = 32;
/// Native machine word size; the backing array is `usize`-typed so the heap
/// storage is word-aligned.
const MACHINE_ALIGN: usize = core::mem::size_of::<usize>();
/// for now 128M is needed
/// TODO: reduce memory use
const KERNEL_HEAP_SIZE: usize = 128 * 1024 * 1024;
/// Number of `usize` words backing the heap.
const HEAP_BLOCK: usize = KERNEL_HEAP_SIZE / MACHINE_ALIGN;
/// Zero-initialized static storage handed to the allocator at startup.
static mut HEAP: [usize; HEAP_BLOCK] = [0; HEAP_BLOCK];
151 
152 /// Use `LockedHeap` as global allocator
153 #[global_allocator]
154 static HEAP_ALLOCATOR: LockedHeap<ORDER> = LockedHeap::<ORDER>::new();
155 
156 /// Init heap
157 ///
158 /// We need `ctor` here because benchmark is running behind the std enviroment,
159 /// which means std will do some initialization before execute `fn main()`.
160 /// However, our memory allocator must be init in runtime(use linkedlist, which
161 /// can not be evaluated in compile time). And in the initialization phase, heap
162 /// memory is needed.
163 ///
164 /// So the solution in this dilemma is to run `fn init_heap()` in initialization phase
165 /// rather than in `fn main()`. We need `ctor` to do this.
166 #[ctor]
init_heap()167 fn init_heap() {
168     let heap_start = unsafe { HEAP.as_ptr() as usize };
169     unsafe {
170         HEAP_ALLOCATOR
171             .lock()
172             .init(heap_start, HEAP_BLOCK * MACHINE_ALIGN);
173     }
174 }
175 
176 /// Entry of benchmarks
criterion_benchmark(c: &mut Criterion)177 pub fn criterion_benchmark(c: &mut Criterion) {
178     // run benchmark
179     c.bench_function("small alloc", |b| {
180         b.iter(|| small_alloc(black_box(&HEAP_ALLOCATOR)))
181     });
182     c.bench_function("large alloc", |b| {
183         b.iter(|| large_alloc(black_box(&HEAP_ALLOCATOR)))
184     });
185     c.bench_function("mutil thread random size", |b| {
186         b.iter(|| mutil_thread_random_size(black_box(&HEAP_ALLOCATOR)))
187     });
188     c.bench_function("threadtest", |b| b.iter(|| thread_test()));
189 }
190 
191 criterion_group!(benches, criterion_benchmark);
192 criterion_main!(benches);
193