use criterion::{criterion_group, criterion_main, Criterion};
use log::trace;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tracing_subscriber::{EnvFilter, FmtSubscriber};

// This creates a bunch of threads and makes sure they start executing
// a given callback almost exactly at the same time.
fn run_on_many_threads<F, R>(thread_count: usize, callback: F) -> Vec<R>
where
    F: Fn() -> R + 'static + Send + Clone,
    R: Send + 'static,
{
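    // `started_count` records how many threads have reached the barrier, and
    // `barrier` is the flag they all spin on until the main thread flips it,
    // releasing every thread at (almost) the same instant.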
    let started_count = Arc::new(AtomicUsize::new(0));
    let barrier = Arc::new(AtomicBool::new(false));
    #[allow(clippy::needless_collect)]
    let threads: Vec<_> = (0..thread_count)
        .map(|_| {
            let started_count = started_count.clone();
            let barrier = barrier.clone();
            let callback = callback.clone();

            std::thread::spawn(move || {
                started_count.fetch_add(1, Ordering::SeqCst);
                while !barrier.load(Ordering::SeqCst) {
                    std::thread::yield_now();
                }

                callback()
            })
        })
        .collect();

    while started_count.load(Ordering::SeqCst) != thread_count {
        std::thread::yield_now();
    }
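    // Every thread has checked in; release them all at once.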
    barrier.store(true, Ordering::SeqCst);

    threads
        .into_iter()
        .map(|handle| handle.join())
        .collect::<Result<Vec<R>, _>>()
        .unwrap()
}

fn bench_logger(c: &mut Criterion) {
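    // An `EnvFilter` with several per-target directives, emulating a
    // realistically sized filter configuration rather than a single directive.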
    let env_filter = EnvFilter::default()
        .add_directive("info".parse().unwrap())
        .add_directive("ws=off".parse().unwrap())
        .add_directive("yamux=off".parse().unwrap())
        .add_directive("regalloc=off".parse().unwrap())
        .add_directive("cranelift_codegen=off".parse().unwrap())
        .add_directive("cranelift_wasm=warn".parse().unwrap())
        .add_directive("hyper=warn".parse().unwrap())
        .add_directive("dummy=trace".parse().unwrap());

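    // `LogTracer` bridges `log` records into `tracing`; since the benchmark
    // emits via `log::trace!`, every record passes through this bridge.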
    let builder = tracing_log::LogTracer::builder().with_max_level(log::LevelFilter::Trace);

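    // When the `interest-cache` feature is enabled, the bridge caches the
    // result of the per-target/level interest check, so records that end up
    // disabled can be rejected cheaply.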
    #[cfg(feature = "interest-cache")]
    let builder = builder.with_interest_cache(tracing_log::InterestCacheConfig::default());

    builder.init().unwrap();

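    // Build the actual `tracing` subscriber with the filter above and install
    // it as the global default.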
    let builder = FmtSubscriber::builder()
        .with_env_filter(env_filter)
        .with_filter_reloading();

    let subscriber = builder.finish();
    tracing::subscriber::set_global_default(subscriber).unwrap();

    const THREAD_COUNT: usize = 8;

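    // `iter_custom` lets the benchmark measure time itself: every thread logs
    // `count` records and reports its elapsed time, and the mean per-thread
    // duration is handed back to Criterion.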
    c.bench_function("log_from_multiple_threads", |b| {
        b.iter_custom(|count| {
            let durations = run_on_many_threads(THREAD_COUNT, move || {
                let start = Instant::now();
                for _ in 0..count {
                    trace!("A dummy log!");
                }
                start.elapsed()
            });

            let total_time: Duration = durations.into_iter().sum();
            Duration::from_nanos((total_time.as_nanos() / THREAD_COUNT as u128) as u64)
        })
    });
}

criterion_group!(benches, bench_logger);
criterion_main!(benches);
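
// Note: running this under Criterion assumes a `[[bench]]` entry in Cargo.toml
// with `harness = false` pointing at this file; the exact bench name depends on
// the crate layout and is not shown here.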