use crate::runtime::{Config, MetricsBatch, WorkerMetrics};

use std::cmp;
use std::time::{Duration, Instant};

/// Per-worker statistics. This is used for both tuning the scheduler and
/// reporting runtime-level metrics/stats.
pub(crate) struct Stats {
    /// The metrics batch used to report runtime-level metrics/stats to the
    /// user.
    batch: MetricsBatch,

    /// Instant at which work last resumed (continued after park).
    ///
    /// This duplicates the value stored in `MetricsBatch`. We will unify
    /// `Stats` and `MetricsBatch` when we stabilize metrics.
    processing_scheduled_tasks_started_at: Instant,

    /// Number of tasks polled in the batch of scheduled tasks.
    tasks_polled_in_batch: usize,

    /// Exponentially-weighted moving average of the time spent polling a
    /// scheduled task.
    ///
    /// Tracked in nanoseconds, stored as an f64 since that is what we use for
    /// the EWMA calculations.
    task_poll_time_ewma: f64,
}
29 
/// How to weigh each individual poll time; value is plucked from thin air.
const TASK_POLL_TIME_EWMA_ALPHA: f64 = 0.1;

/// Ideally, we wouldn't go above this; value is plucked from thin air.
const TARGET_GLOBAL_QUEUE_INTERVAL: f64 = Duration::from_micros(200).as_nanos() as f64;

/// Max value for the global queue interval. This is 2x the previous default.
const MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 127;

/// This is the previous default.
const TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 61;
41 
impl Stats {
    pub(crate) fn new(worker_metrics: &WorkerMetrics) -> Stats {
        // Seed the value with what we hope to see.
        let task_poll_time_ewma =
            TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64;
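        // With the defaults above, this seed works out to 200_000ns / 61, or
        // roughly 3279ns per poll.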

        Stats {
            batch: MetricsBatch::new(worker_metrics),
            processing_scheduled_tasks_started_at: Instant::now(),
            tasks_polled_in_batch: 0,
            task_poll_time_ewma,
        }
    }

    pub(crate) fn tuned_global_queue_interval(&self, config: &Config) -> u32 {
        // If an interval is explicitly set, don't tune.
        if let Some(configured) = config.global_queue_interval {
            return configured;
        }

        // As of Rust 1.45, casts from f64 -> u32 are saturating, which is fine here.
        let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32;
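        // For example, a 5µs poll-time EWMA yields 200µs / 5µs = 40 polls per
        // global queue check, before the clamp below is applied.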

        cmp::max(
            // We don't want to return less than 2 as that would result in the
            // global queue always getting checked first.
            2,
            cmp::min(
                MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL,
                tasks_per_interval,
            ),
        )
    }

    pub(crate) fn submit(&mut self, to: &WorkerMetrics) {
        self.batch.submit(to, self.task_poll_time_ewma as u64);
    }

    pub(crate) fn about_to_park(&mut self) {
        self.batch.about_to_park();
    }

    pub(crate) fn inc_local_schedule_count(&mut self) {
        self.batch.inc_local_schedule_count();
    }

    pub(crate) fn start_processing_scheduled_tasks(&mut self) {
        self.batch.start_processing_scheduled_tasks();

        self.processing_scheduled_tasks_started_at = Instant::now();
        self.tasks_polled_in_batch = 0;
    }

    pub(crate) fn end_processing_scheduled_tasks(&mut self) {
        self.batch.end_processing_scheduled_tasks();

        // Update the EWMA task poll time.
        if self.tasks_polled_in_batch > 0 {
            let now = Instant::now();

            // If we "overflow" this conversion, we have bigger problems than
            // slightly off stats.
            let elapsed = (now - self.processing_scheduled_tasks_started_at).as_nanos() as f64;
            let num_polls = self.tasks_polled_in_batch as f64;

            // Calculate the mean poll duration for a single task in the batch.
            let mean_poll_duration = elapsed / num_polls;

            // Compute the alpha weighted by the number of tasks polled this batch.
            let weighted_alpha = 1.0 - (1.0 - TASK_POLL_TIME_EWMA_ALPHA).powf(num_polls);

            // Now compute the new weighted average task poll time.
            self.task_poll_time_ewma = weighted_alpha * mean_poll_duration
                + (1.0 - weighted_alpha) * self.task_poll_time_ewma;
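            // For example, with alpha = 0.1 and a batch of 10 polls,
            // weighted_alpha = 1 - 0.9^10 ≈ 0.65, so larger batches pull the
            // average toward the new measurement more quickly.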
        }
    }

    pub(crate) fn start_poll(&mut self) {
        self.batch.start_poll();

        self.tasks_polled_in_batch += 1;
    }

    pub(crate) fn end_poll(&mut self) {
        self.batch.end_poll();
    }

    pub(crate) fn incr_steal_count(&mut self, by: u16) {
        self.batch.incr_steal_count(by);
    }

    pub(crate) fn incr_steal_operations(&mut self) {
        self.batch.incr_steal_operations();
    }
    pub(crate) fn incr_overflow_count(&mut self) {
        self.batch.incr_overflow_count();
    }
}
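
// Illustrative sanity checks for the tuning arithmetic above; a minimal
// sketch that exercises only the constants and the clamp math, since
// constructing a real `Config` or `WorkerMetrics` is beyond what these
// checks need. The module and test names are arbitrary.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn ewma_seed_matches_target_ratio() {
        // The EWMA is seeded optimistically: a 200µs interval split across 61
        // polls is roughly 3279ns per poll.
        let seed =
            TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64;
        assert!(seed > 3278.0 && seed < 3279.0);
    }

    #[test]
    fn tuned_interval_is_clamped() {
        // Mirrors the arithmetic in `tuned_global_queue_interval` without a
        // `Config`: compute the raw ratio, then clamp it to [2, 127].
        let clamp = |ewma_nanos: f64| {
            let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / ewma_nanos) as u32;
            std::cmp::max(
                2,
                std::cmp::min(MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL, tasks_per_interval),
            )
        };

        // Slow polls (1ms each): the raw ratio truncates to 0, so the floor
        // of 2 applies.
        assert_eq!(clamp(1_000_000.0), 2);

        // Fast polls (100ns each): the raw ratio (2000) is capped at 127.
        assert_eq!(clamp(100.0), MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL);
    }
}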