// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-inducing latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management) they can wreak havoc upon any OS-level performance
 * guarantees toward low-latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 */
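
/*
 * Typical use from userspace, as a sketch (paths assume tracefs is
 * mounted at /sys/kernel/tracing; the mount point may differ):
 *
 *	# cd /sys/kernel/tracing
 *	# echo 1000000 > hwlat_detector/window	(one second total window)
 *	# echo 500000 > hwlat_detector/width	(busy-sample half of it)
 *	# echo 10 > tracing_thresh		(optional; defaults to 10 us)
 *	# echo hwlat > current_tracer
 *	# cat trace
 */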
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};
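
/*
 * A sample that crosses tracing_thresh is injected into the trace buffer
 * below and rendered by the TRACE_HWLAT event printer, roughly as follows
 * (the layout shown is a sketch of the usual output):
 *
 *	#1 inner/outer(us): 12/14 ts:1581527483.343962693 nmi-total:4 nmi-count:1
 */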
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->seqnum = sample->seqnum;
	entry->duration = sample->duration;
	entry->outer_duration = sample->outer_duration;
	entry->timestamp = sample->timestamp;
	entry->nmi_total_ts = sample->nmi_total_ts;
	entry->nmi_count = sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a
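
/*
 * Note: the macros above isolate the clock source from the sampling logic;
 * the code below speaks only in time_type values and converts to
 * microseconds for comparisons and reporting.
 */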

void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts += time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called by the sampling kthread with interrupts
 * disabled.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC);	/* modifies thresh in place: now in us */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */
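
	/*
	 * Take pairs of back-to-back readings: a gap between t1 and t2
	 * (the "inner" sample) or between the previous t2 and the next t1
	 * (the "outer" sample) means this CPU lost time it never saw,
	 * e.g. to an SMI:
	 *
	 *	... t1 [inner] t2 .... [outer] .... t1 [inner] t2 ...
	 */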
	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err(BANNER "time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));	/* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		ktime_get_real_ts64(&s.timestamp);
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
		if (outer_sample > tr->max_latency)
			tr->max_latency = outer_sample;
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;
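
/*
 * move_to_next_cpu - round-robin the sampling kthread across CPUs
 *
 * After each sampling window the kthread hops to the next CPU allowed by
 * both cpu_online_mask and the tracer's tracing_cpumask, so that every
 * allowed CPU eventually gets checked for hardware latencies.
 */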
static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	struct trace_array *tr = hwlat_trace;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	if (hwlat_kthread)
		return 0;

	/* Just pick the first CPU on first iteration */
	current_mask = &save_cpumask;
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/**
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure
 * for how many us of the total window us we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and have the system respond to a sample reader, or,
 * worse, without having the system appear to have gone out to lunch. It
 * is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0640,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);