#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# runqlat    Run queue (scheduler) latency as a histogram.
#            For Linux, uses BCC, eBPF.
#
# USAGE: runqlat [-h] [-T] [-m] [-P] [-L] [-p PID] [interval] [count]
#
# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as a histogram. This time should be small, but a
# task may need to wait its turn due to CPU load.
#
# This measures two types of run queue latency:
# 1. The time from a task being enqueued on a run queue to its context switch
#    and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
#    finish_task_switch() with either raw tracepoints (if supported) or kprobes
#    and instruments the run queue latency after a voluntary context switch.
# 2. The time from when a task was involuntary context switched and still
#    in the runnable state, to when it next executed. This is instrumented
#    from finish_task_switch() alone.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 07-Feb-2016   Brendan Gregg   Created this.
26 27from __future__ import print_function 28from bcc import BPF 29from time import sleep, strftime 30import argparse 31 32# arguments 33examples = """examples: 34 ./runqlat # summarize run queue latency as a histogram 35 ./runqlat 1 10 # print 1 second summaries, 10 times 36 ./runqlat -mT 1 # 1s summaries, milliseconds, and timestamps 37 ./runqlat -P # show each PID separately 38 ./runqlat -p 185 # trace PID 185 only 39""" 40parser = argparse.ArgumentParser( 41 description="Summarize run queue (scheduler) latency as a histogram", 42 formatter_class=argparse.RawDescriptionHelpFormatter, 43 epilog=examples) 44parser.add_argument("-T", "--timestamp", action="store_true", 45 help="include timestamp on output") 46parser.add_argument("-m", "--milliseconds", action="store_true", 47 help="millisecond histogram") 48parser.add_argument("-P", "--pids", action="store_true", 49 help="print a histogram per process ID") 50# PID options are --pid and --pids, so namespaces should be --pidns (not done 51# yet) and --pidnss: 52parser.add_argument("--pidnss", action="store_true", 53 help="print a histogram per PID namespace") 54parser.add_argument("-L", "--tids", action="store_true", 55 help="print a histogram per thread ID") 56parser.add_argument("-p", "--pid", 57 help="trace this PID only") 58parser.add_argument("interval", nargs="?", default=99999999, 59 help="output interval, in seconds") 60parser.add_argument("count", nargs="?", default=99999999, 61 help="number of outputs") 62parser.add_argument("--ebpf", action="store_true", 63 help=argparse.SUPPRESS) 64args = parser.parse_args() 65countdown = int(args.count) 66debug = 0 67 68# define BPF program 69bpf_text = """ 70#include <uapi/linux/ptrace.h> 71#include <linux/sched.h> 72#include <linux/nsproxy.h> 73#include <linux/pid_namespace.h> 74#include <linux/init_task.h> 75 76typedef struct pid_key { 77 u64 id; // work around 78 u64 slot; 79} pid_key_t; 80 81typedef struct pidns_key { 82 u64 id; // work around 83 u64 slot; 84} 
pidns_key_t; 85 86BPF_HASH(start, u32); 87STORAGE 88 89struct rq; 90 91// record enqueue timestamp 92static int trace_enqueue(u32 tgid, u32 pid) 93{ 94 if (FILTER || pid == 0) 95 return 0; 96 u64 ts = bpf_ktime_get_ns(); 97 start.update(&pid, &ts); 98 return 0; 99} 100 101static __always_inline unsigned int pid_namespace(struct task_struct *task) 102{ 103 104/* pids[] was removed from task_struct since commit 2c4704756cab7cfa031ada4dab361562f0e357c0 105 * Using the macro INIT_PID_LINK as a conditional judgment. 106 */ 107#ifdef INIT_PID_LINK 108 struct pid_link pids; 109 unsigned int level; 110 struct upid upid; 111 struct ns_common ns; 112 113 /* get the pid namespace by following task_active_pid_ns(), 114 * pid->numbers[pid->level].ns 115 */ 116 bpf_probe_read_kernel(&pids, sizeof(pids), &task->pids[PIDTYPE_PID]); 117 bpf_probe_read_kernel(&level, sizeof(level), &pids.pid->level); 118 bpf_probe_read_kernel(&upid, sizeof(upid), &pids.pid->numbers[level]); 119 bpf_probe_read_kernel(&ns, sizeof(ns), &upid.ns->ns); 120 121 return ns.inum; 122#else 123 struct pid *pid; 124 unsigned int level; 125 struct upid upid; 126 struct ns_common ns; 127 128 /* get the pid namespace by following task_active_pid_ns(), 129 * pid->numbers[pid->level].ns 130 */ 131 bpf_probe_read_kernel(&pid, sizeof(pid), &task->thread_pid); 132 bpf_probe_read_kernel(&level, sizeof(level), &pid->level); 133 bpf_probe_read_kernel(&upid, sizeof(upid), &pid->numbers[level]); 134 bpf_probe_read_kernel(&ns, sizeof(ns), &upid.ns->ns); 135 136 return ns.inum; 137#endif 138} 139""" 140 141bpf_text_kprobe = """ 142int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p) 143{ 144 return trace_enqueue(p->tgid, p->pid); 145} 146 147int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p, 148 int wake_flags) 149{ 150 return trace_enqueue(p->tgid, p->pid); 151} 152 153// calculate latency 154int trace_run(struct pt_regs *ctx, struct task_struct *prev) 155{ 156 u32 pid, 
tgid; 157 158 // ivcsw: treat like an enqueue event and store timestamp 159 if (prev->STATE_FIELD == TASK_RUNNING) { 160 tgid = prev->tgid; 161 pid = prev->pid; 162 if (!(FILTER || pid == 0)) { 163 u64 ts = bpf_ktime_get_ns(); 164 start.update(&pid, &ts); 165 } 166 } 167 168 tgid = bpf_get_current_pid_tgid() >> 32; 169 pid = bpf_get_current_pid_tgid(); 170 if (FILTER || pid == 0) 171 return 0; 172 u64 *tsp, delta; 173 174 // fetch timestamp and calculate delta 175 tsp = start.lookup(&pid); 176 if (tsp == 0) { 177 return 0; // missed enqueue 178 } 179 delta = bpf_ktime_get_ns() - *tsp; 180 FACTOR 181 182 // store as histogram 183 STORE 184 185 start.delete(&pid); 186 return 0; 187} 188""" 189 190bpf_text_raw_tp = """ 191RAW_TRACEPOINT_PROBE(sched_wakeup) 192{ 193 // TP_PROTO(struct task_struct *p) 194 struct task_struct *p = (struct task_struct *)ctx->args[0]; 195 return trace_enqueue(p->tgid, p->pid); 196} 197 198RAW_TRACEPOINT_PROBE(sched_wakeup_new) 199{ 200 // TP_PROTO(struct task_struct *p) 201 struct task_struct *p = (struct task_struct *)ctx->args[0]; 202 return trace_enqueue(p->tgid, p->pid); 203} 204 205RAW_TRACEPOINT_PROBE(sched_switch) 206{ 207 // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next) 208 struct task_struct *prev = (struct task_struct *)ctx->args[1]; 209 struct task_struct *next = (struct task_struct *)ctx->args[2]; 210 u32 pid, tgid; 211 212 // ivcsw: treat like an enqueue event and store timestamp 213 if (prev->STATE_FIELD == TASK_RUNNING) { 214 tgid = prev->tgid; 215 pid = prev->pid; 216 if (!(FILTER || pid == 0)) { 217 u64 ts = bpf_ktime_get_ns(); 218 start.update(&pid, &ts); 219 } 220 } 221 222 tgid = next->tgid; 223 pid = next->pid; 224 if (FILTER || pid == 0) 225 return 0; 226 u64 *tsp, delta; 227 228 // fetch timestamp and calculate delta 229 tsp = start.lookup(&pid); 230 if (tsp == 0) { 231 return 0; // missed enqueue 232 } 233 delta = bpf_ktime_get_ns() - *tsp; 234 FACTOR 235 236 // store as histogram 237 
STORE 238 239 start.delete(&pid); 240 return 0; 241} 242""" 243 244is_support_raw_tp = BPF.support_raw_tracepoint() 245if is_support_raw_tp: 246 bpf_text += bpf_text_raw_tp 247else: 248 bpf_text += bpf_text_kprobe 249 250# code substitutions 251if BPF.kernel_struct_has_field(b'task_struct', b'__state') == 1: 252 bpf_text = bpf_text.replace('STATE_FIELD', '__state') 253else: 254 bpf_text = bpf_text.replace('STATE_FIELD', 'state') 255if args.pid: 256 # pid from userspace point of view is thread group from kernel pov 257 bpf_text = bpf_text.replace('FILTER', 'tgid != %s' % args.pid) 258else: 259 bpf_text = bpf_text.replace('FILTER', '0') 260if args.milliseconds: 261 bpf_text = bpf_text.replace('FACTOR', 'delta /= 1000000;') 262 label = "msecs" 263else: 264 bpf_text = bpf_text.replace('FACTOR', 'delta /= 1000;') 265 label = "usecs" 266if args.pids or args.tids: 267 section = "pid" 268 pid = "tgid" 269 if args.tids: 270 pid = "pid" 271 section = "tid" 272 bpf_text = bpf_text.replace('STORAGE', 273 'BPF_HISTOGRAM(dist, pid_key_t);') 274 bpf_text = bpf_text.replace('STORE', 275 'pid_key_t key = {.id = ' + pid + ', .slot = bpf_log2l(delta)}; ' + 276 'dist.increment(key);') 277elif args.pidnss: 278 section = "pidns" 279 bpf_text = bpf_text.replace('STORAGE', 280 'BPF_HISTOGRAM(dist, pidns_key_t);') 281 bpf_text = bpf_text.replace('STORE', 'pidns_key_t key = ' + 282 '{.id = pid_namespace(prev), ' + 283 '.slot = bpf_log2l(delta)}; dist.atomic_increment(key);') 284else: 285 section = "" 286 bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);') 287 bpf_text = bpf_text.replace('STORE', 288 'dist.atomic_increment(bpf_log2l(delta));') 289if debug or args.ebpf: 290 print(bpf_text) 291 if args.ebpf: 292 exit() 293 294# load BPF program 295b = BPF(text=bpf_text) 296if not is_support_raw_tp: 297 b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup") 298 b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task") 299 
b.attach_kprobe(event_re="^finish_task_switch$|^finish_task_switch\.isra\.\d$", 300 fn_name="trace_run") 301 302print("Tracing run queue latency... Hit Ctrl-C to end.") 303 304# output 305exiting = 0 if args.interval else 1 306dist = b.get_table("dist") 307while (1): 308 try: 309 sleep(int(args.interval)) 310 except KeyboardInterrupt: 311 exiting = 1 312 313 print() 314 if args.timestamp: 315 print("%-8s\n" % strftime("%H:%M:%S"), end="") 316 317 dist.print_log2_hist(label, section, section_print_fn=int) 318 dist.clear() 319 320 countdown -= 1 321 if exiting or countdown == 0: 322 exit() 323