#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# biosnoop  Trace block device I/O and print details including issuing PID.
#           For Linux, uses BCC, eBPF.
#
# This uses in-kernel eBPF maps to cache process details (PID and comm) by I/O
# request, as well as a starting timestamp for calculating I/O latency.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 16-Sep-2015   Brendan Gregg   Created this.
# 11-Feb-2016   Allan McAleavy  updated for BPF_PERF_OUTPUT

from __future__ import print_function
from bcc import BPF
import re
import argparse

# arguments
examples = """examples:
    ./biosnoop           # trace all block I/O
    ./biosnoop -Q        # include OS queued time
"""
parser = argparse.ArgumentParser(
    description="Trace block I/O",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-Q", "--queue", action="store_true",
    help="include OS queued time")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/blk-mq.h>

// for saving the timestamp and __data_len of each request
struct start_req_t {
    u64 ts;
    u64 data_len;
};

struct val_t {
    u64 ts;
    u32 pid;
    char name[TASK_COMM_LEN];
};

struct data_t {
    u32 pid;
    u64 rwflag;
    u64 delta;
    u64 qdelta;
    u64 sector;
    u64 len;
    u64 ts;
    char disk_name[DISK_NAME_LEN];
    char name[TASK_COMM_LEN];
};

BPF_HASH(start, struct request *, struct start_req_t);
BPF_HASH(infobyreq, struct request *, struct val_t);
BPF_PERF_OUTPUT(events);

// cache PID and comm by-req
int trace_pid_start(struct pt_regs *ctx, struct request *req)
{
    struct val_t val = {};
    u64 ts;

    if (bpf_get_current_comm(&val.name, sizeof(val.name)) == 0) {
        val.pid = bpf_get_current_pid_tgid() >> 32;
        if (##QUEUE##) {
            val.ts = bpf_ktime_get_ns();
        }
        infobyreq.update(&req, &val);
    }
    return 0;
}

// time block I/O
int trace_req_start(struct pt_regs *ctx, struct request *req)
{
    struct start_req_t start_req = {
        .ts = bpf_ktime_get_ns(),
        .data_len = req->__data_len
    };
    start.update(&req, &start_req);
    return 0;
}

// output
int trace_req_completion(struct pt_regs *ctx, struct request *req)
{
    struct start_req_t *startp;
    struct val_t *valp;
    struct data_t data = {};
    u64 ts;

    // fetch timestamp and calculate delta
    startp = start.lookup(&req);
    if (startp == 0) {
        // missed tracing issue
        return 0;
    }
    ts = bpf_ktime_get_ns();
    data.delta = ts - startp->ts;
    data.ts = ts / 1000;
    data.qdelta = 0;

    valp = infobyreq.lookup(&req);
    data.len = startp->data_len;
    if (valp == 0) {
        data.name[0] = '?';
        data.name[1] = 0;
    } else {
        if (##QUEUE##) {
            data.qdelta = startp->ts - valp->ts;
        }
        data.pid = valp->pid;
        data.sector = req->__sector;
        bpf_probe_read_kernel(&data.name, sizeof(data.name), valp->name);
        struct gendisk *rq_disk = req->__RQ_DISK__;
        bpf_probe_read_kernel(&data.disk_name, sizeof(data.disk_name),
            rq_disk->disk_name);
    }

/*
 * The following deals with a kernel version change (in mainline 4.7, although
 * it may be backported to earlier kernels) with how block request write flags
 * are tested. We handle both pre- and post-change versions here. Please avoid
 * kernel version tests like this as much as possible: they inflate the code,
 * test, and maintenance burden.
 */
#ifdef REQ_WRITE
    data.rwflag = !!(req->cmd_flags & REQ_WRITE);
#elif defined(REQ_OP_SHIFT)
    data.rwflag = !!((req->cmd_flags >> REQ_OP_SHIFT) == REQ_OP_WRITE);
#else
    data.rwflag = !!((req->cmd_flags & REQ_OP_MASK) == REQ_OP_WRITE);
#endif

    events.perf_submit(ctx, &data, sizeof(data));
    start.delete(&req);
    infobyreq.delete(&req);

    return 0;
}
"""
if args.queue:
    bpf_text = bpf_text.replace('##QUEUE##', '1')
else:
    bpf_text = bpf_text.replace('##QUEUE##', '0')
if BPF.kernel_struct_has_field(b'request', b'rq_disk'):
    bpf_text = bpf_text.replace('__RQ_DISK__', 'rq_disk')
else:
    bpf_text = bpf_text.replace('__RQ_DISK__', 'q->disk')
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# initialize BPF
b = BPF(text=bpf_text)
if BPF.get_kprobe_functions(b'__blk_account_io_start'):
    b.attach_kprobe(event="__blk_account_io_start", fn_name="trace_pid_start")
else:
    b.attach_kprobe(event="blk_account_io_start", fn_name="trace_pid_start")
if BPF.get_kprobe_functions(b'blk_start_request'):
    b.attach_kprobe(event="blk_start_request", fn_name="trace_req_start")
b.attach_kprobe(event="blk_mq_start_request", fn_name="trace_req_start")
if BPF.get_kprobe_functions(b'__blk_account_io_done'):
    b.attach_kprobe(event="__blk_account_io_done", fn_name="trace_req_completion")
else:
    b.attach_kprobe(event="blk_account_io_done", fn_name="trace_req_completion")

# header
print("%-11s %-14s %-7s %-9s %-1s %-10s %-7s" % ("TIME(s)", "COMM", "PID",
    "DISK", "T", "SECTOR", "BYTES"), end="")
if args.queue:
    print("%7s " % ("QUE(ms)"), end="")
print("%7s" % "LAT(ms)")

rwflg = ""
start_ts = 0
prev_ts = 0
delta = 0

# process event
def print_event(cpu, data, size):
    event = b["events"].event(data)

    global start_ts
    if start_ts == 0:
        start_ts = event.ts

    if event.rwflag == 1:
        rwflg = "W"
    else:
        rwflg = "R"

    delta = float(event.ts) - start_ts

    disk_name = event.disk_name.decode('utf-8', 'replace')
    if not disk_name:
        disk_name = '<unknown>'

    print("%-11.6f %-14.14s %-7s %-9s %-1s %-10s %-7s" % (
        delta / 1000000, event.name.decode('utf-8', 'replace'), event.pid,
        disk_name, rwflg, event.sector, event.len), end="")
    if args.queue:
        print("%7.2f " % (float(event.qdelta) / 1000000), end="")
    print("%7.2f" % (float(event.delta) / 1000000))

# loop with callback to print_event
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()