#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# funcslower  Trace slow kernel or user function calls.
#             For Linux, uses BCC, eBPF.
#
# USAGE: funcslower [-h] [-p PID] [-m MIN_MS] [-u MIN_US] [-a ARGUMENTS]
#                   [-T] [-t] [-v] function [function ...]
#
# WARNING: This tool traces function calls by instrumenting the entry to and
# return from each function. For commonly invoked functions such as memory
# allocations or file writes, this can be extremely expensive. Mind the
# overhead.
#
# NOTE: Due to instrumentation specifics, this tool cannot trace nested calls
# of a traced function within the same invocation; only the innermost call
# will be visible.
#
# By default, a minimum millisecond threshold of 1 is used.
#
# Copyright 2017, Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 30-Mar-2017   Sasha Goldshtein    Created this.

from __future__ import print_function
from bcc import BPF
import argparse
import time

examples = """examples:
  ./funcslower vfs_write        # trace vfs_write calls slower than 1ms
  ./funcslower -m 10 vfs_write  # same, but slower than 10ms
  ./funcslower -u 10 c:open     # trace open calls slower than 10us
  ./funcslower -p 135 c:open    # trace pid 135 only
  ./funcslower c:malloc c:free  # trace both malloc and free slower than 1ms
  ./funcslower -a 2 c:open      # show first two arguments to open
  ./funcslower -UK -m 10 c:open # show user and kernel stack frames of open calls slower than 10ms
  ./funcslower -f -UK c:open    # output in folded format for flame graphs
"""
parser = argparse.ArgumentParser(
    description="Trace slow kernel or user function calls.",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="tgid",
    help="trace this PID only")
parser.add_argument("-m", "--min-ms", type=float, dest="min_ms",
    help="minimum duration to trace (ms)")
parser.add_argument("-u", "--min-us", type=float, dest="min_us",
    help="minimum duration to trace (us)")
parser.add_argument("-a", "--arguments", type=int,
    help="print this many entry arguments, as hex")
parser.add_argument("-T", "--time", action="store_true",
    help="show HH:MM:SS timestamp")
parser.add_argument("-t", "--timestamp", action="store_true",
    help="show timestamp in seconds at us resolution")
parser.add_argument("-v", "--verbose", action="store_true",
    help="print the BPF program for debugging purposes")
parser.add_argument(metavar="function", nargs="+", dest="functions",
    help="function(s) to trace")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
parser.add_argument("-f", "--folded", action="store_true",
    help="output folded format, one line per stack (for flame graphs)")
parser.add_argument("-U", "--user-stack", action="store_true",
    help="output user stack trace")
parser.add_argument("-K", "--kernel-stack", action="store_true",
    help="output kernel stack trace")

args = parser.parse_args()
# fractions are allowed, but rounded to an integer nanosecond
if args.min_ms:
    duration_ns = int(args.min_ms * 1000000)
elif args.min_us:
    duration_ns = int(args.min_us * 1000)
else:
    duration_ns = 1000000   # default to 1ms

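# The eBPF program below is a template: TGID_FILTER and DURATION_NS are
# substituted with concrete values, and the GRAB_ARGS/USER_STACKS/KERNEL_STACKS
# sections are enabled via #define depending on the command-line flags.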
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>    // for TASK_COMM_LEN

struct entry_t {
    u64 id;
    u64 start_ns;
#ifdef GRAB_ARGS
#ifndef __s390x__
    u64 args[6];
#else
    u64 args[5];
#endif
#endif
#ifdef USER_STACKS
    int user_stack_id;
#endif
#ifdef KERNEL_STACKS
    int kernel_stack_id;
    u64 kernel_ip;
#endif
};

struct data_t {
    u64 id;
    u64 tgid_pid;
    u64 start_ns;
    u64 duration_ns;
    u64 retval;
    char comm[TASK_COMM_LEN];
#ifdef GRAB_ARGS
#ifndef __s390x__
    u64 args[6];
#else
    u64 args[5];
#endif
#endif
#ifdef USER_STACKS
    int user_stack_id;
#endif
#ifdef KERNEL_STACKS
    int kernel_stack_id;
    u64 kernel_ip;
#endif
};

BPF_HASH(entryinfo, u64, struct entry_t);
BPF_PERF_OUTPUT(events);

#if defined(USER_STACKS) || defined(KERNEL_STACKS)
BPF_STACK_TRACE(stacks, 2048);
#endif

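// Entry handler shared by all traced functions: records the start timestamp,
// optional arguments, and optional stack IDs in the entryinfo hash, keyed by
// the current tgid/pid so the return probe can find the matching entry.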
static int trace_entry(struct pt_regs *ctx, int id)
{
    u64 tgid_pid = bpf_get_current_pid_tgid();
    u32 tgid = tgid_pid >> 32;
    if (TGID_FILTER)
        return 0;

    u32 pid = tgid_pid;

    struct entry_t entry = {};
    entry.start_ns = bpf_ktime_get_ns();
    entry.id = id;
#ifdef GRAB_ARGS
    entry.args[0] = PT_REGS_PARM1(ctx);
    entry.args[1] = PT_REGS_PARM2(ctx);
    entry.args[2] = PT_REGS_PARM3(ctx);
    entry.args[3] = PT_REGS_PARM4(ctx);
    entry.args[4] = PT_REGS_PARM5(ctx);
#ifndef __s390x__
    entry.args[5] = PT_REGS_PARM6(ctx);
#endif
#endif

#ifdef USER_STACKS
    entry.user_stack_id = stacks.get_stackid(ctx, BPF_F_USER_STACK);
#endif

#ifdef KERNEL_STACKS
    entry.kernel_stack_id = stacks.get_stackid(ctx, 0);

    if (entry.kernel_stack_id >= 0) {
        u64 ip = PT_REGS_IP(ctx);
        u64 page_offset;

        // if ip isn't sane, leave kernel_ip as zero for later checking
#if defined(CONFIG_X86_64) && defined(__PAGE_OFFSET_BASE)
        // x86_64, roughly 4.11 through 4.16; some earlier kernels lack it
        page_offset = __PAGE_OFFSET_BASE;
#elif defined(CONFIG_X86_64) && defined(__PAGE_OFFSET_BASE_L4)
        // x86_64, 4.17 and later
#if defined(CONFIG_DYNAMIC_MEMORY_LAYOUT) && defined(CONFIG_X86_5LEVEL)
        page_offset = __PAGE_OFFSET_BASE_L5;
#else
        page_offset = __PAGE_OFFSET_BASE_L4;
#endif
#else
        // earlier x86_64 kernels (e.g., 4.6) come here, as do
        // arm64, s390, powerpc, x86_32
        page_offset = PAGE_OFFSET;
#endif

        if (ip > page_offset) {
            entry.kernel_ip = ip;
        }
    }
#endif

    entryinfo.update(&tgid_pid, &entry);

    return 0;
}

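// Return handler shared by all traced functions: looks up the entry record,
// computes the call duration, and submits a perf event if the duration
// exceeds the configured threshold.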
int trace_return(struct pt_regs *ctx)
{
    struct entry_t *entryp;
    u64 tgid_pid = bpf_get_current_pid_tgid();

    entryp = entryinfo.lookup(&tgid_pid);
    if (entryp == 0) {
        return 0;
    }

    u64 delta_ns = bpf_ktime_get_ns() - entryp->start_ns;
    entryinfo.delete(&tgid_pid);

    if (delta_ns < DURATION_NS)
        return 0;

    struct data_t data = {};
    data.id = entryp->id;
    data.tgid_pid = tgid_pid;
    data.start_ns = entryp->start_ns;
    data.duration_ns = delta_ns;
    data.retval = PT_REGS_RC(ctx);

#ifdef USER_STACKS
    data.user_stack_id = entryp->user_stack_id;
#endif

#ifdef KERNEL_STACKS
    data.kernel_stack_id = entryp->kernel_stack_id;
    data.kernel_ip = entryp->kernel_ip;
#endif

#ifdef GRAB_ARGS
    bpf_probe_read_kernel(&data.args[0], sizeof(data.args), entryp->args);
#endif
    bpf_get_current_comm(&data.comm, sizeof(data.comm));
    events.perf_submit(ctx, &data, sizeof(data));

    return 0;
}
"""

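# Fill in the template: substitute the latency threshold and PID filter, and
# enable the optional sections requested on the command line.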
bpf_text = bpf_text.replace('DURATION_NS', str(duration_ns))
if args.arguments:
    bpf_text = "#define GRAB_ARGS\n" + bpf_text
if args.user_stack:
    bpf_text = "#define USER_STACKS\n" + bpf_text
if args.kernel_stack:
    bpf_text = "#define KERNEL_STACKS\n" + bpf_text
if args.tgid:
    bpf_text = bpf_text.replace('TGID_FILTER', 'tgid != %d' % args.tgid)
else:
    bpf_text = bpf_text.replace('TGID_FILTER', '0')

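# Generate one entry wrapper per traced function; each wrapper passes the
# function's index so trace_entry knows which function was called.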
for i in range(len(args.functions)):
    bpf_text += """
int trace_%d(struct pt_regs *ctx) {
    return trace_entry(ctx, %d);
}
""" % (i, i)

if args.verbose or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

b = BPF(text=bpf_text)

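# Attach the probes: "library:function" attaches a uprobe/uretprobe pair to a
# user-space function, while a bare name attaches a kprobe/kretprobe pair to a
# kernel function. All functions share the same return handler.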
for i, function in enumerate(args.functions):
    if ":" in function:
        library, func = function.split(":")
        b.attach_uprobe(name=library, sym=func, fn_name="trace_%d" % i)
        b.attach_uretprobe(name=library, sym=func, fn_name="trace_return")
    else:
        b.attach_kprobe(event=function, fn_name="trace_%d" % i)
        b.attach_kretprobe(event=function, fn_name="trace_return")

time_designator = "us" if args.min_us else "ms"
time_value = args.min_us or args.min_ms or 1
time_multiplier = 1000 if args.min_us else 1000000
time_col = args.time or args.timestamp

# Do not print header when folded
if not args.folded:
    print("Tracing function calls slower than %g %s... Ctrl+C to quit." %
          (time_value, time_designator))
    print((("%-10s " % "TIME" if time_col else "") + "%-14s %-6s %7s %16s %s") %
        ("COMM", "PID", "LAT(%s)" % time_designator, "RVAL",
        "FUNC" + (" ARGS" if args.arguments else "")))

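# earliest_ts anchors -t/--timestamp output so times are printed relative to
# the first event seen.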
earliest_ts = 0

def time_str(event):
    if args.time:
        return "%-10s " % time.strftime("%H:%M:%S")
    if args.timestamp:
        global earliest_ts
        if earliest_ts == 0:
            earliest_ts = event.start_ns
        return "%-10.6f " % ((event.start_ns - earliest_ts) / 1000000000.0)
    return ""

def args_str(event):
    if not args.arguments:
        return ""
    return " ".join("0x%x" % arg for arg in event.args[:args.arguments])

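# Resolve and print the captured stacks. In folded mode, emit a single
# semicolon-separated line per event (suitable for flame graph tools);
# otherwise print one symbol per line.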
def print_stack(event):
    user_stack = []
    stack_traces = b.get_table("stacks")

    if args.user_stack and event.user_stack_id > 0:
        user_stack = stack_traces.walk(event.user_stack_id)

    kernel_stack = []
    if args.kernel_stack and event.kernel_stack_id > 0:
        kernel_tmp = stack_traces.walk(event.kernel_stack_id)

        # fix kernel stack
        for addr in kernel_tmp:
            kernel_stack.append(addr)

    do_delimiter = user_stack and kernel_stack

    if args.folded:
        # print folded stack output
        user_stack = list(user_stack)
        kernel_stack = list(kernel_stack)
        # decode symbol names so they can be joined with the decoded comm
        line = [event.comm.decode('utf-8', 'replace')] + \
            [b.sym(addr, event.tgid_pid).decode('utf-8', 'replace')
                for addr in reversed(user_stack)] + \
            (do_delimiter and ["-"] or []) + \
            [b.ksym(addr).decode('utf-8', 'replace')
                for addr in reversed(kernel_stack)]
        print("%s %d" % (";".join(line), 1))
    else:
        # print default multi-line stack output.
        for addr in kernel_stack:
            print("    %s" % b.ksym(addr).decode('utf-8', 'replace'))
        for addr in user_stack:
            print("    %s" % b.sym(addr, event.tgid_pid).decode('utf-8', 'replace'))

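# Perf buffer callback: format one output line per slow call, then print the
# requested stack traces.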
def print_event(cpu, data, size):
    event = b["events"].event(data)
    ts = float(event.duration_ns) / time_multiplier
    if not args.folded:
        print((time_str(event) + "%-14.14s %-6s %7.2f %16x %s %s") %
            (event.comm.decode('utf-8', 'replace'), event.tgid_pid >> 32,
             ts, event.retval, args.functions[event.id], args_str(event)))
    if args.user_stack or args.kernel_stack:
        print_stack(event)

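# Open the perf buffer and poll until interrupted; print_event is invoked for
# each record the BPF program submits.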
b["events"].open_perf_buffer(print_event, page_cnt=64)
while True:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
348