// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "biostacks.h"
#include "bits.bpf.h"
#include "maps.bpf.h"

#define MAX_ENTRIES 10240

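/* tunables: const volatile globals, set by the user-space loader before attach */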
const volatile bool targ_ms = false;
const volatile bool filter_dev = false;
const volatile __u32 targ_dev = -1;

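/*
 * Per-request tracking record: the issue timestamp plus the rqinfo key
 * (pid, comm, device, kernel stack) that the latency histogram is
 * aggregated by.
 */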
struct internal_rqinfo {
	u64 start_ts;
	struct rqinfo rqinfo;
};

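/* in-flight requests, keyed by struct request pointer */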
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct request *);
	__type(value, struct internal_rqinfo);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} rqinfos SEC(".maps");

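/* latency histograms, keyed by the rqinfo captured at issue time */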
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct rqinfo);
	__type(value, struct hist);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hists SEC(".maps");

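/* zero-filled template used to initialize new hists entries */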
static struct hist zero;

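/*
 * Record the start of a request: timestamp, issuing task, device and
 * kernel stack. For a merged bio the existing map element is refreshed
 * in place; otherwise a new element is inserted into rqinfos.
 */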
static __always_inline
int trace_start(void *ctx, struct request *rq, bool merge_bio)
{
	struct internal_rqinfo *i_rqinfop = NULL, i_rqinfo = {};
	struct gendisk *disk = BPF_CORE_READ(rq, rq_disk);
	u32 dev;

	dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
			BPF_CORE_READ(disk, first_minor)) : 0;
	if (filter_dev && targ_dev != dev)
		return 0;

	if (merge_bio)
		i_rqinfop = bpf_map_lookup_elem(&rqinfos, &rq);
	if (!i_rqinfop)
		i_rqinfop = &i_rqinfo;

	i_rqinfop->start_ts = bpf_ktime_get_ns();
	i_rqinfop->rqinfo.pid = bpf_get_current_pid_tgid();
	i_rqinfop->rqinfo.kern_stack_size =
		bpf_get_stack(ctx, i_rqinfop->rqinfo.kern_stack,
			sizeof(i_rqinfop->rqinfo.kern_stack), 0);
	bpf_get_current_comm(&i_rqinfop->rqinfo.comm,
			sizeof(i_rqinfop->rqinfo.comm));
	i_rqinfop->rqinfo.dev = dev;

	if (i_rqinfop == &i_rqinfo)
		bpf_map_update_elem(&rqinfos, &rq, i_rqinfop, 0);
	return 0;
}

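/* a request is issued: start tracking it */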
71 SEC("fentry/blk_account_io_start")
BPF_PROG(blk_account_io_start,struct request * rq)72 int BPF_PROG(blk_account_io_start, struct request *rq)
73 {
74 return trace_start(ctx, rq, false);
75 }
76
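/* a bio is merged into an existing request: refresh its tracking entry */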
77 SEC("kprobe/blk_account_io_merge_bio")
BPF_KPROBE(blk_account_io_merge_bio,struct request * rq)78 int BPF_KPROBE(blk_account_io_merge_bio, struct request *rq)
79 {
80 return trace_start(ctx, rq, true);
81 }
82
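/*
 * On completion, compute the time since trace_start(), convert to
 * milliseconds or microseconds, and bump the log2 histogram slot for
 * this request's rqinfo key.
 */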
83 SEC("fentry/blk_account_io_done")
BPF_PROG(blk_account_io_done,struct request * rq)84 int BPF_PROG(blk_account_io_done, struct request *rq)
85 {
86 u64 slot, ts = bpf_ktime_get_ns();
87 struct internal_rqinfo *i_rqinfop;
88 struct hist *histp;
89 s64 delta;
90
91 i_rqinfop = bpf_map_lookup_elem(&rqinfos, &rq);
92 if (!i_rqinfop)
93 return 0;
94 delta = (s64)(ts - i_rqinfop->start_ts);
95 if (delta < 0)
96 goto cleanup;
97 histp = bpf_map_lookup_or_try_init(&hists, &i_rqinfop->rqinfo, &zero);
98 if (!histp)
99 goto cleanup;
100 if (targ_ms)
101 delta /= 1000000U;
102 else
103 delta /= 1000U;
104 slot = log2l(delta);
105 if (slot >= MAX_SLOTS)
106 slot = MAX_SLOTS - 1;
107 __sync_fetch_and_add(&histp->slots[slot], 1);
108
109 cleanup:
110 bpf_map_delete_elem(&rqinfos, &rq);
111 return 0;
112 }
113
char LICENSE[] SEC("license") = "GPL";