// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "runqlat.h"
#include "bits.bpf.h"
#include "maps.bpf.h"
#include "core_fixes.bpf.h"

#define MAX_ENTRIES	10240
#define TASK_RUNNING	0

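/*
 * Runtime knobs filled in by the userspace loader before the object is
 * loaded. They live in .rodata, so their values are constant at load time
 * and the verifier can prune branches that cannot be taken.
 *
 * A minimal usage sketch (assuming the usual bpftool-generated skeleton
 * name, runqlat_bpf, which is not defined in this file):
 *
 *	obj = runqlat_bpf__open();
 *	obj->rodata->targ_ms = true;
 *	obj->rodata->targ_tgid = some_pid;
 *	runqlat_bpf__load(obj);
 */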
const volatile bool targ_per_process = false;
const volatile bool targ_per_thread = false;
const volatile bool targ_per_pidns = false;
const volatile bool targ_ms = false;
const volatile pid_t targ_tgid = 0;

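/* start: thread id (pid) -> timestamp of when the task last became runnable */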
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, u64);
} start SEC(".maps");

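/* all-zero initial histogram, copied in by bpf_map_lookup_or_try_init() */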
static struct hist zero;

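/*
 * hists: one latency histogram per key; the key is a tgid, a pid, a pid
 * namespace inode, or the single catch-all key (u32)-1, depending on the
 * aggregation mode selected above.
 */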
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct hist);
} hists SEC(".maps");

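/*
 * Record the time at which @pid became runnable. Returns early for the
 * idle task (pid 0) and, when targ_tgid is set, for tasks outside the
 * target process.
 */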
static __always_inline
int trace_enqueue(u32 tgid, u32 pid)
{
	u64 ts;

	if (!pid)
		return 0;
	if (targ_tgid && targ_tgid != tgid)
		return 0;

	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &pid, &ts, 0);
	return 0;
}

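/* Return the inode number identifying @task's active pid namespace. */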
static __always_inline unsigned int pid_namespace(struct task_struct *task)
{
	struct pid *pid;
	unsigned int level;
	struct upid upid;
	unsigned int inum;

	/* get the pid namespace by following task_active_pid_ns(),
	 * pid->numbers[pid->level].ns
	 */
	pid = BPF_CORE_READ(task, thread_pid);
	level = BPF_CORE_READ(pid, level);
	bpf_core_read(&upid, sizeof(upid), &pid->numbers[level]);
	inum = BPF_CORE_READ(upid.ns, ns.inum);

	return inum;
}

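/*
 * sched_wakeup and sched_wakeup_new fire when a task is woken up (or a
 * freshly forked task is woken for the first time) and is about to be
 * enqueued on a run queue: start timing it.
 */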
SEC("tp_btf/sched_wakeup")
int BPF_PROG(sched_wakeup, struct task_struct *p)
{
	return trace_enqueue(p->tgid, p->pid);
}

SEC("tp_btf/sched_wakeup_new")
int BPF_PROG(sched_wakeup_new, struct task_struct *p)
{
	return trace_enqueue(p->tgid, p->pid);
}

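/*
 * sched_switch fires when @next is picked to run. If @prev is still
 * TASK_RUNNING it was preempted rather than blocked, so it goes straight
 * back onto the run queue and its timer is restarted. The run queue
 * latency of @next is the current time minus its enqueue timestamp.
 */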
SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	struct hist *histp;
	u64 *tsp, slot;
	u32 pid, hkey;
	s64 delta;

	if (get_task_state(prev) == TASK_RUNNING)
		trace_enqueue(prev->tgid, prev->pid);

	pid = next->pid;

	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp)
		return 0;
	delta = bpf_ktime_get_ns() - *tsp;
	if (delta < 0)
		goto cleanup;

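	/* Pick the histogram key; (u32)-1 selects a single global histogram. */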
	if (targ_per_process)
		hkey = next->tgid;
	else if (targ_per_thread)
		hkey = pid;
	else if (targ_per_pidns)
		hkey = pid_namespace(next);
	else
		hkey = -1;
	histp = bpf_map_lookup_or_try_init(&hists, &hkey, &zero);
	if (!histp)
		goto cleanup;
	if (!histp->comm[0])
		bpf_probe_read_kernel_str(&histp->comm, sizeof(histp->comm),
					  next->comm);
	if (targ_ms)
		delta /= 1000000U;
	else
		delta /= 1000U;
	slot = log2l(delta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

cleanup:
	bpf_map_delete_elem(&start, &pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";