// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <asm-generic/errno-base.h>

#include "lock_data.h"

/* for collect_lock_syms().  4096 was rejected by the verifier */
#define MAX_CPUS  1024

/* lock contention flags from include/trace/events/lock.h */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)

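/*
 * Per-task state saved at contention_begin and consumed at contention_end:
 * start timestamp, contended lock address and flags from the tracepoint,
 * and the callstack id when callstacks are collected.
 */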
struct tstamp_data {
	__u64 timestamp;
	__u64 lock;
	__u32 flags;
	__s32 stack_id;
};

/* callstack storage */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");

/* maintain timestamp at the beginning of contention */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct tstamp_data);
	__uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");

/* actual lock contention statistics */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct contention_key));
	__uint(value_size, sizeof(struct contention_data));
	__uint(max_entries, MAX_ENTRIES);
} lock_stat SEC(".maps");

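/* task command names for the task aggregation mode, keyed by pid */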
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct contention_task_data));
	__uint(max_entries, MAX_ENTRIES);
} task_data SEC(".maps");

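/* lock address to lock class (e.g. rq lock), filled by collect_lock_syms() */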
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, MAX_ENTRIES);
} lock_syms SEC(".maps");

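/*
 * Filter maps: when the corresponding has_* flag is set, only events whose
 * cpu, task pid, lock flags or lock address is present in these maps are
 * recorded (see can_record() below).
 */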
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} type_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} addr_filter SEC(".maps");

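/*
 * CO-RE "flavor" definitions to handle old and new kernels: the rw_semaphore
 * owner changed from a task_struct pointer to atomic_long_t, and mm_struct's
 * mmap_sem was renamed to mmap_lock.  The ___old/___new suffixes are
 * stripped during relocation.
 */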
struct rw_semaphore___old {
	struct task_struct *owner;
} __attribute__((preserve_access_index));

struct rw_semaphore___new {
	atomic_long_t owner;
} __attribute__((preserve_access_index));

struct mm_struct___old {
	struct rw_semaphore mmap_sem;
} __attribute__((preserve_access_index));

struct mm_struct___new {
	struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));

/* control flags */
int enabled;
int has_cpu;
int has_task;
int has_type;
int has_addr;
int needs_callstack;
int stack_skip;
int lock_owner;

/* determine the key of lock stat */
int aggr_mode;

/* error stat */
int task_fail;
int stack_fail;
int time_fail;
int data_fail;

int task_map_full;
int data_map_full;

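/* check the cpu, task, lock type and address filters for the current event */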
static inline int can_record(u64 *ctx)
{
	if (has_cpu) {
		__u32 cpu = bpf_get_smp_processor_id();
		__u8 *ok;

		ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
		if (!ok)
			return 0;
	}

	if (has_task) {
		__u8 *ok;
		__u32 pid = bpf_get_current_pid_tgid();

		ok = bpf_map_lookup_elem(&task_filter, &pid);
		if (!ok)
			return 0;
	}

	if (has_type) {
		__u8 *ok;
		__u32 flags = (__u32)ctx[1];

		ok = bpf_map_lookup_elem(&type_filter, &flags);
		if (!ok)
			return 0;
	}

	if (has_addr) {
		__u8 *ok;
		__u64 addr = ctx[0];

		ok = bpf_map_lookup_elem(&addr_filter, &addr);
		if (!ok)
			return 0;
	}

	return 1;
}

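/* save the comm of the given task in the task_data map, keyed by its pid */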
static inline int update_task_data(struct task_struct *task)
{
	struct contention_task_data *p;
	int pid, err;

	err = bpf_core_read(&pid, sizeof(pid), &task->pid);
	if (err)
		return -1;

	p = bpf_map_lookup_elem(&task_data, &pid);
	if (p == NULL && !task_map_full) {
		struct contention_task_data data = {};

		BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
		if (bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST) == -E2BIG)
			task_map_full = 1;
	}

	return 0;
}

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

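/*
 * Return the owner task of the contended lock, if it can be determined from
 * the mutex or rw_semaphore owner field.  The lower bits of the owner word
 * hold flags and are masked off.
 */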
static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)
{
	struct task_struct *task;
	__u64 owner = 0;

	if (flags & LCB_F_MUTEX) {
		struct mutex *mutex = (void *)lock;
		owner = BPF_CORE_READ(mutex, owner.counter);
	} else if (flags == LCB_F_READ || flags == LCB_F_WRITE) {
		/*
		 * Support for the BPF_TYPE_MATCHES argument to the
		 * __builtin_preserve_type_info builtin was added at some point during
		 * development of clang 15 and it's what is needed for
		 * bpf_core_type_matches.
		 */
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
		if (bpf_core_type_matches(struct rw_semaphore___old)) {
			struct rw_semaphore___old *rwsem = (void *)lock;
			owner = (unsigned long)BPF_CORE_READ(rwsem, owner);
		} else if (bpf_core_type_matches(struct rw_semaphore___new)) {
			struct rw_semaphore___new *rwsem = (void *)lock;
			owner = BPF_CORE_READ(rwsem, owner.counter);
		}
#else
		/* assume new struct */
		struct rw_semaphore *rwsem = (void *)lock;
		owner = BPF_CORE_READ(rwsem, owner.counter);
#endif
	}

	if (!owner)
		return NULL;

	task = (void *)(owner & ~7UL);
	return task;
}

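/*
 * Classify well-known locks (mmap_lock, siglock) by comparing the contended
 * address against the current task's mm and sighand structures.
 */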
static inline __u32 check_lock_type(__u64 lock, __u32 flags)
{
	struct task_struct *curr;
	struct mm_struct___old *mm_old;
	struct mm_struct___new *mm_new;
	struct sighand_struct *sighand;

	switch (flags) {
	case LCB_F_READ:  /* rwsem */
	case LCB_F_WRITE:
		curr = bpf_get_current_task_btf();
		if (curr->mm == NULL)
			break;
		mm_new = (void *)curr->mm;
		if (bpf_core_field_exists(mm_new->mmap_lock)) {
			if (&mm_new->mmap_lock == (void *)lock)
				return LCD_F_MMAP_LOCK;
			break;
		}
		mm_old = (void *)curr->mm;
		if (bpf_core_field_exists(mm_old->mmap_sem)) {
			if (&mm_old->mmap_sem == (void *)lock)
				return LCD_F_MMAP_LOCK;
		}
		break;
	case LCB_F_SPIN:  /* spinlock */
		curr = bpf_get_current_task_btf();
		sighand = curr->sighand;

		if (sighand && &sighand->siglock == (void *)lock)
			return LCD_F_SIGHAND_LOCK;
		break;
	default:
		break;
	}
	return 0;
}

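/*
 * Called at the contention_begin tracepoint: record the timestamp, lock
 * address and flags (and optionally the callstack or owner/task info) for
 * the current task in the tstamp map.
 */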
SEC("tp_btf/contention_begin")
int contention_begin(u64 *ctx)
{
	__u32 pid;
	struct tstamp_data *pelem;

	if (!enabled || !can_record(ctx))
		return 0;

	pid = bpf_get_current_pid_tgid();
	pelem = bpf_map_lookup_elem(&tstamp, &pid);
	if (pelem && pelem->lock)
		return 0;

	if (pelem == NULL) {
		struct tstamp_data zero = {};

		bpf_map_update_elem(&tstamp, &pid, &zero, BPF_ANY);
		pelem = bpf_map_lookup_elem(&tstamp, &pid);
		if (pelem == NULL) {
			__sync_fetch_and_add(&task_fail, 1);
			return 0;
		}
	}

	pelem->timestamp = bpf_ktime_get_ns();
	pelem->lock = (__u64)ctx[0];
	pelem->flags = (__u32)ctx[1];

	if (needs_callstack) {
		pelem->stack_id = bpf_get_stackid(ctx, &stacks,
						  BPF_F_FAST_STACK_CMP | stack_skip);
		if (pelem->stack_id < 0)
			__sync_fetch_and_add(&stack_fail, 1);
	} else if (aggr_mode == LOCK_AGGR_TASK) {
		struct task_struct *task;

		if (lock_owner) {
			task = get_lock_owner(pelem->lock, pelem->flags);

			/* The flags field is not used anymore.  Pass the owner pid instead. */
			if (task)
				pelem->flags = BPF_CORE_READ(task, pid);
			else
				pelem->flags = -1U;

		} else {
			task = bpf_get_current_task_btf();
		}

		if (task) {
			if (update_task_data(task) < 0 && lock_owner)
				pelem->flags = -1U;
		}
	}

	return 0;
}

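/*
 * Called at the contention_end tracepoint: compute the contention duration
 * and accumulate it into the lock_stat entry selected by the aggregation
 * mode, then drop the tstamp entry.
 */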
SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
	__u32 pid;
	struct tstamp_data *pelem;
	struct contention_key key = {};
	struct contention_data *data;
	__u64 duration;

	if (!enabled)
		return 0;

	pid = bpf_get_current_pid_tgid();
	pelem = bpf_map_lookup_elem(&tstamp, &pid);
	if (!pelem || pelem->lock != ctx[0])
		return 0;

	duration = bpf_ktime_get_ns() - pelem->timestamp;
	if ((__s64)duration < 0) {
		bpf_map_delete_elem(&tstamp, &pid);
		__sync_fetch_and_add(&time_fail, 1);
		return 0;
	}

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_TASK:
		if (lock_owner)
			key.pid = pelem->flags;
		else
			key.pid = pid;
		if (needs_callstack)
			key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_ADDR:
		key.lock_addr = pelem->lock;
		if (needs_callstack)
			key.stack_id = pelem->stack_id;
		break;
	default:
		/* should not happen */
		return 0;
	}

	data = bpf_map_lookup_elem(&lock_stat, &key);
	if (!data) {
		if (data_map_full) {
			bpf_map_delete_elem(&tstamp, &pid);
			__sync_fetch_and_add(&data_fail, 1);
			return 0;
		}

		struct contention_data first = {
			.total_time = duration,
			.max_time = duration,
			.min_time = duration,
			.count = 1,
			.flags = pelem->flags,
		};
		int err;

		if (aggr_mode == LOCK_AGGR_ADDR)
			first.flags |= check_lock_type(pelem->lock, pelem->flags);

		err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
		if (err < 0) {
			if (err == -E2BIG)
				data_map_full = 1;
			__sync_fetch_and_add(&data_fail, 1);
		}
		bpf_map_delete_elem(&tstamp, &pid);
		return 0;
	}

	__sync_fetch_and_add(&data->total_time, duration);
	__sync_fetch_and_add(&data->count, 1);

	/* FIXME: need atomic operations */
	if (data->max_time < duration)
		data->max_time = duration;
	if (data->min_time > duration)
		data->min_time = duration;

	bpf_map_delete_elem(&tstamp, &pid);
	return 0;
}

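/*
 * The per-cpu runqueue lock field was renamed from 'lock' to '__lock' in
 * newer kernels; use CO-RE flavors to find its offset on either version.
 */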
extern struct rq runqueues __ksym;

struct rq___old {
	raw_spinlock_t lock;
} __attribute__((preserve_access_index));

struct rq___new {
	raw_spinlock_t __lock;
} __attribute__((preserve_access_index));

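/*
 * Populate the lock_syms map with the address of each cpu's runqueue lock.
 * This is not meant to run from a real event; user space runs it once
 * (e.g. via a BPF program test run) to fill the map before reporting.
 */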
SEC("raw_tp/bpf_test_finish")
int BPF_PROG(collect_lock_syms)
{
	__u64 lock_addr, lock_off;
	__u32 lock_flag;

	if (bpf_core_field_exists(struct rq___new, __lock))
		lock_off = offsetof(struct rq___new, __lock);
	else
		lock_off = offsetof(struct rq___old, lock);

	for (int i = 0; i < MAX_CPUS; i++) {
		struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);

		if (rq == NULL)
			break;

		lock_addr = (__u64)(void *)rq + lock_off;
		lock_flag = LOCK_CLASS_RQLOCK;
		bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
	}
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";