// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;

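/*
 * Typical call sequence (sketch; driven from the perf lock contention code
 * in builtin-lock.c):
 *
 *   lock_contention_prepare(&con);  - open, size and load the skeleton,
 *                                     set up filters, attach the BPF progs
 *   lock_contention_start();        - let the BPF side record contention
 *   ... workload runs ...
 *   lock_contention_stop();
 *   lock_contention_read(&con);     - drain results into lock_stat entries
 *   lock_contention_finish();       - tear down the skeleton
 */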
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

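	/* map geometry must be set up front, between skeleton open and load */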
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

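	/* size the filter maps; they need at least one slot even when unused */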
	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

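	/* populate the filter maps and raise the matching has_* flags read by BPF */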
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

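	/* no explicit target: filter on the forked workload, if there is one */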
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

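	/* collect_lock_syms is only invoked via bpf_prog_test_run_opts(), not attached */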
	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);
	return 0;
}

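/* 'enabled' gates data collection inside the BPF programs */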
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

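/*
 * Pick a display name for a contention entry, depending on the aggregation
 * mode: task comm, lock name/address, or caller symbol plus offset.
 */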
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

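	/* export BPF-side failure counters so the caller can report them */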
	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

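	/* the idle task (pid 0) is not in task_data; name it "swapper" up front */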
	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

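	/*
	 * Collect addresses of semi-global locks (e.g. per-CPU rq locks) by
	 * test-running collect_lock_syms once; lock_contention_get_name()
	 * resolves them from the lock_syms map.
	 */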
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	map__load(maps__first(machine->kmaps)->map);

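	/* iterate over all keys in the BPF lock_stat map */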
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

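		/* choose the lock_stat key to match the aggregation mode */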
		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
			ls_key = key.lock_addr;
			break;
		default:
			goto next;
		}

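		/* merge into an existing entry if this key was seen before */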
		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

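/* disable the BPF side and free the skeleton (skel may be NULL if open failed) */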
int lock_contention_finish(void)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	return 0;
}