// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/cpumap.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;

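/*
 * Open and configure the lock contention BPF skeleton: size the maps for
 * the requested target and filters, load it, populate the filter maps and
 * attach the programs.  Returns 0 on success, -1 on failure.
 */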
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

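	/*
	 * Map sizes must be set before the skeleton is loaded; maps that are
	 * not needed in the current mode are shrunk to a single entry.
	 */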
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_cgrps)
		ncgrps = con->filters->nr_cgrps;
	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	/*
	 * collect_lock_syms is only needed when aggregating by lock address:
	 * it's run on demand via BPF_PROG_TEST_RUN in lock_contention_read().
	 * Autoload can only be changed before the skeleton is loaded, so
	 * disable it here for the other modes.
	 */
	if (con->aggr_mode != LOCK_AGGR_ADDR)
		bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

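	/* Filter map elements can only be updated once the skeleton is loaded. */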
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	/* these are set after load, so they cannot live in the (read-only) rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		if (cgroup_is_v2("perf_event"))
			skel->bss->use_cgroup_v2 = 1;

		read_all_cgroups(&con->cgroups);
	}

	if (lock_contention_bpf__attach(skel) < 0) {
		pr_err("Failed to attach lock-contention BPF skeleton\n");
		return -1;
	}

	return 0;
}

/*
 * Run the BPF program directly using BPF_PROG_TEST_RUN to record the end
 * timestamp (in ktime) so that deltas for still-contended locks can be
 * calculated against it.
 */
static void mark_end_timestamp(void)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.flags = BPF_F_TEST_RUN_ON_CPU,
	);
	int prog_fd = bpf_program__fd(skel->progs.end_timestamp);

	bpf_prog_test_run_opts(prog_fd, &opts);
}

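/*
 * Add the wait time accumulated between a tstamp entry's start timestamp
 * and end_ts to the matching element of the lock_stat map.
 */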
static void update_lock_stat(int map_fd, int pid, u64 end_ts,
			     enum lock_aggr_mode aggr_mode,
			     struct tstamp_data *ts_data)
{
	u64 delta;
	struct contention_key stat_key = {};
	struct contention_data stat_data;

	/* skip unused entries and entries that started at or after end_ts */
	if (ts_data->timestamp == 0 || ts_data->timestamp >= end_ts)
		return;

	delta = end_ts - ts_data->timestamp;

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		stat_key.stack_id = ts_data->stack_id;
		break;
	case LOCK_AGGR_TASK:
		stat_key.pid = pid;
		break;
	case LOCK_AGGR_ADDR:
		stat_key.lock_addr_or_cgroup = ts_data->lock;
		break;
	case LOCK_AGGR_CGROUP:
		/* TODO */
		return;
	default:
		return;
	}

	if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
		return;

	stat_data.total_time += delta;
	stat_data.count++;

	if (delta > stat_data.max_time)
		stat_data.max_time = delta;
	if (delta < stat_data.min_time)
		stat_data.min_time = delta;

	bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
}

/*
 * Account entries remaining in the tstamp map, i.e. locks that never saw
 * the corresponding lock:contention_end tracepoint, using end_ts.
 */
static void account_end_timestamp(struct lock_contention *con)
{
	int ts_fd, stat_fd;
	int *prev_key, key;
	u64 end_ts = skel->bss->end_ts;
	int total_cpus;
	enum lock_aggr_mode aggr_mode = con->aggr_mode;
	struct tstamp_data ts_data, *cpu_data;

	/* Iterate per-task tstamp map (key = TID) */
	ts_fd = bpf_map__fd(skel->maps.tstamp);
	stat_fd = bpf_map__fd(skel->maps.lock_stat);

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) {
			int pid = key;

			if (aggr_mode == LOCK_AGGR_TASK && con->owner)
				pid = ts_data.flags;

			update_lock_stat(stat_fd, pid, end_ts, aggr_mode,
					 &ts_data);
		}

		prev_key = &key;
	}

	/* Now check the per-cpu tstamp map, which has no TID. */
	if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP)
		return;

	total_cpus = cpu__max_cpu().cpu;
	ts_fd = bpf_map__fd(skel->maps.tstamp_cpu);

	cpu_data = calloc(total_cpus, sizeof(*cpu_data));
	if (cpu_data == NULL)
		return;

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0)
			goto next;

		for (int i = 0; i < total_cpus; i++) {
			update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
					 &cpu_data[i]);
		}

next:
		prev_key = &key;
	}
	free(cpu_data);
}

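/* Tell the BPF programs to start collecting lock contention data. */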
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

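/*
 * Stop collecting and record the end timestamp so that still-contended
 * locks can be accounted in account_end_timestamp().
 */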
int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	mark_end_timestamp();
	return 0;
}

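/*
 * Resolve a display name for a contention entry depending on the
 * aggregation mode: task comm, lock symbol or address, cgroup name, or
 * the first caller outside the lock internals in the saved stack trace.
 */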
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update the idle task's comm, which contains the CPU number */
		if (pid) {
			struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0)) {
				/* copy the comm out of the on-stack task data before returning */
				snprintf(name_buf, sizeof(name_buf), "%s", task.comm);
				name = name_buf;
			}
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64, cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

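/*
 * Read the BPF lock_stat map and fold each element into the userspace
 * lock_stat list, resolving a name for entries seen for the first time.
 */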
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	account_end_timestamp(con);

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

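	/* run collect_lock_syms on demand to fill the lock_syms map used for names */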
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure the kernel map is loaded */
	maps__load_first(machine->kmaps);

	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

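/* Detach and free the BPF skeleton and release the cached cgroup tree. */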
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

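	/* free the cgroup tree built by read_all_cgroups() */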
	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	return 0;
}