// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <internal/lib.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

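/*
 * Format @len bytes from @data as hexadecimal digits into @buf of size
 * @size. Returns the accumulated snprintf() length.
 */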
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

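/*
 * Handle the load side of PERF_RECORD_BPF_EVENT: tag the kernel dso of each
 * jited sub program so that annotation can later look up the program's
 * bpf_prog_info in perf_env by id and sub_id.
 */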
static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = machine->env;
	struct perf_bpil *info_linear;
	int id = event->bpf.id;
	unsigned int i;

	/* perf-record, no need to handle bpf-event */
	if (env == NULL)
		return 0;

	info_node = perf_env__find_bpf_prog_info(env, id);
	if (!info_node)
		return 0;
	info_linear = info_node->info_linear;

	for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
		u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
		u64 addr = addrs[i];
		struct map *map = maps__find(machine__kernel_maps(machine), addr);

		if (map) {
			struct dso *dso = map__dso(map);

			dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
			dso->bpf_prog.id = id;
			dso->bpf_prog.sub_id = i;
			dso->bpf_prog.env = env;
			map__put(map);
		}
	}
	return 0;
}

int machine__process_bpf(struct machine *machine, union perf_event *event,
			 struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_bpf(event, stdout);

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		return machine__process_bpf_event_load(machine, event, sample);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}
	return 0;
}

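/*
 * Copy the raw BTF data for @btf_id into a freshly allocated btf_node and
 * insert it into @env. Returns 0 on success, -1 on allocation failure or if
 * the id is already present.
 */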
static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	u32 data_size;
	const void *data;

	data = btf__raw_data(btf, &data_size);

	node = malloc(data_size + sizeof(struct btf_node));
	if (!node)
		return -1;

	node->id = btf_id;
	node->data_size = data_size;
	memcpy(node->data, data, data_size);

	if (!perf_env__insert_btf(env, node)) {
		/* Insertion failed because of a duplicate. */
		free(node);
		return -1;
	}
	return 0;
}

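/*
 * Build the ksymbol name for sub program @sub_id of @info in the form
 * "bpf_prog_<tag>[_<name>]", where <name> comes from BTF func info when
 * @btf is available. Returns the length of the generated name.
 */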
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
	struct perf_record_bpf_event *bpf_event = &event->bpf;
	struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct perf_bpil *info_linear;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * for perf-record and perf-report use header.env;
	 * otherwise, use global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
	arrays |= 1UL << PERF_BPIL_FUNC_INFO;
	arrays |= 1UL << PERF_BPIL_PROG_TAGS;
	arrays |= 1UL << PERF_BPIL_JITED_INSNS;
	arrays |= 1UL << PERF_BPIL_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

	info_linear = get_bpf_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		info_linear = NULL;
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		return -1;
	}

	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		free(info_linear);
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		return -2;
	}

	info = &info_linear->info;
	if (!info->jited_ksyms) {
		free(info_linear);
		return -1;
	}

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens) {
		free(info_linear);
		return -1;
	}

	/* check BTF func info support */
	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
		/* btf func info number should be same as sub_prog_cnt */
		if (sub_prog_cnt != info->nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			free(info_linear);
			return -1;
		}
		btf = btf__load_from_kernel_by_id(info->btf_id);
		if (libbpf_get_error(btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
			err = -1;
			goto out;
		}
		perf_env__fetch_btf(env, info->btf_id, btf);
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
		__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
		int name_len;

		*ksymbol_event = (struct perf_record_ksymbol) {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct perf_record_ksymbol, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};

		name_len = synthesize_bpf_prog_name(ksymbol_event->name,
						    KSYM_NAME_LEN, info, btf, i);
		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));

		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct perf_record_bpf_event) {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct perf_record_bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info->id,
		};
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
		info_linear = NULL;

		/*
		 * process after saving bpf_prog_info to env, so that
		 * required information is ready for look up
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(info_linear);
	btf__free(btf);
	return err ? -1 : 0;
}

struct kallsyms_parse {
	union perf_event	*event;
	perf_event__handler_t	 process;
	struct machine		*machine;
	struct perf_tool	*tool;
};

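/*
 * Synthesize a PERF_RECORD_KSYMBOL event covering one page for a BPF image
 * (trampoline or dispatcher) found in kallsyms.
 */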
static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
	struct machine *machine = data->machine;
	union perf_event *event = data->event;
	struct perf_record_ksymbol *ksymbol;
	int len;

	ksymbol = &event->ksymbol;

	*ksymbol = (struct perf_record_ksymbol) {
		.header = {
			.type = PERF_RECORD_KSYMBOL,
			.size = offsetof(struct perf_record_ksymbol, name),
		},
		.addr      = addr,
		.len       = page_size,
		.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
		.flags     = 0,
	};

	len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
	ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
	memset((void *) event + event->header.size, 0, machine->id_hdr_size);
	event->header.size += machine->id_hdr_size;

	return perf_tool__process_synth_event(data->tool, event, machine,
					      data->process);
}

static int
kallsyms_process_symbol(void *data, const char *_name,
			char type __maybe_unused, u64 start)
{
	char disp[KSYM_NAME_LEN];
	char *module, *name;
	unsigned long id;
	int err = 0;

	module = strchr(_name, '\t');
	if (!module)
		return 0;

	/* We are going after [bpf] module ... */
	if (strcmp(module + 1, "[bpf]"))
		return 0;

	name = memdup(_name, (module - _name) + 1);
	if (!name)
		return -ENOMEM;

	name[module - _name] = 0;

	/* .. and only for trampolines and dispatchers */
	if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
	    (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
		err = process_bpf_image(name, start, data);

	free(name);
	return err;
}

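/*
 * Iterate over all BPF programs currently loaded in the kernel and synthesize
 * PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for each of them, then walk
 * kallsyms to synthesize ksymbols for BPF trampoline and dispatcher images.
 */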
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	const char *kallsyms_filename = "/proc/kallsyms";
	struct kallsyms_parse arg;
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	if (opts->no_bpf_event)
		return 0;

	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;

	/* Synthesize all the bpf programs in system. */
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}

	/* Synthesize all the bpf images - trampolines/dispatchers. */
	if (symbol_conf.kallsyms_name != NULL)
		kallsyms_filename = symbol_conf.kallsyms_name;

	arg = (struct kallsyms_parse) {
		.event   = event,
		.process = process,
		.machine = machine,
		.tool    = session->tool,
	};

	if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
		pr_err("%s: failed to synthesize bpf images: %s\n",
		       __func__, strerror(errno));
	}

	free(event);
	return err;
}

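/*
 * Fetch bpf_prog_info and BTF for the program with @id and store both in
 * @env for later use (e.g. annotation of BPF programs).
 */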
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_node *info_node;
	struct perf_bpil *info_linear;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
	arrays |= 1UL << PERF_BPIL_FUNC_INFO;
	arrays |= 1UL << PERF_BPIL_PROG_TAGS;
	arrays |= 1UL << PERF_BPIL_JITED_INSNS;
	arrays |= 1UL << PERF_BPIL_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

	info_linear = get_bpf_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	btf = btf__load_from_kernel_by_id(btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	btf__free(btf);
	close(fd);
}

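/*
 * Side-band event callback: record bpf_prog_info and BTF in perf_env when a
 * BPF program is loaded while the session is running.
 */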
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
	struct perf_env *env = data;

	if (event->header.type != PERF_RECORD_BPF_EVENT)
		return -1;

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		perf_env__add_bpf_info(env, event->bpf.id);
		/* fall through */

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}

	return 0;
}

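/*
 * Add a dummy software event with attr.bpf_event set to the side-band evlist,
 * so that PERF_RECORD_BPF_EVENT records are delivered to bpf_event__sb_cb().
 */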
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
	struct perf_event_attr attr = {
		.type	          = PERF_TYPE_SOFTWARE,
		.config           = PERF_COUNT_SW_DUMMY,
		.sample_id_all    = 1,
		.watermark        = 1,
		.bpf_event        = 1,
		.size	   = sizeof(attr), /* to capture ABI version */
	};

	/*
	 * Older gcc versions don't support designated initializers, like above,
	 * for unnamed union members, such as the following:
	 */
	attr.wakeup_watermark = 1;

	return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

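/*
 * Print a header-style summary of @info (one line per sub program) to @fp,
 * resolving sub program names through the BTF stored in @env when available.
 */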
void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
				      struct perf_env *env,
				      FILE *fp)
{
	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
	char name[KSYM_NAME_LEN];
	struct btf *btf = NULL;
	u32 sub_prog_cnt, i;

	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens)
		return;

	if (info->btf_id) {
		struct btf_node *node;

		node = __perf_env__find_btf(env, info->btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	if (sub_prog_cnt == 1) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
			info->id, name, prog_addrs[0], prog_lens[0]);
		goto out;
	}

	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
	for (i = 0; i < sub_prog_cnt; i++) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
			i, name, prog_addrs[i], prog_lens[i]);
	}
out:
	btf__free(btf);
}