// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */

#include <errno.h>
#include <linux/err.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/perf_event.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"

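/* Buffer size for the "<cache>-<op>-<result>" string composed for
 * PERF_TYPE_HW_CACHE events in perf_config_hw_cache_str().
 */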
#define PERF_HW_CACHE_LEN 128

static struct hashmap *link_table;
static struct dump_data dd;

static const char *perf_type_name[PERF_TYPE_MAX] = {
	[PERF_TYPE_HARDWARE]			= "hardware",
	[PERF_TYPE_SOFTWARE]			= "software",
	[PERF_TYPE_TRACEPOINT]			= "tracepoint",
	[PERF_TYPE_HW_CACHE]			= "hw-cache",
	[PERF_TYPE_RAW]				= "raw",
	[PERF_TYPE_BREAKPOINT]			= "breakpoint",
};

const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= "cpu-cycles",
	[PERF_COUNT_HW_INSTRUCTIONS]		= "instructions",
	[PERF_COUNT_HW_CACHE_REFERENCES]	= "cache-references",
	[PERF_COUNT_HW_CACHE_MISSES]		= "cache-misses",
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= "branch-instructions",
	[PERF_COUNT_HW_BRANCH_MISSES]		= "branch-misses",
	[PERF_COUNT_HW_BUS_CYCLES]		= "bus-cycles",
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= "stalled-cycles-frontend",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= "stalled-cycles-backend",
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= "ref-cycles",
};

const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK]		= "cpu-clock",
	[PERF_COUNT_SW_TASK_CLOCK]		= "task-clock",
	[PERF_COUNT_SW_PAGE_FAULTS]		= "page-faults",
	[PERF_COUNT_SW_CONTEXT_SWITCHES]	= "context-switches",
	[PERF_COUNT_SW_CPU_MIGRATIONS]		= "cpu-migrations",
	[PERF_COUNT_SW_PAGE_FAULTS_MIN]		= "minor-faults",
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ]		= "major-faults",
	[PERF_COUNT_SW_ALIGNMENT_FAULTS]	= "alignment-faults",
	[PERF_COUNT_SW_EMULATION_FAULTS]	= "emulation-faults",
	[PERF_COUNT_SW_DUMMY]			= "dummy",
	[PERF_COUNT_SW_BPF_OUTPUT]		= "bpf-output",
	[PERF_COUNT_SW_CGROUP_SWITCHES]		= "cgroup-switches",
};

const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
	[PERF_COUNT_HW_CACHE_L1D]		= "L1-dcache",
	[PERF_COUNT_HW_CACHE_L1I]		= "L1-icache",
	[PERF_COUNT_HW_CACHE_LL]		= "LLC",
	[PERF_COUNT_HW_CACHE_DTLB]		= "dTLB",
	[PERF_COUNT_HW_CACHE_ITLB]		= "iTLB",
	[PERF_COUNT_HW_CACHE_BPU]		= "branch",
	[PERF_COUNT_HW_CACHE_NODE]		= "node",
};

const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
	[PERF_COUNT_HW_CACHE_OP_READ]		= "load",
	[PERF_COUNT_HW_CACHE_OP_WRITE]		= "store",
	[PERF_COUNT_HW_CACHE_OP_PREFETCH]	= "prefetch",
};

const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_RESULT_ACCESS]	= "refs",
	[PERF_COUNT_HW_CACHE_RESULT_MISS]	= "misses",
};

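/* Look up a symbolic name in one of the tables above; evaluates to NULL
 * when the id falls outside the table (e.g. an event type added by a
 * newer kernel).
 */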
#define perf_event_name(array, id) ({			\
	const char *event_str = NULL;			\
							\
	if ((id) < ARRAY_SIZE(array))			\
		event_str = array[id];			\
	event_str;					\
})

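/* Resolve a link specifier from the command line ("id ID" or
 * "pinned PATH") into an open link fd; returns a negative value on error.
 */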
static int link_parse_fd(int *argc, char ***argv)
{
	int fd;

	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;

		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0)
			p_err("failed to get link with ID %d: %s", id, strerror(errno));
		return fd;
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		return open_obj_pinned_any(path, BPF_OBJ_LINK);
	}

	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
	return -1;
}

static void
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *link_type_str;

	jsonw_uint_field(wtr, "id", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		jsonw_string_field(wtr, "type", link_type_str);
	else
		jsonw_uint_field(wtr, "type", info->type);

	jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
}

static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		jsonw_string_field(wtr, "attach_type", attach_type_str);
	else
		jsonw_uint_field(wtr, "attach_type", attach_type);
}

static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
{
	char devname[IF_NAMESIZE] = "(unknown)";

	if (ifindex)
		if_indextoname(ifindex, devname);
	else
		snprintf(devname, sizeof(devname), "(detached)");
	jsonw_string_field(wtr, "devname", devname);
	jsonw_uint_field(wtr, "ifindex", ifindex);
}

static bool is_iter_map_target(const char *target_name)
{
	return strcmp(target_name, "bpf_map_elem") == 0 ||
	       strcmp(target_name, "bpf_sk_storage_map") == 0;
}

static bool is_iter_cgroup_target(const char *target_name)
{
	return strcmp(target_name, "cgroup") == 0;
}

static const char *cgroup_order_string(__u32 order)
{
	switch (order) {
	case BPF_CGROUP_ITER_ORDER_UNSPEC:
		return "order_unspec";
	case BPF_CGROUP_ITER_SELF_ONLY:
		return "self_only";
	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
		return "descendants_pre";
	case BPF_CGROUP_ITER_DESCENDANTS_POST:
		return "descendants_post";
	case BPF_CGROUP_ITER_ANCESTORS_UP:
		return "ancestors_up";
	default: /* won't happen */
		return "unknown";
	}
}

static bool is_iter_task_target(const char *target_name)
{
	return strcmp(target_name, "task") == 0 ||
		strcmp(target_name, "task_file") == 0 ||
		strcmp(target_name, "task_vma") == 0;
}

static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	jsonw_string_field(wtr, "target_name", target_name);

	if (is_iter_map_target(target_name))
		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
		else if (info->iter.task.pid)
			jsonw_uint_field(wtr, "pid", info->iter.task.pid);
	}

	if (is_iter_cgroup_target(target_name)) {
		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
		jsonw_string_field(wtr, "order",
				   cgroup_order_string(info->iter.cgroup.order));
	}
}

void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_uint_field(json_wtr, "pf",
			 info->netfilter.pf);
	jsonw_uint_field(json_wtr, "hook",
			 info->netfilter.hooknum);
	jsonw_int_field(json_wtr, "prio",
			 info->netfilter.priority);
	jsonw_uint_field(json_wtr, "flags",
			 info->netfilter.flags);
}

static int get_prog_info(int prog_id, struct bpf_prog_info *info)
{
	__u32 len = sizeof(*info);
	int err, prog_fd;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0)
		return prog_fd;

	memset(info, 0, sizeof(*info));
	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
	if (err)
		p_err("can't get prog info: %s", strerror(errno));
	close(prog_fd);
	return err;
}

struct addr_cookie {
	__u64 addr;
	__u64 cookie;
};

static int cmp_addr_cookie(const void *A, const void *B)
{
	const struct addr_cookie *a = A, *b = B;

	if (a->addr == b->addr)
		return 0;
	return a->addr < b->addr ? -1 : 1;
}

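/* Build an array of (addr, cookie) pairs sorted by address, so it can be
 * matched against the sorted kernel symbol table. The caller frees the
 * returned array.
 */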
static struct addr_cookie *
get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
{
	struct addr_cookie *data;
	__u32 i;

	data = calloc(count, sizeof(data[0]));
	if (!data) {
		p_err("mem alloc failed");
		return NULL;
	}
	for (i = 0; i < count; i++) {
		data[i].addr = addrs[i];
		data[i].cookie = cookies[i];
	}
	qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
	return data;
}

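/* Walk the sorted kernel symbol table and the address-sorted
 * (addr, cookie) array in lockstep, emitting one object per attached
 * function.
 */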
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	struct addr_cookie *data;
	__u32 i, j = 0;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
	jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);
	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
				     u64_to_ptr(info->kprobe_multi.cookies),
				     info->kprobe_multi.count);
	if (!data)
		return;

	/* Load it once for all. */
	if (!dd.sym_count)
		kernel_syms_load(&dd);
	if (!dd.sym_count)
		goto error;

	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != data[j].addr)
			continue;
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
		/* Print null if it is vmlinux */
		if (dd.sym_mapping[i].module[0] == '\0') {
			jsonw_name(json_wtr, "module");
			jsonw_null(json_wtr);
		} else {
			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
		}
		jsonw_uint_field(json_wtr, "cookie", data[j].cookie);
		jsonw_end_object(json_wtr);
		if (j++ == info->kprobe_multi.count)
			break;
	}
	jsonw_end_array(json_wtr);
error:
	free(data);
}

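/* Reinterpret a __u64 info field carrying a pointer as a pointer to a
 * __u64 array.
 */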
static __u64 *u64_to_arr(__u64 val)
{
	return (__u64 *) u64_to_ptr(val);
}

static void
show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u32 i;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);

	for (i = 0; i < info->uprobe_multi.count; i++) {
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "offset",
				 u64_to_arr(info->uprobe_multi.offsets)[i]);
		jsonw_uint_field(json_wtr, "ref_ctr_offset",
				 u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]);
		jsonw_uint_field(json_wtr, "cookie",
				 u64_to_arr(info->uprobe_multi.cookies)[i]);
		jsonw_end_object(json_wtr);
	}
	jsonw_end_array(json_wtr);
}

static void
show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
	jsonw_string_field(wtr, "func",
			   u64_to_ptr(info->perf_event.kprobe.func_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
	jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
	jsonw_uint_field(wtr, "cookie", info->perf_event.kprobe.cookie);
}

static void
show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
	jsonw_string_field(wtr, "file",
			   u64_to_ptr(info->perf_event.uprobe.file_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
	jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie);
}

static void
show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	jsonw_string_field(wtr, "tracepoint",
			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
	jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie);
}

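/* A PERF_TYPE_HW_CACHE config encodes the cache id in the low byte, the
 * operation in the next byte and the result above that; compose a
 * "<cache>-<op>-<result>" string from it. The caller frees the returned
 * string.
 */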
static char *perf_config_hw_cache_str(__u64 config)
{
	const char *hw_cache, *result, *op;
	char *str = malloc(PERF_HW_CACHE_LEN);

	if (!str) {
		p_err("mem alloc failed");
		return NULL;
	}

	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
	if (hw_cache)
		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
	else
		snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);

	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
	if (op)
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%s-", op);
	else
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld-", (config >> 8) & 0xff);

	result = perf_event_name(evsel__hw_cache_result, config >> 16);
	if (result)
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%s", result);
	else
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld", config >> 16);
	return str;
}

static const char *perf_config_str(__u32 type, __u64 config)
{
	const char *perf_config;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		perf_config = perf_event_name(event_symbols_hw, config);
		break;
	case PERF_TYPE_SOFTWARE:
		perf_config = perf_event_name(event_symbols_sw, config);
		break;
	case PERF_TYPE_HW_CACHE:
		perf_config = perf_config_hw_cache_str(config);
		break;
	default:
		perf_config = NULL;
		break;
	}
	return perf_config;
}

static void
show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	perf_type = perf_event_name(perf_type_name, type);
	if (perf_type)
		jsonw_string_field(wtr, "event_type", perf_type);
	else
		jsonw_uint_field(wtr, "event_type", type);

	perf_config = perf_config_str(type, config);
	if (perf_config)
		jsonw_string_field(wtr, "event_config", perf_config);
	else
		jsonw_uint_field(wtr, "event_config", config);

	jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie);

	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
}

static int show_link_close_json(int fd, struct bpf_link_info *info)
{
	struct bpf_prog_info prog_info;
	const char *prog_type_str;
	int err;

	jsonw_start_object(json_wtr);

	show_link_header_json(info, json_wtr);

	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		jsonw_string_field(json_wtr, "tp_name",
				   u64_to_ptr(info->raw_tracepoint.tp_name));
		break;
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);
		if (err)
			return err;

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
		if (prog_type_str)
			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
		else
			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);

		show_link_attach_type_json(info->tracing.attach_type,
					   json_wtr);
		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		jsonw_lluint_field(json_wtr, "cgroup_id",
				   info->cgroup.cgroup_id);
		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_NETNS:
		jsonw_uint_field(json_wtr, "netns_ino",
				 info->netns.netns_ino);
		show_link_attach_type_json(info->netns.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_TCX:
		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_NETKIT:
		show_link_ifindex_json(info->netkit.ifindex, json_wtr);
		show_link_attach_type_json(info->netkit.attach_type, json_wtr);
		break;
	case BPF_LINK_TYPE_XDP:
		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
		break;
	case BPF_LINK_TYPE_STRUCT_OPS:
		jsonw_uint_field(json_wtr, "map_id",
				 info->struct_ops.map_id);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_UPROBE_MULTI:
		show_uprobe_multi_json(info, json_wtr);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_json(info, json_wtr);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			jsonw_string(json_wtr, entry->pvalue);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);

	return 0;
}

static void show_link_header_plain(struct bpf_link_info *info)
{
	const char *link_type_str;

	printf("%u: ", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		printf("%s  ", link_type_str);
	else
		printf("type %u  ", info->type);

	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
		printf("map %u  ", info->struct_ops.map_id);
	else
		printf("prog %u  ", info->prog_id);
}

static void show_link_attach_type_plain(__u32 attach_type)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		printf("attach_type %s  ", attach_type_str);
	else
		printf("attach_type %u  ", attach_type);
}

static void show_link_ifindex_plain(__u32 ifindex)
{
	char devname[IF_NAMESIZE * 2] = "(unknown)";
	char tmpname[IF_NAMESIZE];
	char *ret = NULL;

	if (ifindex)
		ret = if_indextoname(ifindex, tmpname);
	else
		snprintf(devname, sizeof(devname), "(detached)");
	if (ret)
		snprintf(devname, sizeof(devname), "%s(%d)",
			 tmpname, ifindex);
	printf("ifindex %s  ", devname);
}

static void show_iter_plain(struct bpf_link_info *info)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	printf("target_name %s  ", target_name);

	if (is_iter_map_target(target_name))
		printf("map_id %u  ", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			printf("tid %u ", info->iter.task.tid);
		else if (info->iter.task.pid)
			printf("pid %u ", info->iter.task.pid);
	}

	if (is_iter_cgroup_target(target_name)) {
		printf("cgroup_id %llu  ", info->iter.cgroup.cgroup_id);
		printf("order %s  ",
		       cgroup_order_string(info->iter.cgroup.order));
	}
}

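/* Map netfilter protocol family and hook numbers to the names familiar
 * from nft/iptables output.
 */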
static const char * const pf2name[] = {
	[NFPROTO_INET] = "inet",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_NETDEV] = "netdev",
	[NFPROTO_BRIDGE] = "bridge",
	[NFPROTO_IPV6] = "ip6",
};

static const char * const inethook2name[] = {
	[NF_INET_PRE_ROUTING] = "prerouting",
	[NF_INET_LOCAL_IN] = "input",
	[NF_INET_FORWARD] = "forward",
	[NF_INET_LOCAL_OUT] = "output",
	[NF_INET_POST_ROUTING] = "postrouting",
};

static const char * const arphook2name[] = {
	[NF_ARP_IN] = "input",
	[NF_ARP_OUT] = "output",
};

void netfilter_dump_plain(const struct bpf_link_info *info)
{
	const char *hookname = NULL, *pfname = NULL;
	unsigned int hook = info->netfilter.hooknum;
	unsigned int pf = info->netfilter.pf;

	if (pf < ARRAY_SIZE(pf2name))
		pfname = pf2name[pf];

	switch (pf) {
	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
	case NFPROTO_INET:
		if (hook < ARRAY_SIZE(inethook2name))
			hookname = inethook2name[hook];
		break;
	case NFPROTO_ARP:
		if (hook < ARRAY_SIZE(arphook2name))
			hookname = arphook2name[hook];
	default:
		break;
	}

	if (pfname)
		printf("\n\t%s", pfname);
	else
		printf("\n\tpf: %d", pf);

	if (hookname)
		printf(" %s", hookname);
	else
		printf(", hook %u,", hook);

	printf(" prio %d", info->netfilter.priority);

	if (info->netfilter.flags)
		printf(" flags 0x%x", info->netfilter.flags);
}

static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
	struct addr_cookie *data;
	__u32 i, j = 0;

	if (!info->kprobe_multi.count)
		return;

	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
		printf("\n\tkretprobe.multi  ");
	else
		printf("\n\tkprobe.multi  ");
	printf("func_cnt %u  ", info->kprobe_multi.count);
	if (info->kprobe_multi.missed)
		printf("missed %llu  ", info->kprobe_multi.missed);
	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
				     u64_to_ptr(info->kprobe_multi.cookies),
				     info->kprobe_multi.count);
	if (!data)
		return;

	/* Load it once for all. */
	if (!dd.sym_count)
		kernel_syms_load(&dd);
	if (!dd.sym_count)
		goto error;

	printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != data[j].addr)
			continue;
		printf("\n\t%016lx %-16llx %s",
		       dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
		if (dd.sym_mapping[i].module[0] != '\0')
			printf(" [%s]  ", dd.sym_mapping[i].module);
		else
			printf("  ");

		if (j++ == info->kprobe_multi.count)
			break;
	}
error:
	free(data);
}

static void show_uprobe_multi_plain(struct bpf_link_info *info)
{
	__u32 i;

	if (!info->uprobe_multi.count)
		return;

	if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
		printf("\n\turetprobe.multi  ");
	else
		printf("\n\tuprobe.multi  ");

	printf("path %s  ", (char *) u64_to_ptr(info->uprobe_multi.path));
	printf("func_cnt %u  ", info->uprobe_multi.count);

	if (info->uprobe_multi.pid)
		printf("pid %d  ", info->uprobe_multi.pid);

	printf("\n\t%-16s   %-16s   %-16s", "offset", "ref_ctr_offset", "cookies");
	for (i = 0; i < info->uprobe_multi.count; i++) {
		printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
			u64_to_arr(info->uprobe_multi.offsets)[i],
			u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
			u64_to_arr(info->uprobe_multi.cookies)[i]);
	}
}

static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
		return;

	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
		printf("\n\tkretprobe ");
	else
		printf("\n\tkprobe ");
	if (info->perf_event.kprobe.addr)
		printf("%llx ", info->perf_event.kprobe.addr);
	printf("%s", buf);
	if (info->perf_event.kprobe.offset)
		printf("+%#x", info->perf_event.kprobe.offset);
	if (info->perf_event.kprobe.missed)
		printf("  missed %llu", info->perf_event.kprobe.missed);
	if (info->perf_event.kprobe.cookie)
		printf("  cookie %llu", info->perf_event.kprobe.cookie);
	printf("  ");
}

static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.uprobe.file_name);
	if (buf[0] == '\0')
		return;

	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
		printf("\n\turetprobe ");
	else
		printf("\n\tuprobe ");
	printf("%s+%#x  ", buf, info->perf_event.uprobe.offset);
	if (info->perf_event.uprobe.cookie)
		printf("cookie %llu  ", info->perf_event.uprobe.cookie);
}

static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
{
	const char *buf;

	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
	if (buf[0] == '\0')
		return;

	printf("\n\ttracepoint %s  ", buf);
	if (info->perf_event.tracepoint.cookie)
		printf("cookie %llu  ", info->perf_event.tracepoint.cookie);
}

static void show_perf_event_event_plain(struct bpf_link_info *info)
{
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	printf("\n\tevent ");
	perf_type = perf_event_name(perf_type_name, type);
	if (perf_type)
		printf("%s:", perf_type);
	else
		printf("%u :", type);

	perf_config = perf_config_str(type, config);
	if (perf_config)
		printf("%s  ", perf_config);
	else
		printf("%llu  ", config);

	if (info->perf_event.event.cookie)
		printf("cookie %llu  ", info->perf_event.event.cookie);

	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
}

static int show_link_close_plain(int fd, struct bpf_link_info *info)
{
	struct bpf_prog_info prog_info;
	const char *prog_type_str;
	int err;

	show_link_header_plain(info);

	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		printf("\n\ttp '%s'  ",
		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
		break;
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);
		if (err)
			return err;

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
		if (prog_type_str)
			printf("\n\tprog_type %s  ", prog_type_str);
		else
			printf("\n\tprog_type %u  ", prog_info.type);

		show_link_attach_type_plain(info->tracing.attach_type);
		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
			printf("\n\ttarget_obj_id %u  target_btf_id %u  ",
			       info->tracing.target_obj_id,
			       info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		printf("\n\tcgroup_id %zu  ", (size_t)info->cgroup.cgroup_id);
		show_link_attach_type_plain(info->cgroup.attach_type);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_plain(info);
		break;
	case BPF_LINK_TYPE_NETNS:
		printf("\n\tnetns_ino %u  ", info->netns.netns_ino);
		show_link_attach_type_plain(info->netns.attach_type);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_plain(info);
		break;
	case BPF_LINK_TYPE_TCX:
		printf("\n\t");
		show_link_ifindex_plain(info->tcx.ifindex);
		show_link_attach_type_plain(info->tcx.attach_type);
		break;
	case BPF_LINK_TYPE_NETKIT:
		printf("\n\t");
		show_link_ifindex_plain(info->netkit.ifindex);
		show_link_attach_type_plain(info->netkit.attach_type);
		break;
	case BPF_LINK_TYPE_XDP:
		printf("\n\t");
		show_link_ifindex_plain(info->xdp.ifindex);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_plain(info);
		break;
	case BPF_LINK_TYPE_UPROBE_MULTI:
		show_uprobe_multi_plain(info);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_plain(info);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_plain(info);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_plain(info);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_plain(info);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hashmap__for_each_key_entry(link_table, entry, info->id)
			printf("\n\tpinned %s", (char *)entry->pvalue);
	}
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");

	return 0;
}

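/* Query the link info repeatedly: the first call reports which
 * variable-size fields (names, paths, address/cookie arrays) are
 * available, then buffers are allocated and the query retried so they
 * get filled in.
 */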
static int do_show_link(int fd)
{
	__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char path_buf[PATH_MAX];
	__u64 *addrs = NULL;
	char buf[PATH_MAX];
	int count;
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';
again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (err) {
		p_err("can't get link info: %s",
		      strerror(errno));
		close(fd);
		return err;
	}
	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
	    !info.raw_tracepoint.tp_name) {
		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
		info.raw_tracepoint.tp_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_ITER &&
	    !info.iter.target_name) {
		info.iter.target_name = ptr_to_u64(&buf);
		info.iter.target_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
	    !info.kprobe_multi.addrs) {
		count = info.kprobe_multi.count;
		if (count) {
			addrs = calloc(count, sizeof(__u64));
			if (!addrs) {
				p_err("mem alloc failed");
				close(fd);
				return -ENOMEM;
			}
			info.kprobe_multi.addrs = ptr_to_u64(addrs);
			cookies = calloc(count, sizeof(__u64));
			if (!cookies) {
				p_err("mem alloc failed");
				free(addrs);
				close(fd);
				return -ENOMEM;
			}
			info.kprobe_multi.cookies = ptr_to_u64(cookies);
			goto again;
		}
	}
	if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
	    !info.uprobe_multi.offsets) {
		count = info.uprobe_multi.count;
		if (count) {
			offsets = calloc(count, sizeof(__u64));
			if (!offsets) {
				p_err("mem alloc failed");
				close(fd);
				return -ENOMEM;
			}
			info.uprobe_multi.offsets = ptr_to_u64(offsets);
			ref_ctr_offsets = calloc(count, sizeof(__u64));
			if (!ref_ctr_offsets) {
				p_err("mem alloc failed");
				free(offsets);
				close(fd);
				return -ENOMEM;
			}
			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
			cookies = calloc(count, sizeof(__u64));
			if (!cookies) {
				p_err("mem alloc failed");
				free(ref_ctr_offsets);
				free(offsets);
				close(fd);
				return -ENOMEM;
			}
			info.uprobe_multi.cookies = ptr_to_u64(cookies);
			info.uprobe_multi.path = ptr_to_u64(path_buf);
			info.uprobe_multi.path_size = sizeof(path_buf);
			goto again;
		}
	}
	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_TRACEPOINT:
			if (!info.perf_event.tracepoint.tp_name) {
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
				info.perf_event.tracepoint.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			if (!info.perf_event.kprobe.func_name) {
				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
				info.perf_event.kprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			if (!info.perf_event.uprobe.file_name) {
				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
				info.perf_event.uprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		default:
			break;
		}
	}

	if (json_output)
		show_link_close_json(fd, &info);
	else
		show_link_close_plain(fd, &info);

	free(ref_ctr_offsets);
	free(cookies);
	free(offsets);
	free(addrs);
	close(fd);
	return 0;
}

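/* "bpftool link { show | list }": dump a single link when a specifier is
 * given, otherwise iterate over all links via bpf_link_get_next_id().
 */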
static int do_show(int argc, char **argv)
{
	__u32 id = 0;
	int err, fd;

	if (show_pinned) {
		link_table = hashmap__new(hash_fn_for_key_as_id,
					  equal_fn_for_key_as_id, NULL);
		if (IS_ERR(link_table)) {
			p_err("failed to create hashmap for pinned paths");
			return -1;
		}
		build_pinned_obj_table(link_table, BPF_OBJ_LINK);
	}
	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);

	if (argc == 2) {
		fd = link_parse_fd(&argc, &argv);
		if (fd < 0)
			return fd;
		do_show_link(fd);
		goto out;
	}

	if (argc)
		return BAD_ARG();

	if (json_output)
		jsonw_start_array(json_wtr);
	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT)
				break;
			p_err("can't get next link: %s%s", strerror(errno),
			      errno == EINVAL ? " -- kernel too old?" : "");
			break;
		}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			p_err("can't get link by id (%u): %s",
			      id, strerror(errno));
			break;
		}

		err = do_show_link(fd);
		if (err)
			break;
	}
	if (json_output)
		jsonw_end_array(json_wtr);

	delete_obj_refs_table(refs_table);

	if (show_pinned)
		delete_pinned_obj_table(link_table);

out:
	if (dd.sym_count)
		kernel_syms_destroy(&dd);
	return errno == ENOENT ? 0 : -1;
}

static int do_pin(int argc, char **argv)
{
	int err;

	err = do_pin_any(argc, argv, link_parse_fd);
	if (!err && json_output)
		jsonw_null(json_wtr);
	return err;
}

static int do_detach(int argc, char **argv)
{
	int err, fd;

	if (argc != 2) {
		p_err("link specifier is invalid or missing\n");
		return 1;
	}

	fd = link_parse_fd(&argc, &argv);
	if (fd < 0)
		return 1;

	err = bpf_link_detach(fd);
	if (err)
		err = -errno;
	close(fd);
	if (err) {
		p_err("failed link detach: %s", strerror(-err));
		return 1;
	}

	if (json_output)
		jsonw_null(json_wtr);

	return 0;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s { show | list }   [LINK]\n"
		"       %1$s %2$s pin        LINK  FILE\n"
		"       %1$s %2$s detach     LINK\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_LINK "\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-f|--bpffs} | {-n|--nomount} }\n"
		"",
		bin_name, argv[-2]);

	return 0;
}

static const struct cmd cmds[] = {
	{ "show",	do_show },
	{ "list",	do_show },
	{ "help",	do_help },
	{ "pin",	do_pin },
	{ "detach",	do_detach },
	{ 0 }
};

int do_link(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}