1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
10 * Copyright (C) 2019 Isovalent, Inc.
11 */
12
13#ifndef _GNU_SOURCE
14#define _GNU_SOURCE
15#endif
16#include <stdlib.h>
17#include <stdio.h>
18#include <stdarg.h>
19#include <libgen.h>
20#include <inttypes.h>
21#include <limits.h>
22#include <string.h>
23#include <unistd.h>
24#include <endian.h>
25#include <fcntl.h>
26#include <errno.h>
27#include <ctype.h>
28#include <asm/unistd.h>
29#include <linux/err.h>
30#include <linux/kernel.h>
31#include <linux/bpf.h>
32#include <linux/btf.h>
33#include <linux/filter.h>
34#include <linux/limits.h>
35#include <linux/perf_event.h>
36#include <linux/ring_buffer.h>
37#include <linux/version.h>
38#include <sys/epoll.h>
39#include <sys/ioctl.h>
40#include <sys/mman.h>
41#include <sys/stat.h>
42#include <sys/types.h>
43#include <sys/vfs.h>
44#include <sys/utsname.h>
45#include <sys/resource.h>
46#include <libelf.h>
47#include <gelf.h>
48#include <zlib.h>
49
50#include "libbpf.h"
51#include "bpf.h"
52#include "btf.h"
53#include "str_error.h"
54#include "libbpf_internal.h"
55#include "hashmap.h"
56#include "bpf_gen_internal.h"
57
58#ifndef BPF_FS_MAGIC
59#define BPF_FS_MAGIC		0xcafe4a11
60#endif
61
62#define BPF_INSN_SZ (sizeof(struct bpf_insn))
63
64/* vsprintf() in __base_pr() uses a nonliteral format string. It may break
65 * compilation if the user enables that warning. Disable it explicitly.
66 */
67#pragma GCC diagnostic ignored "-Wformat-nonliteral"
68
69#define __printf(a, b)	__attribute__((format(printf, a, b)))
70
71static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
72static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
73
74static const char * const attach_type_name[] = {
75	[BPF_CGROUP_INET_INGRESS]	= "cgroup_inet_ingress",
76	[BPF_CGROUP_INET_EGRESS]	= "cgroup_inet_egress",
77	[BPF_CGROUP_INET_SOCK_CREATE]	= "cgroup_inet_sock_create",
78	[BPF_CGROUP_INET_SOCK_RELEASE]	= "cgroup_inet_sock_release",
79	[BPF_CGROUP_SOCK_OPS]		= "cgroup_sock_ops",
80	[BPF_CGROUP_DEVICE]		= "cgroup_device",
81	[BPF_CGROUP_INET4_BIND]		= "cgroup_inet4_bind",
82	[BPF_CGROUP_INET6_BIND]		= "cgroup_inet6_bind",
83	[BPF_CGROUP_INET4_CONNECT]	= "cgroup_inet4_connect",
84	[BPF_CGROUP_INET6_CONNECT]	= "cgroup_inet6_connect",
85	[BPF_CGROUP_INET4_POST_BIND]	= "cgroup_inet4_post_bind",
86	[BPF_CGROUP_INET6_POST_BIND]	= "cgroup_inet6_post_bind",
87	[BPF_CGROUP_INET4_GETPEERNAME]	= "cgroup_inet4_getpeername",
88	[BPF_CGROUP_INET6_GETPEERNAME]	= "cgroup_inet6_getpeername",
89	[BPF_CGROUP_INET4_GETSOCKNAME]	= "cgroup_inet4_getsockname",
90	[BPF_CGROUP_INET6_GETSOCKNAME]	= "cgroup_inet6_getsockname",
91	[BPF_CGROUP_UDP4_SENDMSG]	= "cgroup_udp4_sendmsg",
92	[BPF_CGROUP_UDP6_SENDMSG]	= "cgroup_udp6_sendmsg",
93	[BPF_CGROUP_SYSCTL]		= "cgroup_sysctl",
94	[BPF_CGROUP_UDP4_RECVMSG]	= "cgroup_udp4_recvmsg",
95	[BPF_CGROUP_UDP6_RECVMSG]	= "cgroup_udp6_recvmsg",
96	[BPF_CGROUP_GETSOCKOPT]		= "cgroup_getsockopt",
97	[BPF_CGROUP_SETSOCKOPT]		= "cgroup_setsockopt",
98	[BPF_SK_SKB_STREAM_PARSER]	= "sk_skb_stream_parser",
99	[BPF_SK_SKB_STREAM_VERDICT]	= "sk_skb_stream_verdict",
100	[BPF_SK_SKB_VERDICT]		= "sk_skb_verdict",
101	[BPF_SK_MSG_VERDICT]		= "sk_msg_verdict",
102	[BPF_LIRC_MODE2]		= "lirc_mode2",
103	[BPF_FLOW_DISSECTOR]		= "flow_dissector",
104	[BPF_TRACE_RAW_TP]		= "trace_raw_tp",
105	[BPF_TRACE_FENTRY]		= "trace_fentry",
106	[BPF_TRACE_FEXIT]		= "trace_fexit",
107	[BPF_MODIFY_RETURN]		= "modify_return",
108	[BPF_LSM_MAC]			= "lsm_mac",
109	[BPF_LSM_CGROUP]		= "lsm_cgroup",
110	[BPF_SK_LOOKUP]			= "sk_lookup",
111	[BPF_TRACE_ITER]		= "trace_iter",
112	[BPF_XDP_DEVMAP]		= "xdp_devmap",
113	[BPF_XDP_CPUMAP]		= "xdp_cpumap",
114	[BPF_XDP]			= "xdp",
115	[BPF_SK_REUSEPORT_SELECT]	= "sk_reuseport_select",
116	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE]	= "sk_reuseport_select_or_migrate",
117	[BPF_PERF_EVENT]		= "perf_event",
118	[BPF_TRACE_KPROBE_MULTI]	= "trace_kprobe_multi",
119};
120
121static const char * const link_type_name[] = {
122	[BPF_LINK_TYPE_UNSPEC]			= "unspec",
123	[BPF_LINK_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
124	[BPF_LINK_TYPE_TRACING]			= "tracing",
125	[BPF_LINK_TYPE_CGROUP]			= "cgroup",
126	[BPF_LINK_TYPE_ITER]			= "iter",
127	[BPF_LINK_TYPE_NETNS]			= "netns",
128	[BPF_LINK_TYPE_XDP]			= "xdp",
129	[BPF_LINK_TYPE_PERF_EVENT]		= "perf_event",
130	[BPF_LINK_TYPE_KPROBE_MULTI]		= "kprobe_multi",
131	[BPF_LINK_TYPE_STRUCT_OPS]		= "struct_ops",
132};
133
134static const char * const map_type_name[] = {
135	[BPF_MAP_TYPE_UNSPEC]			= "unspec",
136	[BPF_MAP_TYPE_HASH]			= "hash",
137	[BPF_MAP_TYPE_ARRAY]			= "array",
138	[BPF_MAP_TYPE_PROG_ARRAY]		= "prog_array",
139	[BPF_MAP_TYPE_PERF_EVENT_ARRAY]		= "perf_event_array",
140	[BPF_MAP_TYPE_PERCPU_HASH]		= "percpu_hash",
141	[BPF_MAP_TYPE_PERCPU_ARRAY]		= "percpu_array",
142	[BPF_MAP_TYPE_STACK_TRACE]		= "stack_trace",
143	[BPF_MAP_TYPE_CGROUP_ARRAY]		= "cgroup_array",
144	[BPF_MAP_TYPE_LRU_HASH]			= "lru_hash",
145	[BPF_MAP_TYPE_LRU_PERCPU_HASH]		= "lru_percpu_hash",
146	[BPF_MAP_TYPE_LPM_TRIE]			= "lpm_trie",
147	[BPF_MAP_TYPE_ARRAY_OF_MAPS]		= "array_of_maps",
148	[BPF_MAP_TYPE_HASH_OF_MAPS]		= "hash_of_maps",
149	[BPF_MAP_TYPE_DEVMAP]			= "devmap",
150	[BPF_MAP_TYPE_DEVMAP_HASH]		= "devmap_hash",
151	[BPF_MAP_TYPE_SOCKMAP]			= "sockmap",
152	[BPF_MAP_TYPE_CPUMAP]			= "cpumap",
153	[BPF_MAP_TYPE_XSKMAP]			= "xskmap",
154	[BPF_MAP_TYPE_SOCKHASH]			= "sockhash",
155	[BPF_MAP_TYPE_CGROUP_STORAGE]		= "cgroup_storage",
156	[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY]	= "reuseport_sockarray",
157	[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE]	= "percpu_cgroup_storage",
158	[BPF_MAP_TYPE_QUEUE]			= "queue",
159	[BPF_MAP_TYPE_STACK]			= "stack",
160	[BPF_MAP_TYPE_SK_STORAGE]		= "sk_storage",
161	[BPF_MAP_TYPE_STRUCT_OPS]		= "struct_ops",
162	[BPF_MAP_TYPE_RINGBUF]			= "ringbuf",
163	[BPF_MAP_TYPE_INODE_STORAGE]		= "inode_storage",
164	[BPF_MAP_TYPE_TASK_STORAGE]		= "task_storage",
165	[BPF_MAP_TYPE_BLOOM_FILTER]		= "bloom_filter",
166};
167
168static const char * const prog_type_name[] = {
169	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
170	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
171	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
172	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
173	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
174	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
175	[BPF_PROG_TYPE_XDP]			= "xdp",
176	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
177	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
178	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
179	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
180	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
181	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
182	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
183	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
184	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
185	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
186	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
187	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
188	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
189	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
190	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
191	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
192	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
193	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
194	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
195	[BPF_PROG_TYPE_TRACING]			= "tracing",
196	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
197	[BPF_PROG_TYPE_EXT]			= "ext",
198	[BPF_PROG_TYPE_LSM]			= "lsm",
199	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
200	[BPF_PROG_TYPE_SYSCALL]			= "syscall",
201};
202
203static int __base_pr(enum libbpf_print_level level, const char *format,
204		     va_list args)
205{
206	if (level == LIBBPF_DEBUG)
207		return 0;
208
209	return vfprintf(stderr, format, args);
210}
211
212static libbpf_print_fn_t __libbpf_pr = __base_pr;
213
214libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
215{
216	libbpf_print_fn_t old_print_fn = __libbpf_pr;
217
218	__libbpf_pr = fn;
219	return old_print_fn;
220}
221
222__printf(2, 3)
223void libbpf_print(enum libbpf_print_level level, const char *format, ...)
224{
225	va_list args;
226
227	if (!__libbpf_pr)
228		return;
229
230	va_start(args, format);
231	__libbpf_pr(level, format, args);
232	va_end(args);
233}
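
/* Example (illustrative): an application can override the default print
 * behavior, e.g. to also see the LIBBPF_DEBUG messages that __base_pr()
 * filters out:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *	...
 *	libbpf_set_print(my_print);
 *
 * Passing NULL to libbpf_set_print() silences libbpf output entirely.
 */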
234
235static void pr_perm_msg(int err)
236{
237	struct rlimit limit;
238	char buf[100];
239
240	if (err != -EPERM || geteuid() != 0)
241		return;
242
243	err = getrlimit(RLIMIT_MEMLOCK, &limit);
244	if (err)
245		return;
246
247	if (limit.rlim_cur == RLIM_INFINITY)
248		return;
249
250	if (limit.rlim_cur < 1024)
251		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
252	else if (limit.rlim_cur < 1024*1024)
253		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
254	else
255		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
256
257	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
258		buf);
259}
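
/* Example (illustrative): on kernels before 5.11, where BPF memory is
 * charged against RLIMIT_MEMLOCK, an application hitting the warning
 * above may bump the limit before loading BPF objects:
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	if (setrlimit(RLIMIT_MEMLOCK, &r))
 *		perror("setrlimit");
 *
 * Kernels 5.11+ use memcg-based accounting instead, so this is not
 * needed there.
 */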
260
261#define STRERR_BUFSIZE  128
262
263/* Copied from tools/perf/util/util.h */
264#ifndef zfree
265# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
266#endif
267
268#ifndef zclose
269# define zclose(fd) ({			\
270	int ___err = 0;			\
271	if ((fd) >= 0)			\
272		___err = close((fd));	\
273	fd = -1;			\
274	___err; })
275#endif
276
277static inline __u64 ptr_to_u64(const void *ptr)
278{
279	return (__u64) (unsigned long) ptr;
280}
281
282int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
283{
284	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
285	return 0;
286}
287
288__u32 libbpf_major_version(void)
289{
290	return LIBBPF_MAJOR_VERSION;
291}
292
293__u32 libbpf_minor_version(void)
294{
295	return LIBBPF_MINOR_VERSION;
296}
297
298const char *libbpf_version_string(void)
299{
300#define __S(X) #X
301#define _S(X) __S(X)
302	return  "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
303#undef _S
304#undef __S
305}
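
/* Example (illustrative): the two-level _S()/__S() indirection makes the
 * preprocessor expand the version macros before stringifying them. With
 * LIBBPF_MAJOR_VERSION == 1 and LIBBPF_MINOR_VERSION == 0 this yields
 * "v1.0"; a direct #LIBBPF_MAJOR_VERSION would instead produce the
 * literal string "LIBBPF_MAJOR_VERSION".
 */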
306
307enum reloc_type {
308	RELO_LD64,
309	RELO_CALL,
310	RELO_DATA,
311	RELO_EXTERN_VAR,
312	RELO_EXTERN_FUNC,
313	RELO_SUBPROG_ADDR,
314	RELO_CORE,
315};
316
317struct reloc_desc {
318	enum reloc_type type;
319	int insn_idx;
320	union {
321		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
322		struct {
323			int map_idx;
324			int sym_off;
325		};
326	};
327};
328
329/* stored as sec_def->cookie for all libbpf-supported SEC()s */
330enum sec_def_flags {
331	SEC_NONE = 0,
332	/* expected_attach_type is optional, in case the kernel doesn't support it */
333	SEC_EXP_ATTACH_OPT = 1,
334	/* legacy, only used by libbpf_get_type_names() and
335	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
336	 * This used to be associated with cgroup (and a few other) BPF programs
337	 * that were attachable through the BPF_PROG_ATTACH command. Pretty
338	 * meaningless nowadays, though.
339	 */
340	SEC_ATTACHABLE = 2,
341	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
342	/* attachment target is specified through BTF ID in either kernel or
343	 * other BPF program's BTF object */
344	SEC_ATTACH_BTF = 4,
345	/* BPF program type allows sleeping/blocking in kernel */
346	SEC_SLEEPABLE = 8,
347	/* BPF program supports non-linear XDP buffers */
348	SEC_XDP_FRAGS = 16,
349};
350
351struct bpf_sec_def {
352	char *sec;
353	enum bpf_prog_type prog_type;
354	enum bpf_attach_type expected_attach_type;
355	long cookie;
356	int handler_id;
357
358	libbpf_prog_setup_fn_t prog_setup_fn;
359	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
360	libbpf_prog_attach_fn_t prog_attach_fn;
361};
362
363/*
364 * bpf_prog should be a better name but it has been used in
365 * linux/filter.h.
366 */
367struct bpf_program {
368	char *name;
369	char *sec_name;
370	size_t sec_idx;
371	const struct bpf_sec_def *sec_def;
372	/* this program's instruction offset (in number of instructions)
373	 * within its containing ELF section
374	 */
375	size_t sec_insn_off;
376	/* number of original instructions in ELF section belonging to this
377	 * program, not taking into account subprogram instructions possibly
378	 * appended later during relocation
379	 */
380	size_t sec_insn_cnt;
381	/* Offset (in number of instructions) of the start of instructions
382	 * belonging to this BPF program within its containing main BPF
383	 * program. For the entry-point (main) BPF program, this is always
384	 * zero. For a sub-program, this gets reset before each main BPF
385	 * program is processed and relocated and is used to determine
386	 * whether the sub-program was already appended to the main program,
387	 * and if yes, at which instruction offset.
388	 */
389	size_t sub_insn_off;
390
391	/* instructions that belong to BPF program; insns[0] is located at
392	 * sec_insn_off instruction within its ELF section in ELF file, so
393	 * when mapping ELF file instruction index to the local instruction,
394	 * one needs to subtract sec_insn_off; and vice versa.
395	 */
396	struct bpf_insn *insns;
397	/* actual number of instructions in this BPF program's image; for
398	 * entry-point BPF programs this includes the size of the main program
399	 * itself plus all the used sub-programs, appended at the end
400	 */
401	size_t insns_cnt;
402
403	struct reloc_desc *reloc_desc;
404	int nr_reloc;
405
406	/* BPF verifier log settings */
407	char *log_buf;
408	size_t log_size;
409	__u32 log_level;
410
411	struct bpf_object *obj;
412
413	int fd;
414	bool autoload;
415	bool mark_btf_static;
416	enum bpf_prog_type type;
417	enum bpf_attach_type expected_attach_type;
418
419	int prog_ifindex;
420	__u32 attach_btf_obj_fd;
421	__u32 attach_btf_id;
422	__u32 attach_prog_fd;
423
424	void *func_info;
425	__u32 func_info_rec_size;
426	__u32 func_info_cnt;
427
428	void *line_info;
429	__u32 line_info_rec_size;
430	__u32 line_info_cnt;
431	__u32 prog_flags;
432};
433
434struct bpf_struct_ops {
435	const char *tname;
436	const struct btf_type *type;
437	struct bpf_program **progs;
438	__u32 *kern_func_off;
439	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
440	void *data;
441	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
442	 *      btf_vmlinux's format.
443	 * struct bpf_struct_ops_tcp_congestion_ops {
444	 *	[... some other kernel fields ...]
445	 *	struct tcp_congestion_ops data;
446	 * }
447	 * kern_vdata size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
448	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
449	 * from "data".
450	 */
451	void *kern_vdata;
452	__u32 type_id;
453};
454
455#define DATA_SEC ".data"
456#define BSS_SEC ".bss"
457#define RODATA_SEC ".rodata"
458#define KCONFIG_SEC ".kconfig"
459#define KSYMS_SEC ".ksyms"
460#define STRUCT_OPS_SEC ".struct_ops"
461
462enum libbpf_map_type {
463	LIBBPF_MAP_UNSPEC,
464	LIBBPF_MAP_DATA,
465	LIBBPF_MAP_BSS,
466	LIBBPF_MAP_RODATA,
467	LIBBPF_MAP_KCONFIG,
468};
469
470struct bpf_map_def {
471	unsigned int type;
472	unsigned int key_size;
473	unsigned int value_size;
474	unsigned int max_entries;
475	unsigned int map_flags;
476};
477
478struct bpf_map {
479	struct bpf_object *obj;
480	char *name;
481	/* real_name is defined for special internal maps (.rodata*,
482	 * .data*, .bss, .kconfig) and preserves their original ELF section
483	 * name. This is important to be able to find the corresponding BTF
484	 * DATASEC information.
485	 */
486	char *real_name;
487	int fd;
488	int sec_idx;
489	size_t sec_offset;
490	int map_ifindex;
491	int inner_map_fd;
492	struct bpf_map_def def;
493	__u32 numa_node;
494	__u32 btf_var_idx;
495	__u32 btf_key_type_id;
496	__u32 btf_value_type_id;
497	__u32 btf_vmlinux_value_type_id;
498	enum libbpf_map_type libbpf_type;
499	void *mmaped;
500	struct bpf_struct_ops *st_ops;
501	struct bpf_map *inner_map;
502	void **init_slots;
503	int init_slots_sz;
504	char *pin_path;
505	bool pinned;
506	bool reused;
507	bool autocreate;
508	__u64 map_extra;
509};
510
511enum extern_type {
512	EXT_UNKNOWN,
513	EXT_KCFG,
514	EXT_KSYM,
515};
516
517enum kcfg_type {
518	KCFG_UNKNOWN,
519	KCFG_CHAR,
520	KCFG_BOOL,
521	KCFG_INT,
522	KCFG_TRISTATE,
523	KCFG_CHAR_ARR,
524};
525
526struct extern_desc {
527	enum extern_type type;
528	int sym_idx;
529	int btf_id;
530	int sec_btf_id;
531	const char *name;
532	bool is_set;
533	bool is_weak;
534	union {
535		struct {
536			enum kcfg_type type;
537			int sz;
538			int align;
539			int data_off;
540			bool is_signed;
541		} kcfg;
542		struct {
543			unsigned long long addr;
544
545			/* target btf_id of the corresponding kernel var. */
546			int kernel_btf_obj_fd;
547			int kernel_btf_id;
548
549			/* local btf_id of the ksym extern's type. */
550			__u32 type_id;
551			/* BTF fd index to be patched in for insn->off, this is
552			 * 0 for vmlinux BTF, index in obj->fd_array for module
553			 * BTF
554			 */
555			__s16 btf_fd_idx;
556		} ksym;
557	};
558};
559
560struct module_btf {
561	struct btf *btf;
562	char *name;
563	__u32 id;
564	int fd;
565	int fd_array_idx;
566};
567
568enum sec_type {
569	SEC_UNUSED = 0,
570	SEC_RELO,
571	SEC_BSS,
572	SEC_DATA,
573	SEC_RODATA,
574};
575
576struct elf_sec_desc {
577	enum sec_type sec_type;
578	Elf64_Shdr *shdr;
579	Elf_Data *data;
580};
581
582struct elf_state {
583	int fd;
584	const void *obj_buf;
585	size_t obj_buf_sz;
586	Elf *elf;
587	Elf64_Ehdr *ehdr;
588	Elf_Data *symbols;
589	Elf_Data *st_ops_data;
590	size_t shstrndx; /* section index for section name strings */
591	size_t strtabidx;
592	struct elf_sec_desc *secs;
593	int sec_cnt;
594	int maps_shndx;
595	int btf_maps_shndx;
596	__u32 btf_maps_sec_btf_id;
597	int text_shndx;
598	int symbols_shndx;
599	int st_ops_shndx;
600};
601
602struct usdt_manager;
603
604struct bpf_object {
605	char name[BPF_OBJ_NAME_LEN];
606	char license[64];
607	__u32 kern_version;
608
609	struct bpf_program *programs;
610	size_t nr_programs;
611	struct bpf_map *maps;
612	size_t nr_maps;
613	size_t maps_cap;
614
615	char *kconfig;
616	struct extern_desc *externs;
617	int nr_extern;
618	int kconfig_map_idx;
619
620	bool loaded;
621	bool has_subcalls;
622	bool has_rodata;
623
624	struct bpf_gen *gen_loader;
625
626	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
627	struct elf_state efile;
628
629	struct btf *btf;
630	struct btf_ext *btf_ext;
631
632	/* Parse and load BTF vmlinux if any of the programs in the object need
633	 * it at load time.
634	 */
635	struct btf *btf_vmlinux;
636	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
637	 * override for vmlinux BTF.
638	 */
639	char *btf_custom_path;
640	/* vmlinux BTF override for CO-RE relocations */
641	struct btf *btf_vmlinux_override;
642	/* Lazily initialized kernel module BTFs */
643	struct module_btf *btf_modules;
644	bool btf_modules_loaded;
645	size_t btf_module_cnt;
646	size_t btf_module_cap;
647
648	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
649	char *log_buf;
650	size_t log_size;
651	__u32 log_level;
652
653	int *fd_array;
654	size_t fd_array_cap;
655	size_t fd_array_cnt;
656
657	struct usdt_manager *usdt_man;
658
659	char path[];
660};
661
662static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
663static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
664static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
665static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
666static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
667static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
668static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
669static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
670static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
671
672void bpf_program__unload(struct bpf_program *prog)
673{
674	if (!prog)
675		return;
676
677	zclose(prog->fd);
678
679	zfree(&prog->func_info);
680	zfree(&prog->line_info);
681}
682
683static void bpf_program__exit(struct bpf_program *prog)
684{
685	if (!prog)
686		return;
687
688	bpf_program__unload(prog);
689	zfree(&prog->name);
690	zfree(&prog->sec_name);
691	zfree(&prog->insns);
692	zfree(&prog->reloc_desc);
693
694	prog->nr_reloc = 0;
695	prog->insns_cnt = 0;
696	prog->sec_idx = -1;
697}
698
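/* A BPF-to-BPF (subprogram) call is a BPF_CALL instruction with src_reg
 * set to BPF_PSEUDO_CALL; a regular helper call keeps src_reg == 0 and
 * carries the helper ID in the immediate field instead.
 */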
699static bool insn_is_subprog_call(const struct bpf_insn *insn)
700{
701	return BPF_CLASS(insn->code) == BPF_JMP &&
702	       BPF_OP(insn->code) == BPF_CALL &&
703	       BPF_SRC(insn->code) == BPF_K &&
704	       insn->src_reg == BPF_PSEUDO_CALL &&
705	       insn->dst_reg == 0 &&
706	       insn->off == 0;
707}
708
709static bool is_call_insn(const struct bpf_insn *insn)
710{
711	return insn->code == (BPF_JMP | BPF_CALL);
712}
713
714static bool insn_is_pseudo_func(struct bpf_insn *insn)
715{
716	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
717}
718
719static int
720bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
721		      const char *name, size_t sec_idx, const char *sec_name,
722		      size_t sec_off, void *insn_data, size_t insn_data_sz)
723{
724	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
725		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
726			sec_name, name, sec_off, insn_data_sz);
727		return -EINVAL;
728	}
729
730	memset(prog, 0, sizeof(*prog));
731	prog->obj = obj;
732
733	prog->sec_idx = sec_idx;
734	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
735	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
736	/* insns_cnt can later be increased by appending used subprograms */
737	prog->insns_cnt = prog->sec_insn_cnt;
738
739	prog->type = BPF_PROG_TYPE_UNSPEC;
740	prog->fd = -1;
741
742	/* libbpf's convention for SEC("?abc...") is that it's just like
743	 * SEC("abc...") but the corresponding bpf_program starts out with
744	 * autoload set to false.
745	 */
746	if (sec_name[0] == '?') {
747		prog->autoload = false;
748		/* from now on forget there was ? in section name */
749		sec_name++;
750	} else {
751		prog->autoload = true;
752	}
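
	/* Example (illustrative): a program declared in BPF source as
	 *
	 *	SEC("?kprobe/do_unlinkat")
	 *	int handle_unlink(struct pt_regs *ctx) { ... }
	 *
	 * starts out with autoload == false and is skipped during load
	 * unless the application re-enables it, e.g. with
	 * bpf_program__set_autoload(prog, true).
	 */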
753
754	/* inherit object's log_level */
755	prog->log_level = obj->log_level;
756
757	prog->sec_name = strdup(sec_name);
758	if (!prog->sec_name)
759		goto errout;
760
761	prog->name = strdup(name);
762	if (!prog->name)
763		goto errout;
764
765	prog->insns = malloc(insn_data_sz);
766	if (!prog->insns)
767		goto errout;
768	memcpy(prog->insns, insn_data, insn_data_sz);
769
770	return 0;
771errout:
772	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
773	bpf_program__exit(prog);
774	return -ENOMEM;
775}
776
777static int
778bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
779			 const char *sec_name, int sec_idx)
780{
781	Elf_Data *symbols = obj->efile.symbols;
782	struct bpf_program *prog, *progs;
783	void *data = sec_data->d_buf;
784	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
785	int nr_progs, err, i;
786	const char *name;
787	Elf64_Sym *sym;
788
789	progs = obj->programs;
790	nr_progs = obj->nr_programs;
791	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
792	sec_off = 0;
793
794	for (i = 0; i < nr_syms; i++) {
795		sym = elf_sym_by_idx(obj, i);
796
797		if (sym->st_shndx != sec_idx)
798			continue;
799		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
800			continue;
801
802		prog_sz = sym->st_size;
803		sec_off = sym->st_value;
804
805		name = elf_sym_str(obj, sym->st_name);
806		if (!name) {
807			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
808				sec_name, sec_off);
809			return -LIBBPF_ERRNO__FORMAT;
810		}
811
812		if (sec_off + prog_sz > sec_sz) {
813			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
814				sec_name, sec_off);
815			return -LIBBPF_ERRNO__FORMAT;
816		}
817
818		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
819			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
820			return -ENOTSUP;
821		}
822
823		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
824			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
825
826		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
827		if (!progs) {
828			/*
829			 * In this case the original obj->programs
830			 * is still valid, so no special handling is needed
831			 * in bpf_object__close().
832			 */
833			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
834				sec_name, name);
835			return -ENOMEM;
836		}
837		obj->programs = progs;
838
839		prog = &progs[nr_progs];
840
841		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
842					    sec_off, data + sec_off, prog_sz);
843		if (err)
844			return err;
845
846		/* if the function is a global/weak symbol, but has restricted
847		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
848		 * as static to enable a more permissive BPF verification mode
849		 * with more outside context available to the BPF verifier
850		 */
851		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
852		    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
853			|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
854			prog->mark_btf_static = true;
855
856		nr_progs++;
857		obj->nr_programs = nr_progs;
858	}
859
860	return 0;
861}
862
863__u32 get_kernel_version(void)
864{
865	/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
866	 * but Ubuntu provides the /proc/version_signature file, as described
867	 * at https://ubuntu.com/kernel, with contents like the example below,
868	 * which we can use to get a proper LINUX_VERSION_CODE.
869	 *
870	 *   Ubuntu 5.4.0-12.15-generic 5.4.8
871	 *
872	 * In the above, 5.4.8 is the version the kernel actually expects,
873	 * while the uname() call will return 5.4.0 in info.release.
874	 */
875	const char *ubuntu_kver_file = "/proc/version_signature";
876	__u32 major, minor, patch;
877	struct utsname info;
878
879	if (access(ubuntu_kver_file, R_OK) == 0) {
880		FILE *f;
881
882		f = fopen(ubuntu_kver_file, "r");
883		if (f) {
884			if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) {
885				fclose(f);
886				return KERNEL_VERSION(major, minor, patch);
887			}
888			fclose(f);
889		}
890		/* something went wrong, fall back to uname() approach */
891	}
892
893	uname(&info);
894	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
895		return 0;
896	return KERNEL_VERSION(major, minor, patch);
897}
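
/* Example (illustrative): KERNEL_VERSION(5, 4, 8) packs the release into
 * the LINUX_VERSION_CODE format, (5 << 16) | (4 << 8) | 8 == 0x050408,
 * which is what the kernel compares a program's "version" section
 * against on older kernels that still enforce it.
 */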
898
899static const struct btf_member *
900find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
901{
902	struct btf_member *m;
903	int i;
904
905	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
906		if (btf_member_bit_offset(t, i) == bit_offset)
907			return m;
908	}
909
910	return NULL;
911}
912
913static const struct btf_member *
914find_member_by_name(const struct btf *btf, const struct btf_type *t,
915		    const char *name)
916{
917	struct btf_member *m;
918	int i;
919
920	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
921		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
922			return m;
923	}
924
925	return NULL;
926}
927
928#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
929static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
930				   const char *name, __u32 kind);
931
932static int
933find_struct_ops_kern_types(const struct btf *btf, const char *tname,
934			   const struct btf_type **type, __u32 *type_id,
935			   const struct btf_type **vtype, __u32 *vtype_id,
936			   const struct btf_member **data_member)
937{
938	const struct btf_type *kern_type, *kern_vtype;
939	const struct btf_member *kern_data_member;
940	__s32 kern_vtype_id, kern_type_id;
941	__u32 i;
942
943	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
944	if (kern_type_id < 0) {
945		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
946			tname);
947		return kern_type_id;
948	}
949	kern_type = btf__type_by_id(btf, kern_type_id);
950
951	/* Find the corresponding "map_value" type that will be used
952	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
953	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
954	 * btf_vmlinux.
955	 */
956	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
957						tname, BTF_KIND_STRUCT);
958	if (kern_vtype_id < 0) {
959		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
960			STRUCT_OPS_VALUE_PREFIX, tname);
961		return kern_vtype_id;
962	}
963	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
964
965	/* Find "struct tcp_congestion_ops" from
966	 * struct bpf_struct_ops_tcp_congestion_ops {
967	 *	[ ... ]
968	 *	struct tcp_congestion_ops data;
969	 * }
970	 */
971	kern_data_member = btf_members(kern_vtype);
972	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
973		if (kern_data_member->type == kern_type_id)
974			break;
975	}
976	if (i == btf_vlen(kern_vtype)) {
977		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
978			tname, STRUCT_OPS_VALUE_PREFIX, tname);
979		return -EINVAL;
980	}
981
982	*type = kern_type;
983	*type_id = kern_type_id;
984	*vtype = kern_vtype;
985	*vtype_id = kern_vtype_id;
986	*data_member = kern_data_member;
987
988	return 0;
989}
990
991static bool bpf_map__is_struct_ops(const struct bpf_map *map)
992{
993	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
994}
995
996/* Init the map's fields that depend on kern_btf */
997static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
998					 const struct btf *btf,
999					 const struct btf *kern_btf)
1000{
1001	const struct btf_member *member, *kern_member, *kern_data_member;
1002	const struct btf_type *type, *kern_type, *kern_vtype;
1003	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
1004	struct bpf_struct_ops *st_ops;
1005	void *data, *kern_data;
1006	const char *tname;
1007	int err;
1008
1009	st_ops = map->st_ops;
1010	type = st_ops->type;
1011	tname = st_ops->tname;
1012	err = find_struct_ops_kern_types(kern_btf, tname,
1013					 &kern_type, &kern_type_id,
1014					 &kern_vtype, &kern_vtype_id,
1015					 &kern_data_member);
1016	if (err)
1017		return err;
1018
1019	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
1020		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1021
1022	map->def.value_size = kern_vtype->size;
1023	map->btf_vmlinux_value_type_id = kern_vtype_id;
1024
1025	st_ops->kern_vdata = calloc(1, kern_vtype->size);
1026	if (!st_ops->kern_vdata)
1027		return -ENOMEM;
1028
1029	data = st_ops->data;
1030	kern_data_off = kern_data_member->offset / 8;
1031	kern_data = st_ops->kern_vdata + kern_data_off;
1032
1033	member = btf_members(type);
1034	for (i = 0; i < btf_vlen(type); i++, member++) {
1035		const struct btf_type *mtype, *kern_mtype;
1036		__u32 mtype_id, kern_mtype_id;
1037		void *mdata, *kern_mdata;
1038		__s64 msize, kern_msize;
1039		__u32 moff, kern_moff;
1040		__u32 kern_member_idx;
1041		const char *mname;
1042
1043		mname = btf__name_by_offset(btf, member->name_off);
1044		kern_member = find_member_by_name(kern_btf, kern_type, mname);
1045		if (!kern_member) {
1046			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
1047				map->name, mname);
1048			return -ENOTSUP;
1049		}
1050
1051		kern_member_idx = kern_member - btf_members(kern_type);
1052		if (btf_member_bitfield_size(type, i) ||
1053		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
1054			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
1055				map->name, mname);
1056			return -ENOTSUP;
1057		}
1058
1059		moff = member->offset / 8;
1060		kern_moff = kern_member->offset / 8;
1061
1062		mdata = data + moff;
1063		kern_mdata = kern_data + kern_moff;
1064
1065		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1066		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1067						    &kern_mtype_id);
1068		if (BTF_INFO_KIND(mtype->info) !=
1069		    BTF_INFO_KIND(kern_mtype->info)) {
1070			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
1071				map->name, mname, BTF_INFO_KIND(mtype->info),
1072				BTF_INFO_KIND(kern_mtype->info));
1073			return -ENOTSUP;
1074		}
1075
1076		if (btf_is_ptr(mtype)) {
1077			struct bpf_program *prog;
1078
1079			prog = st_ops->progs[i];
1080			if (!prog)
1081				continue;
1082
1083			kern_mtype = skip_mods_and_typedefs(kern_btf,
1084							    kern_mtype->type,
1085							    &kern_mtype_id);
1086
1087			/* mtype->type must be a func_proto which was
1088			 * guaranteed in bpf_object__collect_st_ops_relos(),
1089			 * so only check kern_mtype for func_proto here.
1090			 */
1091			if (!btf_is_func_proto(kern_mtype)) {
1092				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
1093					map->name, mname);
1094				return -ENOTSUP;
1095			}
1096
1097			prog->attach_btf_id = kern_type_id;
1098			prog->expected_attach_type = kern_member_idx;
1099
1100			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1101
1102			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
1103				 map->name, mname, prog->name, moff,
1104				 kern_moff);
1105
1106			continue;
1107		}
1108
1109		msize = btf__resolve_size(btf, mtype_id);
1110		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
1111		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
1112			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
1113				map->name, mname, (ssize_t)msize,
1114				(ssize_t)kern_msize);
1115			return -ENOTSUP;
1116		}
1117
1118		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
1119			 map->name, mname, (unsigned int)msize,
1120			 moff, kern_moff);
1121		memcpy(kern_mdata, mdata, msize);
1122	}
1123
1124	return 0;
1125}
1126
1127static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1128{
1129	struct bpf_map *map;
1130	size_t i;
1131	int err;
1132
1133	for (i = 0; i < obj->nr_maps; i++) {
1134		map = &obj->maps[i];
1135
1136		if (!bpf_map__is_struct_ops(map))
1137			continue;
1138
1139		err = bpf_map__init_kern_struct_ops(map, obj->btf,
1140						    obj->btf_vmlinux);
1141		if (err)
1142			return err;
1143	}
1144
1145	return 0;
1146}
1147
1148static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
1149{
1150	const struct btf_type *type, *datasec;
1151	const struct btf_var_secinfo *vsi;
1152	struct bpf_struct_ops *st_ops;
1153	const char *tname, *var_name;
1154	__s32 type_id, datasec_id;
1155	const struct btf *btf;
1156	struct bpf_map *map;
1157	__u32 i;
1158
1159	if (obj->efile.st_ops_shndx == -1)
1160		return 0;
1161
1162	btf = obj->btf;
1163	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
1164					    BTF_KIND_DATASEC);
1165	if (datasec_id < 0) {
1166		pr_warn("struct_ops init: DATASEC %s not found\n",
1167			STRUCT_OPS_SEC);
1168		return -EINVAL;
1169	}
1170
1171	datasec = btf__type_by_id(btf, datasec_id);
1172	vsi = btf_var_secinfos(datasec);
1173	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1174		type = btf__type_by_id(obj->btf, vsi->type);
1175		var_name = btf__name_by_offset(obj->btf, type->name_off);
1176
1177		type_id = btf__resolve_type(obj->btf, vsi->type);
1178		if (type_id < 0) {
1179			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1180				vsi->type, STRUCT_OPS_SEC);
1181			return -EINVAL;
1182		}
1183
1184		type = btf__type_by_id(obj->btf, type_id);
1185		tname = btf__name_by_offset(obj->btf, type->name_off);
1186		if (!tname[0]) {
1187			pr_warn("struct_ops init: anonymous type is not supported\n");
1188			return -ENOTSUP;
1189		}
1190		if (!btf_is_struct(type)) {
1191			pr_warn("struct_ops init: %s is not a struct\n", tname);
1192			return -EINVAL;
1193		}
1194
1195		map = bpf_object__add_map(obj);
1196		if (IS_ERR(map))
1197			return PTR_ERR(map);
1198
1199		map->sec_idx = obj->efile.st_ops_shndx;
1200		map->sec_offset = vsi->offset;
1201		map->name = strdup(var_name);
1202		if (!map->name)
1203			return -ENOMEM;
1204
1205		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1206		map->def.key_size = sizeof(int);
1207		map->def.value_size = type->size;
1208		map->def.max_entries = 1;
1209
1210		map->st_ops = calloc(1, sizeof(*map->st_ops));
1211		if (!map->st_ops)
1212			return -ENOMEM;
1213		st_ops = map->st_ops;
1214		st_ops->data = malloc(type->size);
1215		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1216		st_ops->kern_func_off = malloc(btf_vlen(type) *
1217					       sizeof(*st_ops->kern_func_off));
1218		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1219			return -ENOMEM;
1220
1221		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
1222			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1223				var_name, STRUCT_OPS_SEC);
1224			return -EINVAL;
1225		}
1226
1227		memcpy(st_ops->data,
1228		       obj->efile.st_ops_data->d_buf + vsi->offset,
1229		       type->size);
1230		st_ops->tname = tname;
1231		st_ops->type = type;
1232		st_ops->type_id = type_id;
1233
1234		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1235			 tname, type_id, var_name, vsi->offset);
1236	}
1237
1238	return 0;
1239}
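
/* Example (illustrative): a struct_ops map is declared in BPF source by
 * placing a variable of the kernel-side ops struct type into the
 * ".struct_ops" section, e.g.:
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init	= (void *)dctcp_init,
 *		.name	= "bpf_dctcp",
 *	};
 *
 * Each such variable is turned into one BPF_MAP_TYPE_STRUCT_OPS map by
 * the function above.
 */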
1240
1241static struct bpf_object *bpf_object__new(const char *path,
1242					  const void *obj_buf,
1243					  size_t obj_buf_sz,
1244					  const char *obj_name)
1245{
1246	struct bpf_object *obj;
1247	char *end;
1248
1249	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1250	if (!obj) {
1251		pr_warn("alloc memory failed for %s\n", path);
1252		return ERR_PTR(-ENOMEM);
1253	}
1254
1255	strcpy(obj->path, path);
1256	if (obj_name) {
1257		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1258	} else {
1259		/* Using the GNU version of basename(), which doesn't modify its argument. */
1260		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1261		end = strchr(obj->name, '.');
1262		if (end)
1263			*end = 0;
1264	}
1265
1266	obj->efile.fd = -1;
1267	/*
1268	 * The caller of this function should also call
1269	 * bpf_object__elf_finish() after data collection to return
1270	 * obj_buf to the user. Otherwise we would have to duplicate the
1271	 * buffer to avoid the user freeing it before ELF processing finishes.
1272	 */
1273	obj->efile.obj_buf = obj_buf;
1274	obj->efile.obj_buf_sz = obj_buf_sz;
1275	obj->efile.maps_shndx = -1;
1276	obj->efile.btf_maps_shndx = -1;
1277	obj->efile.st_ops_shndx = -1;
1278	obj->kconfig_map_idx = -1;
1279
1280	obj->kern_version = get_kernel_version();
1281	obj->loaded = false;
1282
1283	return obj;
1284}
1285
1286static void bpf_object__elf_finish(struct bpf_object *obj)
1287{
1288	if (!obj->efile.elf)
1289		return;
1290
1291	elf_end(obj->efile.elf);
1292	obj->efile.elf = NULL;
1293	obj->efile.symbols = NULL;
1294	obj->efile.st_ops_data = NULL;
1295
1296	zfree(&obj->efile.secs);
1297	obj->efile.sec_cnt = 0;
1298	zclose(obj->efile.fd);
1299	obj->efile.obj_buf = NULL;
1300	obj->efile.obj_buf_sz = 0;
1301}
1302
1303static int bpf_object__elf_init(struct bpf_object *obj)
1304{
1305	Elf64_Ehdr *ehdr;
1306	int err = 0;
1307	Elf *elf;
1308
1309	if (obj->efile.elf) {
1310		pr_warn("elf: init internal error\n");
1311		return -LIBBPF_ERRNO__LIBELF;
1312	}
1313
1314	if (obj->efile.obj_buf_sz > 0) {
1315		/* obj_buf should have been validated by bpf_object__open_mem(). */
1316		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1317	} else {
1318		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1319		if (obj->efile.fd < 0) {
1320			char errmsg[STRERR_BUFSIZE], *cp;
1321
1322			err = -errno;
1323			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1324			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1325			return err;
1326		}
1327
1328		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1329	}
1330
1331	if (!elf) {
1332		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1333		err = -LIBBPF_ERRNO__LIBELF;
1334		goto errout;
1335	}
1336
1337	obj->efile.elf = elf;
1338
1339	if (elf_kind(elf) != ELF_K_ELF) {
1340		err = -LIBBPF_ERRNO__FORMAT;
1341		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1342		goto errout;
1343	}
1344
1345	if (gelf_getclass(elf) != ELFCLASS64) {
1346		err = -LIBBPF_ERRNO__FORMAT;
1347		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1348		goto errout;
1349	}
1350
1351	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1352	if (!obj->efile.ehdr) {
1353		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1354		err = -LIBBPF_ERRNO__FORMAT;
1355		goto errout;
1356	}
1357
1358	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1359		pr_warn("elf: failed to get section names section index for %s: %s\n",
1360			obj->path, elf_errmsg(-1));
1361		err = -LIBBPF_ERRNO__FORMAT;
1362		goto errout;
1363	}
1364
1365	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
1366	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1367		pr_warn("elf: failed to get section names strings from %s: %s\n",
1368			obj->path, elf_errmsg(-1));
1369		err = -LIBBPF_ERRNO__FORMAT;
1370		goto errout;
1371	}
1372
1373	/* Old LLVM set e_machine to EM_NONE */
1374	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
1375		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1376		err = -LIBBPF_ERRNO__FORMAT;
1377		goto errout;
1378	}
1379
1380	return 0;
1381errout:
1382	bpf_object__elf_finish(obj);
1383	return err;
1384}
1385
1386static int bpf_object__check_endianness(struct bpf_object *obj)
1387{
1388#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1389	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
1390		return 0;
1391#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1392	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
1393		return 0;
1394#else
1395# error "Unrecognized __BYTE_ORDER__"
1396#endif
1397	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1398	return -LIBBPF_ERRNO__ENDIAN;
1399}
1400
1401static int
1402bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1403{
1404	/* libbpf_strlcpy() only copies the first N - 1 bytes, so size + 1 won't
1405	 * go over the allowed ELF data section buffer
1406	 */
1407	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1408	pr_debug("license of %s is %s\n", obj->path, obj->license);
1409	return 0;
1410}
1411
1412static int
1413bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1414{
1415	__u32 kver;
1416
1417	if (size != sizeof(kver)) {
1418		pr_warn("invalid kver section in %s\n", obj->path);
1419		return -LIBBPF_ERRNO__FORMAT;
1420	}
1421	memcpy(&kver, data, sizeof(kver));
1422	obj->kern_version = kver;
1423	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1424	return 0;
1425}
1426
1427static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1428{
1429	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1430	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
1431		return true;
1432	return false;
1433}
1434
1435static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1436{
1437	Elf_Data *data;
1438	Elf_Scn *scn;
1439
1440	if (!name)
1441		return -EINVAL;
1442
1443	scn = elf_sec_by_name(obj, name);
1444	data = elf_sec_data(obj, scn);
1445	if (data) {
1446		*size = data->d_size;
1447		return 0; /* found it */
1448	}
1449
1450	return -ENOENT;
1451}
1452
1453static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
1454{
1455	Elf_Data *symbols = obj->efile.symbols;
1456	const char *sname;
1457	size_t si;
1458
1459	if (!name || !off)
1460		return -EINVAL;
1461
1462	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1463		Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1464
1465		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1466			continue;
1467
1468		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1469		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1470			continue;
1471
1472		sname = elf_sym_str(obj, sym->st_name);
1473		if (!sname) {
1474			pr_warn("failed to get sym name string for var %s\n", name);
1475			return -EIO;
1476		}
1477		if (strcmp(name, sname) == 0) {
1478			*off = sym->st_value;
1479			return 0;
1480		}
1481	}
1482
1483	return -ENOENT;
1484}
1485
1486static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1487{
1488	struct bpf_map *map;
1489	int err;
1490
1491	err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1492				sizeof(*obj->maps), obj->nr_maps + 1);
1493	if (err)
1494		return ERR_PTR(err);
1495
1496	map = &obj->maps[obj->nr_maps++];
1497	map->obj = obj;
1498	map->fd = -1;
1499	map->inner_map_fd = -1;
1500	map->autocreate = true;
1501
1502	return map;
1503}
1504
1505static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1506{
1507	long page_sz = sysconf(_SC_PAGE_SIZE);
1508	size_t map_sz;
1509
1510	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
1511	map_sz = roundup(map_sz, page_sz);
1512	return map_sz;
1513}
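
/* Example (illustrative): for an internal map with value_size == 12 and
 * max_entries == 1, the value is first rounded up to 16 bytes (8-byte
 * multiple) and the result is then rounded up to a whole page, i.e.
 * 4096 bytes on a system with 4KB pages.
 */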
1514
1515static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1516{
1517	char map_name[BPF_OBJ_NAME_LEN], *p;
1518	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1519
1520	/* This is one of the more confusing parts of libbpf for various
1521	 * reasons, some of which are historical. The original idea for naming
1522	 * internal maps was to include as much of the BPF object name prefix as
1523	 * possible, so that they can be distinguished from similar internal
1524	 * maps of a different BPF object.
1525	 * As an example, let's say we have bpf_object named 'my_object_name'
1526	 * and internal map corresponding to '.rodata' ELF section. The final
1527	 * map name advertised to user and to the kernel will be
1528	 * 'my_objec.rodata', taking first 8 characters of object name and
1529	 * entire 7 characters of '.rodata'.
1530	 * Somewhat confusingly, if internal map ELF section name is shorter
1531	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1532	 * for the suffix, even though we only have 4 actual characters, and
1533	 * resulting map will be called 'my_objec.bss', not even using all 15
1534	 * characters allowed by the kernel. Oh well, at least the truncated
1535	 * object name is somewhat consistent in this case. But if the map
1536	 * name is '.kconfig', we'll still have the entirety of '.kconfig' added
1537	 * (8 chars) and thus will be left with only the first 7 characters of
1538	 * the object name ('my_obje'). Happy guessing, user, that the final map
1539	 * name will be "my_obje.kconfig".
1540	 * Now, with libbpf starting to support arbitrarily named .rodata.*
1541	 * and .data.* data sections, it's possible that ELF section name is
1542	 * longer than allowed 15 chars, so we now need to be careful to take
1543	 * only up to 15 first characters of ELF name, taking no BPF object
1544	 * name characters at all. So '.rodata.abracadabra' will result in
1545	 * '.rodata.abracad' kernel and user-visible name.
1546	 * We need to keep this convoluted logic intact for .data, .bss and
1547	 * .rodata maps, but for new custom .data.custom and .rodata.custom
1548	 * maps we use their ELF names as is, not prepending bpf_object name
1549	 * in front. We still need to truncate them to 15 characters for the
1550	 * kernel. Full name can be recovered for such maps by using DATASEC
1551	 * BTF type associated with such map's value type, though.
1552	 */
1553	if (sfx_len >= BPF_OBJ_NAME_LEN)
1554		sfx_len = BPF_OBJ_NAME_LEN - 1;
1555
1556	/* if there are two or more dots in map name, it's a custom dot map */
1557	if (strchr(real_name + 1, '.') != NULL)
1558		pfx_len = 0;
1559	else
1560		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1561
1562	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1563		 sfx_len, real_name);
1564
1565	/* sanitise map name to characters allowed by kernel */
1566	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1567		if (!isalnum(*p) && *p != '_' && *p != '.')
1568			*p = '_';
1569
1570	return strdup(map_name);
1571}
1572
1573static int
1574bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
1575
1576static int
1577bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1578			      const char *real_name, int sec_idx, void *data, size_t data_sz)
1579{
1580	struct bpf_map_def *def;
1581	struct bpf_map *map;
1582	int err;
1583
1584	map = bpf_object__add_map(obj);
1585	if (IS_ERR(map))
1586		return PTR_ERR(map);
1587
1588	map->libbpf_type = type;
1589	map->sec_idx = sec_idx;
1590	map->sec_offset = 0;
1591	map->real_name = strdup(real_name);
1592	map->name = internal_map_name(obj, real_name);
1593	if (!map->real_name || !map->name) {
1594		zfree(&map->real_name);
1595		zfree(&map->name);
1596		return -ENOMEM;
1597	}
1598
1599	def = &map->def;
1600	def->type = BPF_MAP_TYPE_ARRAY;
1601	def->key_size = sizeof(int);
1602	def->value_size = data_sz;
1603	def->max_entries = 1;
1604	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1605			 ? BPF_F_RDONLY_PROG : 0;
1606	def->map_flags |= BPF_F_MMAPABLE;
1607
1608	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1609		 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1610
1611	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
1612			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1613	if (map->mmaped == MAP_FAILED) {
1614		err = -errno;
1615		map->mmaped = NULL;
1616		pr_warn("failed to alloc map '%s' content buffer: %d\n",
1617			map->name, err);
1618		zfree(&map->real_name);
1619		zfree(&map->name);
1620		return err;
1621	}
1622
1623	/* failures are fine because of maps like .rodata.str1.1 */
1624	(void) bpf_map_find_btf_info(obj, map);
1625
1626	if (data)
1627		memcpy(map->mmaped, data, data_sz);
1628
1629	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1630	return 0;
1631}
1632
1633static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1634{
1635	struct elf_sec_desc *sec_desc;
1636	const char *sec_name;
1637	int err = 0, sec_idx;
1638
1639	/*
1640	 * Populate obj->maps with libbpf internal maps.
1641	 */
1642	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1643		sec_desc = &obj->efile.secs[sec_idx];
1644
1645		switch (sec_desc->sec_type) {
1646		case SEC_DATA:
1647			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1648			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1649							    sec_name, sec_idx,
1650							    sec_desc->data->d_buf,
1651							    sec_desc->data->d_size);
1652			break;
1653		case SEC_RODATA:
1654			obj->has_rodata = true;
1655			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1656			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1657							    sec_name, sec_idx,
1658							    sec_desc->data->d_buf,
1659							    sec_desc->data->d_size);
1660			break;
1661		case SEC_BSS:
1662			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1663			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1664							    sec_name, sec_idx,
1665							    NULL,
1666							    sec_desc->data->d_size);
1667			break;
1668		default:
1669			/* skip */
1670			break;
1671		}
1672		if (err)
1673			return err;
1674	}
1675	return 0;
1676}
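
/* Example (illustrative): plain global variables in BPF C code are backed
 * by the internal maps created above, e.g.:
 *
 *	int my_counter = 1;           // lands in .data
 *	const volatile int my_cfg;    // lands in .rodata
 *	int scratch;                  // zero-initialized, lands in .bss
 *
 * User space can read or update them through the maps' mmap-ed memory
 * (BPF_F_MMAPABLE) or bpf_map__initial_value() before load.
 */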
1677
1678
1679static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1680					       const void *name)
1681{
1682	int i;
1683
1684	for (i = 0; i < obj->nr_extern; i++) {
1685		if (strcmp(obj->externs[i].name, name) == 0)
1686			return &obj->externs[i];
1687	}
1688	return NULL;
1689}
1690
1691static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1692			      char value)
1693{
1694	switch (ext->kcfg.type) {
1695	case KCFG_BOOL:
1696		if (value == 'm') {
1697			pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
1698				ext->name, value);
1699			return -EINVAL;
1700		}
1701		*(bool *)ext_val = value == 'y' ? true : false;
1702		break;
1703	case KCFG_TRISTATE:
1704		if (value == 'y')
1705			*(enum libbpf_tristate *)ext_val = TRI_YES;
1706		else if (value == 'm')
1707			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
1708		else /* value == 'n' */
1709			*(enum libbpf_tristate *)ext_val = TRI_NO;
1710		break;
1711	case KCFG_CHAR:
1712		*(char *)ext_val = value;
1713		break;
1714	case KCFG_UNKNOWN:
1715	case KCFG_INT:
1716	case KCFG_CHAR_ARR:
1717	default:
1718		pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
1719			ext->name, value);
1720		return -EINVAL;
1721	}
1722	ext->is_set = true;
1723	return 0;
1724}
1725
1726static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1727			      const char *value)
1728{
1729	size_t len;
1730
1731	if (ext->kcfg.type != KCFG_CHAR_ARR) {
1732		pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
1733			ext->name, value);
1734		return -EINVAL;
1735	}
1736
1737	len = strlen(value);
1738	if (value[len - 1] != '"') {
1739		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1740			ext->name, value);
1741		return -EINVAL;
1742	}
1743
1744	/* strip quotes */
1745	len -= 2;
1746	if (len >= ext->kcfg.sz) {
1747		pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
1748			ext->name, value, len, ext->kcfg.sz - 1);
1749		len = ext->kcfg.sz - 1;
1750	}
1751	memcpy(ext_val, value + 1, len);
1752	ext_val[len] = '\0';
1753	ext->is_set = true;
1754	return 0;
1755}
1756
1757static int parse_u64(const char *value, __u64 *res)
1758{
1759	char *value_end;
1760	int err;
1761
1762	errno = 0;
1763	*res = strtoull(value, &value_end, 0);
1764	if (errno) {
1765		err = -errno;
1766		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1767		return err;
1768	}
1769	if (*value_end) {
1770		pr_warn("failed to parse '%s' as integer completely\n", value);
1771		return -EINVAL;
1772	}
1773	return 0;
1774}
1775
1776static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1777{
1778	int bit_sz = ext->kcfg.sz * 8;
1779
1780	if (ext->kcfg.sz == 8)
1781		return true;
1782
1783	/* Validate that the value stored in u64 fits in an integer of
1784	 * `ext->kcfg.sz` bytes without any loss of information. If the target
1785	 * integer is signed, we rely on the following limits of an integer type of
1786	 * Y bits and subsequent transformation:
1787	 *
1788	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
1789	 *            0 <= X + 2^(Y-1) <= 2^Y - 1
1790	 *            0 <= X + 2^(Y-1) <  2^Y
1791	 *
1792	 *  For unsigned target integer, check that all the (64 - Y) bits are
1793	 *  zero.
1794	 */
1795	if (ext->kcfg.is_signed)
1796		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1797	else
1798		return (v >> bit_sz) == 0;
1799}
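
/* Example (illustrative): for a signed 1-byte kcfg extern (bit_sz == 8),
 * v == 0xffffffffffffff80 (-128) passes the check since v + 0x80 wraps
 * to 0, which is < 0x100, while v == 0xffffffffffffff7f (-129) does not.
 */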
1800
1801static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1802			      __u64 value)
1803{
1804	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
1805	    ext->kcfg.type != KCFG_BOOL) {
1806		pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
1807			ext->name, (unsigned long long)value);
1808		return -EINVAL;
1809	}
1810	if (ext->kcfg.type == KCFG_BOOL && value > 1) {
1811		pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
1812			ext->name, (unsigned long long)value);
1813		return -EINVAL;
1814
1815	}
1816	if (!is_kcfg_value_in_range(ext, value)) {
1817		pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
1818			ext->name, (unsigned long long)value, ext->kcfg.sz);
1819		return -ERANGE;
1820	}
1821	switch (ext->kcfg.sz) {
1822		case 1: *(__u8 *)ext_val = value; break;
1823		case 2: *(__u16 *)ext_val = value; break;
1824		case 4: *(__u32 *)ext_val = value; break;
1825		case 8: *(__u64 *)ext_val = value; break;
1826		default:
1827			return -EINVAL;
1828	}
1829	ext->is_set = true;
1830	return 0;
1831}
1832
1833static int bpf_object__process_kconfig_line(struct bpf_object *obj,
1834					    char *buf, void *data)
1835{
1836	struct extern_desc *ext;
1837	char *sep, *value;
1838	int len, err = 0;
1839	void *ext_val;
1840	__u64 num;
1841
1842	if (!str_has_pfx(buf, "CONFIG_"))
1843		return 0;
1844
1845	sep = strchr(buf, '=');
1846	if (!sep) {
1847		pr_warn("failed to parse '%s': no separator\n", buf);
1848		return -EINVAL;
1849	}
1850
1851	/* Trim ending '\n' */
1852	len = strlen(buf);
1853	if (buf[len - 1] == '\n')
1854		buf[len - 1] = '\0';
1855	/* Split on '=' and ensure that a value is present. */
1856	*sep = '\0';
1857	if (!sep[1]) {
1858		*sep = '=';
1859		pr_warn("failed to parse '%s': no value\n", buf);
1860		return -EINVAL;
1861	}
1862
1863	ext = find_extern_by_name(obj, buf);
1864	if (!ext || ext->is_set)
1865		return 0;
1866
1867	ext_val = data + ext->kcfg.data_off;
1868	value = sep + 1;
1869
1870	switch (*value) {
1871	case 'y': case 'n': case 'm':
1872		err = set_kcfg_value_tri(ext, ext_val, *value);
1873		break;
1874	case '"':
1875		err = set_kcfg_value_str(ext, ext_val, value);
1876		break;
1877	default:
1878		/* assume integer */
1879		err = parse_u64(value, &num);
1880		if (err) {
1881			pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
1882			return err;
1883		}
1884		if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
1885			pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
1886			return -EINVAL;
1887		}
1888		err = set_kcfg_value_num(ext, ext_val, num);
1889		break;
1890	}
1891	if (err)
1892		return err;
1893	pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
1894	return 0;
1895}
1896
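/* Populate kcfg externs from the running kernel's configuration: try
 * /boot/config-$(uname -r) first, then fall back to /proc/config.gz; gzopen()
 * transparently handles both gzip-compressed and plain-text files.
 */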
1897static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1898{
1899	char buf[PATH_MAX];
1900	struct utsname uts;
1901	int len, err = 0;
1902	gzFile file;
1903
1904	uname(&uts);
1905	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1906	if (len < 0)
1907		return -EINVAL;
1908	else if (len >= PATH_MAX)
1909		return -ENAMETOOLONG;
1910
1911	/* gzopen also accepts uncompressed files. */
1912	file = gzopen(buf, "r");
1913	if (!file)
1914		file = gzopen("/proc/config.gz", "r");
1915
1916	if (!file) {
1917		pr_warn("failed to open system Kconfig\n");
1918		return -ENOENT;
1919	}
1920
1921	while (gzgets(file, buf, sizeof(buf))) {
1922		err = bpf_object__process_kconfig_line(obj, buf, data);
1923		if (err) {
1924			pr_warn("error parsing system Kconfig line '%s': %d\n",
1925				buf, err);
1926			goto out;
1927		}
1928	}
1929
1930out:
1931	gzclose(file);
1932	return err;
1933}
1934
1935static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1936					const char *config, void *data)
1937{
1938	char buf[PATH_MAX];
1939	int err = 0;
1940	FILE *file;
1941
1942	file = fmemopen((void *)config, strlen(config), "r");
1943	if (!file) {
1944		err = -errno;
1945		pr_warn("failed to open in-memory Kconfig: %d\n", err);
1946		return err;
1947	}
1948
1949	while (fgets(buf, sizeof(buf), file)) {
1950		err = bpf_object__process_kconfig_line(obj, buf, data);
1951		if (err) {
1952			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1953				buf, err);
1954			break;
1955		}
1956	}
1957
1958	fclose(file);
1959	return err;
1960}
1961
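/* Create the internal .kconfig map that backs all kcfg externs. kcfg externs
 * are laid out with ascending data_off during extern collection, so the map
 * only needs to span up to the last kcfg extern's data_off + size.
 */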
1962static int bpf_object__init_kconfig_map(struct bpf_object *obj)
1963{
1964	struct extern_desc *last_ext = NULL, *ext;
1965	size_t map_sz;
1966	int i, err;
1967
1968	for (i = 0; i < obj->nr_extern; i++) {
1969		ext = &obj->externs[i];
1970		if (ext->type == EXT_KCFG)
1971			last_ext = ext;
1972	}
1973
1974	if (!last_ext)
1975		return 0;
1976
1977	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
1978	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
1979					    ".kconfig", obj->efile.symbols_shndx,
1980					    NULL, map_sz);
1981	if (err)
1982		return err;
1983
1984	obj->kconfig_map_idx = obj->nr_maps - 1;
1985
1986	return 0;
1987}
1988
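/* Peel off type modifiers (const/volatile/restrict) and typedefs until a
 * "real" type is reached; if res_id is given, it is set to the ID of that
 * final type. E.g., given 'typedef const struct foo foo_t;', resolving foo_t
 * ends at the STRUCT 'foo'.
 */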
1989const struct btf_type *
1990skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1991{
1992	const struct btf_type *t = btf__type_by_id(btf, id);
1993
1994	if (res_id)
1995		*res_id = id;
1996
1997	while (btf_is_mod(t) || btf_is_typedef(t)) {
1998		if (res_id)
1999			*res_id = t->type;
2000		t = btf__type_by_id(btf, t->type);
2001	}
2002
2003	return t;
2004}
2005
2006static const struct btf_type *
2007resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2008{
2009	const struct btf_type *t;
2010
2011	t = skip_mods_and_typedefs(btf, id, NULL);
2012	if (!btf_is_ptr(t))
2013		return NULL;
2014
2015	t = skip_mods_and_typedefs(btf, t->type, res_id);
2016
2017	return btf_is_func_proto(t) ? t : NULL;
2018}
2019
2020static const char *__btf_kind_str(__u16 kind)
2021{
2022	switch (kind) {
2023	case BTF_KIND_UNKN: return "void";
2024	case BTF_KIND_INT: return "int";
2025	case BTF_KIND_PTR: return "ptr";
2026	case BTF_KIND_ARRAY: return "array";
2027	case BTF_KIND_STRUCT: return "struct";
2028	case BTF_KIND_UNION: return "union";
2029	case BTF_KIND_ENUM: return "enum";
2030	case BTF_KIND_FWD: return "fwd";
2031	case BTF_KIND_TYPEDEF: return "typedef";
2032	case BTF_KIND_VOLATILE: return "volatile";
2033	case BTF_KIND_CONST: return "const";
2034	case BTF_KIND_RESTRICT: return "restrict";
2035	case BTF_KIND_FUNC: return "func";
2036	case BTF_KIND_FUNC_PROTO: return "func_proto";
2037	case BTF_KIND_VAR: return "var";
2038	case BTF_KIND_DATASEC: return "datasec";
2039	case BTF_KIND_FLOAT: return "float";
2040	case BTF_KIND_DECL_TAG: return "decl_tag";
2041	case BTF_KIND_TYPE_TAG: return "type_tag";
2042	case BTF_KIND_ENUM64: return "enum64";
2043	default: return "unknown";
2044	}
2045}
2046
2047const char *btf_kind_str(const struct btf_type *t)
2048{
2049	return __btf_kind_str(btf_kind(t));
2050}
2051
2052/*
2053 * Fetch integer attribute of BTF map definition. Such attributes are
2054 * represented using a pointer to an array, where the dimensionality of the
2055 * array encodes the specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2056 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2057 * type definition, while using only sizeof(void *) space in ELF data section.
2058 */
2059static bool get_map_field_int(const char *map_name, const struct btf *btf,
2060			      const struct btf_member *m, __u32 *res)
2061{
2062	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2063	const char *name = btf__name_by_offset(btf, m->name_off);
2064	const struct btf_array *arr_info;
2065	const struct btf_type *arr_t;
2066
2067	if (!btf_is_ptr(t)) {
2068		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2069			map_name, name, btf_kind_str(t));
2070		return false;
2071	}
2072
2073	arr_t = btf__type_by_id(btf, t->type);
2074	if (!arr_t) {
2075		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2076			map_name, name, t->type);
2077		return false;
2078	}
2079	if (!btf_is_array(arr_t)) {
2080		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2081			map_name, name, btf_kind_str(arr_t));
2082		return false;
2083	}
2084	arr_info = btf_array(arr_t);
2085	*res = arr_info->nelems;
2086	return true;
2087}
2088
2089static int build_map_pin_path(struct bpf_map *map, const char *path)
2090{
2091	char buf[PATH_MAX];
2092	int len;
2093
2094	if (!path)
2095		path = "/sys/fs/bpf";
2096
2097	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2098	if (len < 0)
2099		return -EINVAL;
2100	else if (len >= PATH_MAX)
2101		return -ENAMETOOLONG;
2102
2103	return bpf_map__set_pin_path(map, buf);
2104}
2105
2106/* should match definition in bpf_helpers.h */
2107enum libbpf_pin_type {
2108	LIBBPF_PIN_NONE,
2109	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2110	LIBBPF_PIN_BY_NAME,
2111};
2112
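/* Parse a BTF-defined map declaration from the ".maps" section. An
 * illustrative source-level example of what this consumes (using the
 * __uint()/__type() helpers from bpf_helpers.h; 'my_map' and 'struct my_value'
 * are made-up names):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, struct my_value);
 *	} my_map SEC(".maps");
 *
 * Integer attributes are encoded as described above get_map_field_int();
 * "key"/"value" members are pointers to the actual key/value types.
 */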
2113int parse_btf_map_def(const char *map_name, struct btf *btf,
2114		      const struct btf_type *def_t, bool strict,
2115		      struct btf_map_def *map_def, struct btf_map_def *inner_def)
2116{
2117	const struct btf_type *t;
2118	const struct btf_member *m;
2119	bool is_inner = inner_def == NULL;
2120	int vlen, i;
2121
2122	vlen = btf_vlen(def_t);
2123	m = btf_members(def_t);
2124	for (i = 0; i < vlen; i++, m++) {
2125		const char *name = btf__name_by_offset(btf, m->name_off);
2126
2127		if (!name) {
2128			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2129			return -EINVAL;
2130		}
2131		if (strcmp(name, "type") == 0) {
2132			if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2133				return -EINVAL;
2134			map_def->parts |= MAP_DEF_MAP_TYPE;
2135		} else if (strcmp(name, "max_entries") == 0) {
2136			if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2137				return -EINVAL;
2138			map_def->parts |= MAP_DEF_MAX_ENTRIES;
2139		} else if (strcmp(name, "map_flags") == 0) {
2140			if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2141				return -EINVAL;
2142			map_def->parts |= MAP_DEF_MAP_FLAGS;
2143		} else if (strcmp(name, "numa_node") == 0) {
2144			if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2145				return -EINVAL;
2146			map_def->parts |= MAP_DEF_NUMA_NODE;
2147		} else if (strcmp(name, "key_size") == 0) {
2148			__u32 sz;
2149
2150			if (!get_map_field_int(map_name, btf, m, &sz))
2151				return -EINVAL;
2152			if (map_def->key_size && map_def->key_size != sz) {
2153				pr_warn("map '%s': conflicting key size %u != %u.\n",
2154					map_name, map_def->key_size, sz);
2155				return -EINVAL;
2156			}
2157			map_def->key_size = sz;
2158			map_def->parts |= MAP_DEF_KEY_SIZE;
2159		} else if (strcmp(name, "key") == 0) {
2160			__s64 sz;
2161
2162			t = btf__type_by_id(btf, m->type);
2163			if (!t) {
2164				pr_warn("map '%s': key type [%d] not found.\n",
2165					map_name, m->type);
2166				return -EINVAL;
2167			}
2168			if (!btf_is_ptr(t)) {
2169				pr_warn("map '%s': key spec is not PTR: %s.\n",
2170					map_name, btf_kind_str(t));
2171				return -EINVAL;
2172			}
2173			sz = btf__resolve_size(btf, t->type);
2174			if (sz < 0) {
2175				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2176					map_name, t->type, (ssize_t)sz);
2177				return sz;
2178			}
2179			if (map_def->key_size && map_def->key_size != sz) {
2180				pr_warn("map '%s': conflicting key size %u != %zd.\n",
2181					map_name, map_def->key_size, (ssize_t)sz);
2182				return -EINVAL;
2183			}
2184			map_def->key_size = sz;
2185			map_def->key_type_id = t->type;
2186			map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2187		} else if (strcmp(name, "value_size") == 0) {
2188			__u32 sz;
2189
2190			if (!get_map_field_int(map_name, btf, m, &sz))
2191				return -EINVAL;
2192			if (map_def->value_size && map_def->value_size != sz) {
2193				pr_warn("map '%s': conflicting value size %u != %u.\n",
2194					map_name, map_def->value_size, sz);
2195				return -EINVAL;
2196			}
2197			map_def->value_size = sz;
2198			map_def->parts |= MAP_DEF_VALUE_SIZE;
2199		} else if (strcmp(name, "value") == 0) {
2200			__s64 sz;
2201
2202			t = btf__type_by_id(btf, m->type);
2203			if (!t) {
2204				pr_warn("map '%s': value type [%d] not found.\n",
2205					map_name, m->type);
2206				return -EINVAL;
2207			}
2208			if (!btf_is_ptr(t)) {
2209				pr_warn("map '%s': value spec is not PTR: %s.\n",
2210					map_name, btf_kind_str(t));
2211				return -EINVAL;
2212			}
2213			sz = btf__resolve_size(btf, t->type);
2214			if (sz < 0) {
2215				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2216					map_name, t->type, (ssize_t)sz);
2217				return sz;
2218			}
2219			if (map_def->value_size && map_def->value_size != sz) {
2220				pr_warn("map '%s': conflicting value size %u != %zd.\n",
2221					map_name, map_def->value_size, (ssize_t)sz);
2222				return -EINVAL;
2223			}
2224			map_def->value_size = sz;
2225			map_def->value_type_id = t->type;
2226			map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2227		}
2228		else if (strcmp(name, "values") == 0) {
2229			bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2230			bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2231			const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2232			char inner_map_name[128];
2233			int err;
2234
2235			if (is_inner) {
2236				pr_warn("map '%s': multi-level inner maps not supported.\n",
2237					map_name);
2238				return -ENOTSUP;
2239			}
2240			if (i != vlen - 1) {
2241				pr_warn("map '%s': '%s' member should be last.\n",
2242					map_name, name);
2243				return -EINVAL;
2244			}
2245			if (!is_map_in_map && !is_prog_array) {
2246				pr_warn("map '%s': should be map-in-map or prog-array.\n",
2247					map_name);
2248				return -ENOTSUP;
2249			}
2250			if (map_def->value_size && map_def->value_size != 4) {
2251				pr_warn("map '%s': conflicting value size %u != 4.\n",
2252					map_name, map_def->value_size);
2253				return -EINVAL;
2254			}
2255			map_def->value_size = 4;
2256			t = btf__type_by_id(btf, m->type);
2257			if (!t) {
2258				pr_warn("map '%s': %s type [%d] not found.\n",
2259					map_name, desc, m->type);
2260				return -EINVAL;
2261			}
2262			if (!btf_is_array(t) || btf_array(t)->nelems) {
2263				pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2264					map_name, desc);
2265				return -EINVAL;
2266			}
2267			t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2268			if (!btf_is_ptr(t)) {
2269				pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2270					map_name, desc, btf_kind_str(t));
2271				return -EINVAL;
2272			}
2273			t = skip_mods_and_typedefs(btf, t->type, NULL);
2274			if (is_prog_array) {
2275				if (!btf_is_func_proto(t)) {
2276					pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2277						map_name, btf_kind_str(t));
2278					return -EINVAL;
2279				}
2280				continue;
2281			}
2282			if (!btf_is_struct(t)) {
2283				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2284					map_name, btf_kind_str(t));
2285				return -EINVAL;
2286			}
2287
2288			snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2289			err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2290			if (err)
2291				return err;
2292
2293			map_def->parts |= MAP_DEF_INNER_MAP;
2294		} else if (strcmp(name, "pinning") == 0) {
2295			__u32 val;
2296
2297			if (is_inner) {
2298				pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2299				return -EINVAL;
2300			}
2301			if (!get_map_field_int(map_name, btf, m, &val))
2302				return -EINVAL;
2303			if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2304				pr_warn("map '%s': invalid pinning value %u.\n",
2305					map_name, val);
2306				return -EINVAL;
2307			}
2308			map_def->pinning = val;
2309			map_def->parts |= MAP_DEF_PINNING;
2310		} else if (strcmp(name, "map_extra") == 0) {
2311			__u32 map_extra;
2312
2313			if (!get_map_field_int(map_name, btf, m, &map_extra))
2314				return -EINVAL;
2315			map_def->map_extra = map_extra;
2316			map_def->parts |= MAP_DEF_MAP_EXTRA;
2317		} else {
2318			if (strict) {
2319				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2320				return -ENOTSUP;
2321			}
2322			pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2323		}
2324	}
2325
2326	if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2327		pr_warn("map '%s': map type isn't specified.\n", map_name);
2328		return -EINVAL;
2329	}
2330
2331	return 0;
2332}
2333
2334static size_t adjust_ringbuf_sz(size_t sz)
2335{
2336	__u32 page_sz = sysconf(_SC_PAGE_SIZE);
2337	__u32 mul;
2338
2339	/* if user forgot to set any size, make sure they see error */
2340	if (sz == 0)
2341		return 0;
2342	/* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2343	 * a power-of-2 multiple of kernel's page size. If user diligently
2344	 * satisfied these conditions, pass the size through.
2345	 */
2346	if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2347		return sz;
2348
2349	/* Otherwise find closest (page_sz * power_of_2) product bigger than
2350	 * user-set size to satisfy both user size request and kernel
2351	 * requirements and substitute correct max_entries for map creation.
2352	 */
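	/* E.g., with 4KB pages a requested size of 16000 becomes 16384
	 * (4 pages), the smallest page_sz * 2^n larger than the request.
	 */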
2353	for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2354		if (mul * page_sz > sz)
2355			return mul * page_sz;
2356	}
2357
2358	/* if it's impossible to satisfy the conditions (i.e., user size is
2359	 * very close to UINT_MAX but is not a power-of-2 multiple of
2360	 * page_size) then just return original size and let kernel reject it
2361	 */
2362	return sz;
2363}
2364
2365static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2366{
2367	map->def.type = def->map_type;
2368	map->def.key_size = def->key_size;
2369	map->def.value_size = def->value_size;
2370	map->def.max_entries = def->max_entries;
2371	map->def.map_flags = def->map_flags;
2372	map->map_extra = def->map_extra;
2373
2374	map->numa_node = def->numa_node;
2375	map->btf_key_type_id = def->key_type_id;
2376	map->btf_value_type_id = def->value_type_id;
2377
2378	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2379	if (map->def.type == BPF_MAP_TYPE_RINGBUF)
2380		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2381
2382	if (def->parts & MAP_DEF_MAP_TYPE)
2383		pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2384
2385	if (def->parts & MAP_DEF_KEY_TYPE)
2386		pr_debug("map '%s': found key [%u], sz = %u.\n",
2387			 map->name, def->key_type_id, def->key_size);
2388	else if (def->parts & MAP_DEF_KEY_SIZE)
2389		pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2390
2391	if (def->parts & MAP_DEF_VALUE_TYPE)
2392		pr_debug("map '%s': found value [%u], sz = %u.\n",
2393			 map->name, def->value_type_id, def->value_size);
2394	else if (def->parts & MAP_DEF_VALUE_SIZE)
2395		pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2396
2397	if (def->parts & MAP_DEF_MAX_ENTRIES)
2398		pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2399	if (def->parts & MAP_DEF_MAP_FLAGS)
2400		pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2401	if (def->parts & MAP_DEF_MAP_EXTRA)
2402		pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2403			 (unsigned long long)def->map_extra);
2404	if (def->parts & MAP_DEF_PINNING)
2405		pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2406	if (def->parts & MAP_DEF_NUMA_NODE)
2407		pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2408
2409	if (def->parts & MAP_DEF_INNER_MAP)
2410		pr_debug("map '%s': found inner map definition.\n", map->name);
2411}
2412
2413static const char *btf_var_linkage_str(__u32 linkage)
2414{
2415	switch (linkage) {
2416	case BTF_VAR_STATIC: return "static";
2417	case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2418	case BTF_VAR_GLOBAL_EXTERN: return "extern";
2419	default: return "unknown";
2420	}
2421}
2422
2423static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2424					 const struct btf_type *sec,
2425					 int var_idx, int sec_idx,
2426					 const Elf_Data *data, bool strict,
2427					 const char *pin_root_path)
2428{
2429	struct btf_map_def map_def = {}, inner_def = {};
2430	const struct btf_type *var, *def;
2431	const struct btf_var_secinfo *vi;
2432	const struct btf_var *var_extra;
2433	const char *map_name;
2434	struct bpf_map *map;
2435	int err;
2436
2437	vi = btf_var_secinfos(sec) + var_idx;
2438	var = btf__type_by_id(obj->btf, vi->type);
2439	var_extra = btf_var(var);
2440	map_name = btf__name_by_offset(obj->btf, var->name_off);
2441
2442	if (map_name == NULL || map_name[0] == '\0') {
2443		pr_warn("map #%d: empty name.\n", var_idx);
2444		return -EINVAL;
2445	}
2446	if ((__u64)vi->offset + vi->size > data->d_size) {
2447		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2448		return -EINVAL;
2449	}
2450	if (!btf_is_var(var)) {
2451		pr_warn("map '%s': unexpected var kind %s.\n",
2452			map_name, btf_kind_str(var));
2453		return -EINVAL;
2454	}
2455	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2456		pr_warn("map '%s': unsupported map linkage %s.\n",
2457			map_name, btf_var_linkage_str(var_extra->linkage));
2458		return -EOPNOTSUPP;
2459	}
2460
2461	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2462	if (!btf_is_struct(def)) {
2463		pr_warn("map '%s': unexpected def kind %s.\n",
2464			map_name, btf_kind_str(def));
2465		return -EINVAL;
2466	}
2467	if (def->size > vi->size) {
2468		pr_warn("map '%s': invalid def size.\n", map_name);
2469		return -EINVAL;
2470	}
2471
2472	map = bpf_object__add_map(obj);
2473	if (IS_ERR(map))
2474		return PTR_ERR(map);
2475	map->name = strdup(map_name);
2476	if (!map->name) {
2477		pr_warn("map '%s': failed to alloc map name.\n", map_name);
2478		return -ENOMEM;
2479	}
2480	map->libbpf_type = LIBBPF_MAP_UNSPEC;
2481	map->def.type = BPF_MAP_TYPE_UNSPEC;
2482	map->sec_idx = sec_idx;
2483	map->sec_offset = vi->offset;
2484	map->btf_var_idx = var_idx;
2485	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2486		 map_name, map->sec_idx, map->sec_offset);
2487
2488	err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2489	if (err)
2490		return err;
2491
2492	fill_map_from_def(map, &map_def);
2493
2494	if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2495		err = build_map_pin_path(map, pin_root_path);
2496		if (err) {
2497			pr_warn("map '%s': couldn't build pin path.\n", map->name);
2498			return err;
2499		}
2500	}
2501
2502	if (map_def.parts & MAP_DEF_INNER_MAP) {
2503		map->inner_map = calloc(1, sizeof(*map->inner_map));
2504		if (!map->inner_map)
2505			return -ENOMEM;
2506		map->inner_map->fd = -1;
2507		map->inner_map->sec_idx = sec_idx;
2508		map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2509		if (!map->inner_map->name)
2510			return -ENOMEM;
2511		sprintf(map->inner_map->name, "%s.inner", map_name);
2512
2513		fill_map_from_def(map->inner_map, &inner_def);
2514	}
2515
2516	err = bpf_map_find_btf_info(obj, map);
2517	if (err)
2518		return err;
2519
2520	return 0;
2521}
2522
2523static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2524					  const char *pin_root_path)
2525{
2526	const struct btf_type *sec = NULL;
2527	int nr_types, i, vlen, err;
2528	const struct btf_type *t;
2529	const char *name;
2530	Elf_Data *data;
2531	Elf_Scn *scn;
2532
2533	if (obj->efile.btf_maps_shndx < 0)
2534		return 0;
2535
2536	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2537	data = elf_sec_data(obj, scn);
2538	if (!scn || !data) {
2539		pr_warn("elf: failed to get %s map definitions for %s\n",
2540			MAPS_ELF_SEC, obj->path);
2541		return -EINVAL;
2542	}
2543
2544	nr_types = btf__type_cnt(obj->btf);
2545	for (i = 1; i < nr_types; i++) {
2546		t = btf__type_by_id(obj->btf, i);
2547		if (!btf_is_datasec(t))
2548			continue;
2549		name = btf__name_by_offset(obj->btf, t->name_off);
2550		if (strcmp(name, MAPS_ELF_SEC) == 0) {
2551			sec = t;
2552			obj->efile.btf_maps_sec_btf_id = i;
2553			break;
2554		}
2555	}
2556
2557	if (!sec) {
2558		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2559		return -ENOENT;
2560	}
2561
2562	vlen = btf_vlen(sec);
2563	for (i = 0; i < vlen; i++) {
2564		err = bpf_object__init_user_btf_map(obj, sec, i,
2565						    obj->efile.btf_maps_shndx,
2566						    data, strict,
2567						    pin_root_path);
2568		if (err)
2569			return err;
2570	}
2571
2572	return 0;
2573}
2574
2575static int bpf_object__init_maps(struct bpf_object *obj,
2576				 const struct bpf_object_open_opts *opts)
2577{
2578	const char *pin_root_path;
2579	bool strict;
2580	int err = 0;
2581
2582	strict = !OPTS_GET(opts, relaxed_maps, false);
2583	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2584
2585	err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2586	err = err ?: bpf_object__init_global_data_maps(obj);
2587	err = err ?: bpf_object__init_kconfig_map(obj);
2588	err = err ?: bpf_object__init_struct_ops_maps(obj);
2589
2590	return err;
2591}
2592
2593static bool section_have_execinstr(struct bpf_object *obj, int idx)
2594{
2595	Elf64_Shdr *sh;
2596
2597	sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2598	if (!sh)
2599		return false;
2600
2601	return sh->sh_flags & SHF_EXECINSTR;
2602}
2603
2604static bool btf_needs_sanitization(struct bpf_object *obj)
2605{
2606	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2607	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2608	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2609	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2610	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2611	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2612	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2613
2614	return !has_func || !has_datasec || !has_func_global || !has_float ||
2615	       !has_decl_tag || !has_type_tag || !has_enum64;
2616}
2617
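/* Rewrite, in place, BTF kinds that the target kernel doesn't understand into
 * older kinds whose trailing per-entry record sizes match, so that type IDs
 * and the overall raw BTF layout stay valid: VAR/DECL_TAG -> INT,
 * DATASEC -> STRUCT, FUNC_PROTO -> ENUM, FUNC -> TYPEDEF, FLOAT -> STRUCT,
 * TYPE_TAG -> CONST, ENUM64 -> UNION (plus downgrading global FUNC linkage
 * and clearing the enum kflag where needed).
 */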
2618static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2619{
2620	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2621	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2622	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2623	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2624	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2625	bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2626	bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2627	int enum64_placeholder_id = 0;
2628	struct btf_type *t;
2629	int i, j, vlen;
2630
2631	for (i = 1; i < btf__type_cnt(btf); i++) {
2632		t = (struct btf_type *)btf__type_by_id(btf, i);
2633
2634		if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
2635			/* replace VAR/DECL_TAG with INT */
2636			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2637			/*
2638			 * using size = 1 is the safest choice, 4 will be too
2639			 * big and cause kernel BTF validation failure if
2640			 * original variable took less than 4 bytes
2641			 */
2642			t->size = 1;
2643			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2644		} else if (!has_datasec && btf_is_datasec(t)) {
2645			/* replace DATASEC with STRUCT */
2646			const struct btf_var_secinfo *v = btf_var_secinfos(t);
2647			struct btf_member *m = btf_members(t);
2648			struct btf_type *vt;
2649			char *name;
2650
2651			name = (char *)btf__name_by_offset(btf, t->name_off);
2652			while (*name) {
2653				if (*name == '.')
2654					*name = '_';
2655				name++;
2656			}
2657
2658			vlen = btf_vlen(t);
2659			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2660			for (j = 0; j < vlen; j++, v++, m++) {
2661				/* order of field assignments is important */
2662				m->offset = v->offset * 8;
2663				m->type = v->type;
2664				/* preserve variable name as member name */
2665				vt = (void *)btf__type_by_id(btf, v->type);
2666				m->name_off = vt->name_off;
2667			}
2668		} else if (!has_func && btf_is_func_proto(t)) {
2669			/* replace FUNC_PROTO with ENUM */
2670			vlen = btf_vlen(t);
2671			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2672			t->size = sizeof(__u32); /* kernel enforced */
2673		} else if (!has_func && btf_is_func(t)) {
2674			/* replace FUNC with TYPEDEF */
2675			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2676		} else if (!has_func_global && btf_is_func(t)) {
2677			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2678			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2679		} else if (!has_float && btf_is_float(t)) {
2680			/* replace FLOAT with an equally-sized empty STRUCT;
2681			 * since C compilers do not accept e.g. "float" as a
2682			 * valid struct name, make it anonymous
2683			 */
2684			t->name_off = 0;
2685			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2686		} else if (!has_type_tag && btf_is_type_tag(t)) {
2687			/* replace TYPE_TAG with a CONST */
2688			t->name_off = 0;
2689			t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
2690		} else if (!has_enum64 && btf_is_enum(t)) {
2691			/* clear the kflag */
2692			t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
2693		} else if (!has_enum64 && btf_is_enum64(t)) {
2694			/* replace ENUM64 with a union */
2695			struct btf_member *m;
2696
2697			if (enum64_placeholder_id == 0) {
2698				enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
2699				if (enum64_placeholder_id < 0)
2700					return enum64_placeholder_id;
2701
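				/* adding a type may reallocate BTF type data,
				 * invalidating t; re-fetch it by its ID
				 */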
2702				t = (struct btf_type *)btf__type_by_id(btf, i);
2703			}
2704
2705			m = btf_members(t);
2706			vlen = btf_vlen(t);
2707			t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
2708			for (j = 0; j < vlen; j++, m++) {
2709				m->type = enum64_placeholder_id;
2710				m->offset = 0;
2711			}
2712		}
2713	}
2714
2715	return 0;
2716}
2717
2718static bool libbpf_needs_btf(const struct bpf_object *obj)
2719{
2720	return obj->efile.btf_maps_shndx >= 0 ||
2721	       obj->efile.st_ops_shndx >= 0 ||
2722	       obj->nr_extern > 0;
2723}
2724
2725static bool kernel_needs_btf(const struct bpf_object *obj)
2726{
2727	return obj->efile.st_ops_shndx >= 0;
2728}
2729
2730static int bpf_object__init_btf(struct bpf_object *obj,
2731				Elf_Data *btf_data,
2732				Elf_Data *btf_ext_data)
2733{
2734	int err = -ENOENT;
2735
2736	if (btf_data) {
2737		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2738		err = libbpf_get_error(obj->btf);
2739		if (err) {
2740			obj->btf = NULL;
2741			pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2742			goto out;
2743		}
2744		/* enforce 8-byte pointers for BPF-targeted BTFs */
2745		btf__set_pointer_size(obj->btf, 8);
2746	}
2747	if (btf_ext_data) {
2748		struct btf_ext_info *ext_segs[3];
2749		int seg_num, sec_num;
2750
2751		if (!obj->btf) {
2752			pr_debug("Ignoring ELF section %s because the ELF section %s it depends on is not found.\n",
2753				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2754			goto out;
2755		}
2756		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2757		err = libbpf_get_error(obj->btf_ext);
2758		if (err) {
2759			pr_warn("Error loading ELF section %s: %d. Ignoring it and continuing.\n",
2760				BTF_EXT_ELF_SEC, err);
2761			obj->btf_ext = NULL;
2762			goto out;
2763		}
2764
2765		/* setup .BTF.ext to ELF section mapping */
2766		ext_segs[0] = &obj->btf_ext->func_info;
2767		ext_segs[1] = &obj->btf_ext->line_info;
2768		ext_segs[2] = &obj->btf_ext->core_relo_info;
2769		for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
2770			struct btf_ext_info *seg = ext_segs[seg_num];
2771			const struct btf_ext_info_sec *sec;
2772			const char *sec_name;
2773			Elf_Scn *scn;
2774
2775			if (seg->sec_cnt == 0)
2776				continue;
2777
2778			seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
2779			if (!seg->sec_idxs) {
2780				err = -ENOMEM;
2781				goto out;
2782			}
2783
2784			sec_num = 0;
2785			for_each_btf_ext_sec(seg, sec) {
2786				/* preventively increment index to avoid doing
2787				 * this before every continue below
2788				 */
2789				sec_num++;
2790
2791				sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
2792				if (str_is_empty(sec_name))
2793					continue;
2794				scn = elf_sec_by_name(obj, sec_name);
2795				if (!scn)
2796					continue;
2797
2798				seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
2799			}
2800		}
2801	}
2802out:
2803	if (err && libbpf_needs_btf(obj)) {
2804		pr_warn("BTF is required, but is missing or corrupted.\n");
2805		return err;
2806	}
2807	return 0;
2808}
2809
2810static int compare_vsi_off(const void *_a, const void *_b)
2811{
2812	const struct btf_var_secinfo *a = _a;
2813	const struct btf_var_secinfo *b = _b;
2814
2815	return a->offset - b->offset;
2816}
2817
2818static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
2819			     struct btf_type *t)
2820{
2821	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
2822	const char *name = btf__name_by_offset(btf, t->name_off);
2823	const struct btf_type *t_var;
2824	struct btf_var_secinfo *vsi;
2825	const struct btf_var *var;
2826	int ret;
2827
2828	if (!name) {
2829		pr_debug("No name found in string section for DATASEC kind.\n");
2830		return -ENOENT;
2831	}
2832
2833	/* .extern datasec size and var offsets were set correctly during
2834	 * extern collection step, so just skip straight to sorting variables
2835	 */
2836	if (t->size)
2837		goto sort_vars;
2838
2839	ret = find_elf_sec_sz(obj, name, &size);
2840	if (ret || !size) {
2841		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
2842		return -ENOENT;
2843	}
2844
2845	t->size = size;
2846
2847	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
2848		t_var = btf__type_by_id(btf, vsi->type);
2849		if (!t_var || !btf_is_var(t_var)) {
2850			pr_debug("Non-VAR type seen in section %s\n", name);
2851			return -EINVAL;
2852		}
2853
2854		var = btf_var(t_var);
2855		if (var->linkage == BTF_VAR_STATIC)
2856			continue;
2857
2858		name = btf__name_by_offset(btf, t_var->name_off);
2859		if (!name) {
2860			pr_debug("No name found in string section for VAR kind\n");
2861			return -ENOENT;
2862		}
2863
2864		ret = find_elf_var_offset(obj, name, &off);
2865		if (ret) {
2866			pr_debug("No offset found in symbol table for VAR %s\n",
2867				 name);
2868			return -ENOENT;
2869		}
2870
2871		vsi->offset = off;
2872	}
2873
2874sort_vars:
2875	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
2876	return 0;
2877}
2878
2879static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
2880{
2881	int err = 0;
2882	__u32 i, n = btf__type_cnt(btf);
2883
2884	for (i = 1; i < n; i++) {
2885		struct btf_type *t = btf_type_by_id(btf, i);
2886
2887		/* Loader needs to fix up some of the things compiler
2888		 * couldn't get its hands on while emitting BTF. This
2889		 * is section size and global variable offset. We use
2890		 * the info from the ELF itself for this purpose.
2891		 */
2892		if (btf_is_datasec(t)) {
2893			err = btf_fixup_datasec(obj, btf, t);
2894			if (err)
2895				break;
2896		}
2897	}
2898
2899	return libbpf_err(err);
2900}
2901
2902static int bpf_object__finalize_btf(struct bpf_object *obj)
2903{
2904	int err;
2905
2906	if (!obj->btf)
2907		return 0;
2908
2909	err = btf_finalize_data(obj, obj->btf);
2910	if (err) {
2911		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2912		return err;
2913	}
2914
2915	return 0;
2916}
2917
2918static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
2919{
2920	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2921	    prog->type == BPF_PROG_TYPE_LSM)
2922		return true;
2923
2924	/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2925	 * also need vmlinux BTF
2926	 */
2927	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2928		return true;
2929
2930	return false;
2931}
2932
2933static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
2934{
2935	struct bpf_program *prog;
2936	int i;
2937
2938	/* CO-RE relocations need kernel BTF, only when btf_custom_path
2939	 * is not specified
2940	 */
2941	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
2942		return true;
2943
2944	/* Support for typed ksyms needs kernel BTF */
2945	for (i = 0; i < obj->nr_extern; i++) {
2946		const struct extern_desc *ext;
2947
2948		ext = &obj->externs[i];
2949		if (ext->type == EXT_KSYM && ext->ksym.type_id)
2950			return true;
2951	}
2952
2953	bpf_object__for_each_program(prog, obj) {
2954		if (!prog->autoload)
2955			continue;
2956		if (prog_needs_vmlinux_btf(prog))
2957			return true;
2958	}
2959
2960	return false;
2961}
2962
2963static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2964{
2965	int err;
2966
2967	/* btf_vmlinux could be loaded earlier */
2968	if (obj->btf_vmlinux || obj->gen_loader)
2969		return 0;
2970
2971	if (!force && !obj_needs_vmlinux_btf(obj))
2972		return 0;
2973
2974	obj->btf_vmlinux = btf__load_vmlinux_btf();
2975	err = libbpf_get_error(obj->btf_vmlinux);
2976	if (err) {
2977		pr_warn("Error loading vmlinux BTF: %d\n", err);
2978		obj->btf_vmlinux = NULL;
2979		return err;
2980	}
2981	return 0;
2982}
2983
2984static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2985{
2986	struct btf *kern_btf = obj->btf;
2987	bool btf_mandatory, sanitize;
2988	int i, err = 0;
2989
2990	if (!obj->btf)
2991		return 0;
2992
2993	if (!kernel_supports(obj, FEAT_BTF)) {
2994		if (kernel_needs_btf(obj)) {
2995			err = -EOPNOTSUPP;
2996			goto report;
2997		}
2998		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
2999		return 0;
3000	}
3001
3002	/* Even though some subprogs are global/weak, user might prefer more
3003	 * permissive BPF verification process that BPF verifier performs for
3004	 * static functions, taking into account more context from the caller
3005	 * functions. In that case, they need to mark such subprogs with
3006	 * __attribute__((visibility("hidden"))) and libbpf will adjust
3007	 * corresponding FUNC BTF type to be marked as static and trigger more
3008	 * involved BPF verification process.
3009	 */
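	/* For example (illustrative, 'my_subprog' and 'struct ctx' are
	 * made-up names), a subprogram declared as
	 *   __hidden int my_subprog(struct ctx *ctx);
	 * with __hidden from bpf_helpers.h expanding to
	 * __attribute__((visibility("hidden"))), gets its FUNC linkage
	 * rewritten from global to static below.
	 */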
3010	for (i = 0; i < obj->nr_programs; i++) {
3011		struct bpf_program *prog = &obj->programs[i];
3012		struct btf_type *t;
3013		const char *name;
3014		int j, n;
3015
3016		if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3017			continue;
3018
3019		n = btf__type_cnt(obj->btf);
3020		for (j = 1; j < n; j++) {
3021			t = btf_type_by_id(obj->btf, j);
3022			if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3023				continue;
3024
3025			name = btf__str_by_offset(obj->btf, t->name_off);
3026			if (strcmp(name, prog->name) != 0)
3027				continue;
3028
3029			t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3030			break;
3031		}
3032	}
3033
3034	sanitize = btf_needs_sanitization(obj);
3035	if (sanitize) {
3036		const void *raw_data;
3037		__u32 sz;
3038
3039		/* clone BTF to sanitize a copy and leave the original intact */
3040		raw_data = btf__raw_data(obj->btf, &sz);
3041		kern_btf = btf__new(raw_data, sz);
3042		err = libbpf_get_error(kern_btf);
3043		if (err)
3044			return err;
3045
3046		/* enforce 8-byte pointers for BPF-targeted BTFs */
3047		btf__set_pointer_size(obj->btf, 8);
3048		err = bpf_object__sanitize_btf(obj, kern_btf);
3049		if (err)
3050			return err;
3051	}
3052
3053	if (obj->gen_loader) {
3054		__u32 raw_size = 0;
3055		const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3056
3057		if (!raw_data)
3058			return -ENOMEM;
3059		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3060		/* Pretend to have valid FD to pass various fd >= 0 checks.
3061		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3062		 */
3063		btf__set_fd(kern_btf, 0);
3064	} else {
3065		/* currently BPF_BTF_LOAD only supports log_level 1 */
3066		err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3067					   obj->log_level ? 1 : 0);
3068	}
3069	if (sanitize) {
3070		if (!err) {
3071			/* move fd to libbpf's BTF */
3072			btf__set_fd(obj->btf, btf__fd(kern_btf));
3073			btf__set_fd(kern_btf, -1);
3074		}
3075		btf__free(kern_btf);
3076	}
3077report:
3078	if (err) {
3079		btf_mandatory = kernel_needs_btf(obj);
3080		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3081			btf_mandatory ? "BTF is mandatory, can't proceed."
3082				      : "BTF is optional, ignoring.");
3083		if (!btf_mandatory)
3084			err = 0;
3085	}
3086	return err;
3087}
3088
3089static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3090{
3091	const char *name;
3092
3093	name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3094	if (!name) {
3095		pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
3096			off, obj->path, elf_errmsg(-1));
3097		return NULL;
3098	}
3099
3100	return name;
3101}
3102
3103static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3104{
3105	const char *name;
3106
3107	name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3108	if (!name) {
3109		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3110			off, obj->path, elf_errmsg(-1));
3111		return NULL;
3112	}
3113
3114	return name;
3115}
3116
3117static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3118{
3119	Elf_Scn *scn;
3120
3121	scn = elf_getscn(obj->efile.elf, idx);
3122	if (!scn) {
3123		pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3124			idx, obj->path, elf_errmsg(-1));
3125		return NULL;
3126	}
3127	return scn;
3128}
3129
3130static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3131{
3132	Elf_Scn *scn = NULL;
3133	Elf *elf = obj->efile.elf;
3134	const char *sec_name;
3135
3136	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3137		sec_name = elf_sec_name(obj, scn);
3138		if (!sec_name)
3139			return NULL;
3140
3141		if (strcmp(sec_name, name) != 0)
3142			continue;
3143
3144		return scn;
3145	}
3146	return NULL;
3147}
3148
3149static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3150{
3151	Elf64_Shdr *shdr;
3152
3153	if (!scn)
3154		return NULL;
3155
3156	shdr = elf64_getshdr(scn);
3157	if (!shdr) {
3158		pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3159			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3160		return NULL;
3161	}
3162
3163	return shdr;
3164}
3165
3166static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3167{
3168	const char *name;
3169	Elf64_Shdr *sh;
3170
3171	if (!scn)
3172		return NULL;
3173
3174	sh = elf_sec_hdr(obj, scn);
3175	if (!sh)
3176		return NULL;
3177
3178	name = elf_sec_str(obj, sh->sh_name);
3179	if (!name) {
3180		pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3181			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3182		return NULL;
3183	}
3184
3185	return name;
3186}
3187
3188static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3189{
3190	Elf_Data *data;
3191
3192	if (!scn)
3193		return NULL;
3194
3195	data = elf_getdata(scn, 0);
3196	if (!data) {
3197		pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3198			elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3199			obj->path, elf_errmsg(-1));
3200		return NULL;
3201	}
3202
3203	return data;
3204}
3205
3206static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3207{
3208	if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3209		return NULL;
3210
3211	return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3212}
3213
3214static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3215{
3216	if (idx >= data->d_size / sizeof(Elf64_Rel))
3217		return NULL;
3218
3219	return (Elf64_Rel *)data->d_buf + idx;
3220}
3221
3222static bool is_sec_name_dwarf(const char *name)
3223{
3224	/* approximation, but the actual list is too long */
3225	return str_has_pfx(name, ".debug_");
3226}
3227
3228static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3229{
3230	/* no special handling of .strtab */
3231	if (hdr->sh_type == SHT_STRTAB)
3232		return true;
3233
3234	/* ignore .llvm_addrsig section as well */
3235	if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3236		return true;
3237
3238	/* no subprograms will lead to an empty .text section, ignore it */
3239	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3240	    strcmp(name, ".text") == 0)
3241		return true;
3242
3243	/* DWARF sections */
3244	if (is_sec_name_dwarf(name))
3245		return true;
3246
3247	if (str_has_pfx(name, ".rel")) {
3248		name += sizeof(".rel") - 1;
3249		/* DWARF section relocations */
3250		if (is_sec_name_dwarf(name))
3251			return true;
3252
3253		/* .BTF and .BTF.ext don't need relocations */
3254		if (strcmp(name, BTF_ELF_SEC) == 0 ||
3255		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
3256			return true;
3257	}
3258
3259	return false;
3260}
3261
3262static int cmp_progs(const void *_a, const void *_b)
3263{
3264	const struct bpf_program *a = _a;
3265	const struct bpf_program *b = _b;
3266
3267	if (a->sec_idx != b->sec_idx)
3268		return a->sec_idx < b->sec_idx ? -1 : 1;
3269
3270	/* sec_insn_off can't be the same within the section */
3271	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3272}
3273
3274static int bpf_object__elf_collect(struct bpf_object *obj)
3275{
3276	struct elf_sec_desc *sec_desc;
3277	Elf *elf = obj->efile.elf;
3278	Elf_Data *btf_ext_data = NULL;
3279	Elf_Data *btf_data = NULL;
3280	int idx = 0, err = 0;
3281	const char *name;
3282	Elf_Data *data;
3283	Elf_Scn *scn;
3284	Elf64_Shdr *sh;
3285
3286	/* ELF section indices are 0-based, but sec #0 is special "invalid"
3287	 * section. e_shnum does include sec #0, so e_shnum is the necessary
3288	 * size of an array to keep all the sections.
3289	 */
3290	obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
3291	obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3292	if (!obj->efile.secs)
3293		return -ENOMEM;
3294
3295	/* a bunch of ELF parsing functionality depends on processing symbols,
3296	 * so do the first pass and find the symbol table
3297	 */
3298	scn = NULL;
3299	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3300		sh = elf_sec_hdr(obj, scn);
3301		if (!sh)
3302			return -LIBBPF_ERRNO__FORMAT;
3303
3304		if (sh->sh_type == SHT_SYMTAB) {
3305			if (obj->efile.symbols) {
3306				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3307				return -LIBBPF_ERRNO__FORMAT;
3308			}
3309
3310			data = elf_sec_data(obj, scn);
3311			if (!data)
3312				return -LIBBPF_ERRNO__FORMAT;
3313
3314			idx = elf_ndxscn(scn);
3315
3316			obj->efile.symbols = data;
3317			obj->efile.symbols_shndx = idx;
3318			obj->efile.strtabidx = sh->sh_link;
3319		}
3320	}
3321
3322	if (!obj->efile.symbols) {
3323		pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3324			obj->path);
3325		return -ENOENT;
3326	}
3327
3328	scn = NULL;
3329	while ((scn = elf_nextscn(elf, scn)) != NULL) {
3330		idx = elf_ndxscn(scn);
3331		sec_desc = &obj->efile.secs[idx];
3332
3333		sh = elf_sec_hdr(obj, scn);
3334		if (!sh)
3335			return -LIBBPF_ERRNO__FORMAT;
3336
3337		name = elf_sec_str(obj, sh->sh_name);
3338		if (!name)
3339			return -LIBBPF_ERRNO__FORMAT;
3340
3341		if (ignore_elf_section(sh, name))
3342			continue;
3343
3344		data = elf_sec_data(obj, scn);
3345		if (!data)
3346			return -LIBBPF_ERRNO__FORMAT;
3347
3348		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3349			 idx, name, (unsigned long)data->d_size,
3350			 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3351			 (int)sh->sh_type);
3352
3353		if (strcmp(name, "license") == 0) {
3354			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3355			if (err)
3356				return err;
3357		} else if (strcmp(name, "version") == 0) {
3358			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3359			if (err)
3360				return err;
3361		} else if (strcmp(name, "maps") == 0) {
3362			obj->efile.maps_shndx = idx;
3363		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3364			obj->efile.btf_maps_shndx = idx;
3365		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
3366			if (sh->sh_type != SHT_PROGBITS)
3367				return -LIBBPF_ERRNO__FORMAT;
3368			btf_data = data;
3369		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3370			if (sh->sh_type != SHT_PROGBITS)
3371				return -LIBBPF_ERRNO__FORMAT;
3372			btf_ext_data = data;
3373		} else if (sh->sh_type == SHT_SYMTAB) {
3374			/* already processed during the first pass above */
3375		} else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3376			if (sh->sh_flags & SHF_EXECINSTR) {
3377				if (strcmp(name, ".text") == 0)
3378					obj->efile.text_shndx = idx;
3379				err = bpf_object__add_programs(obj, data, name, idx);
3380				if (err)
3381					return err;
3382			} else if (strcmp(name, DATA_SEC) == 0 ||
3383				   str_has_pfx(name, DATA_SEC ".")) {
3384				sec_desc->sec_type = SEC_DATA;
3385				sec_desc->shdr = sh;
3386				sec_desc->data = data;
3387			} else if (strcmp(name, RODATA_SEC) == 0 ||
3388				   str_has_pfx(name, RODATA_SEC ".")) {
3389				sec_desc->sec_type = SEC_RODATA;
3390				sec_desc->shdr = sh;
3391				sec_desc->data = data;
3392			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3393				obj->efile.st_ops_data = data;
3394				obj->efile.st_ops_shndx = idx;
3395			} else {
3396				pr_info("elf: skipping unrecognized data section(%d) %s\n",
3397					idx, name);
3398			}
3399		} else if (sh->sh_type == SHT_REL) {
3400			int targ_sec_idx = sh->sh_info; /* points to other section */
3401
3402			if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3403			    targ_sec_idx >= obj->efile.sec_cnt)
3404				return -LIBBPF_ERRNO__FORMAT;
3405
3406			/* Only do relo for section with exec instructions */
3407			if (!section_have_execinstr(obj, targ_sec_idx) &&
3408			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3409			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
3410				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3411					idx, name, targ_sec_idx,
3412					elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3413				continue;
3414			}
3415
3416			sec_desc->sec_type = SEC_RELO;
3417			sec_desc->shdr = sh;
3418			sec_desc->data = data;
3419		} else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
3420			sec_desc->sec_type = SEC_BSS;
3421			sec_desc->shdr = sh;
3422			sec_desc->data = data;
3423		} else {
3424			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3425				(size_t)sh->sh_size);
3426		}
3427	}
3428
3429	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3430		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3431		return -LIBBPF_ERRNO__FORMAT;
3432	}
3433
3434	/* sort BPF programs by section index and in-section instruction offset
3435	 * for faster search */
3436	if (obj->nr_programs)
3437		qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3438
3439	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3440}
3441
3442static bool sym_is_extern(const Elf64_Sym *sym)
3443{
3444	int bind = ELF64_ST_BIND(sym->st_info);
3445	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3446	return sym->st_shndx == SHN_UNDEF &&
3447	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
3448	       ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3449}
3450
3451static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3452{
3453	int bind = ELF64_ST_BIND(sym->st_info);
3454	int type = ELF64_ST_TYPE(sym->st_info);
3455
3456	/* in .text section */
3457	if (sym->st_shndx != text_shndx)
3458		return false;
3459
3460	/* local function */
3461	if (bind == STB_LOCAL && type == STT_SECTION)
3462		return true;
3463
3464	/* global function */
3465	return bind == STB_GLOBAL && type == STT_FUNC;
3466}
3467
3468static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3469{
3470	const struct btf_type *t;
3471	const char *tname;
3472	int i, n;
3473
3474	if (!btf)
3475		return -ESRCH;
3476
3477	n = btf__type_cnt(btf);
3478	for (i = 1; i < n; i++) {
3479		t = btf__type_by_id(btf, i);
3480
3481		if (!btf_is_var(t) && !btf_is_func(t))
3482			continue;
3483
3484		tname = btf__name_by_offset(btf, t->name_off);
3485		if (strcmp(tname, ext_name))
3486			continue;
3487
3488		if (btf_is_var(t) &&
3489		    btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3490			return -EINVAL;
3491
3492		if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3493			return -EINVAL;
3494
3495		return i;
3496	}
3497
3498	return -ENOENT;
3499}
3500
3501static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3502	const struct btf_var_secinfo *vs;
3503	const struct btf_type *t;
3504	int i, j, n;
3505
3506	if (!btf)
3507		return -ESRCH;
3508
3509	n = btf__type_cnt(btf);
3510	for (i = 1; i < n; i++) {
3511		t = btf__type_by_id(btf, i);
3512
3513		if (!btf_is_datasec(t))
3514			continue;
3515
3516		vs = btf_var_secinfos(t);
3517		for (j = 0; j < btf_vlen(t); j++, vs++) {
3518			if (vs->type == ext_btf_id)
3519				return i;
3520		}
3521	}
3522
3523	return -ENOENT;
3524}
3525
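/* Map a kcfg extern's BTF type to its kcfg_type. Illustrative declarations
 * (assuming the __kconfig attribute and 'enum libbpf_tristate' from
 * bpf_helpers.h; the CONFIG_* names are arbitrary):
 *
 *	extern int CONFIG_HZ __kconfig;                        -> KCFG_INT
 *	extern char CONFIG_X __kconfig;                        -> KCFG_CHAR
 *	extern bool CONFIG_BPF_SYSCALL __kconfig;              -> KCFG_BOOL
 *	extern enum libbpf_tristate CONFIG_BPF_LSM __kconfig;  -> KCFG_TRISTATE
 *	extern char CONFIG_CMDLINE[64] __kconfig;              -> KCFG_CHAR_ARR
 */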
3526static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3527				     bool *is_signed)
3528{
3529	const struct btf_type *t;
3530	const char *name;
3531
3532	t = skip_mods_and_typedefs(btf, id, NULL);
3533	name = btf__name_by_offset(btf, t->name_off);
3534
3535	if (is_signed)
3536		*is_signed = false;
3537	switch (btf_kind(t)) {
3538	case BTF_KIND_INT: {
3539		int enc = btf_int_encoding(t);
3540
3541		if (enc & BTF_INT_BOOL)
3542			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3543		if (is_signed)
3544			*is_signed = enc & BTF_INT_SIGNED;
3545		if (t->size == 1)
3546			return KCFG_CHAR;
3547		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3548			return KCFG_UNKNOWN;
3549		return KCFG_INT;
3550	}
3551	case BTF_KIND_ENUM:
3552		if (t->size != 4)
3553			return KCFG_UNKNOWN;
3554		if (strcmp(name, "libbpf_tristate"))
3555			return KCFG_UNKNOWN;
3556		return KCFG_TRISTATE;
3557	case BTF_KIND_ENUM64:
3558		if (strcmp(name, "libbpf_tristate"))
3559			return KCFG_UNKNOWN;
3560		return KCFG_TRISTATE;
3561	case BTF_KIND_ARRAY:
3562		if (btf_array(t)->nelems == 0)
3563			return KCFG_UNKNOWN;
3564		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3565			return KCFG_UNKNOWN;
3566		return KCFG_CHAR_ARR;
3567	default:
3568		return KCFG_UNKNOWN;
3569	}
3570}
3571
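/* Sort order for externs: kcfg externs are grouped together and ordered by
 * descending alignment, then ascending size, so that their .kconfig map
 * offsets can later be assigned sequentially with minimal padding.
 */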
3572static int cmp_externs(const void *_a, const void *_b)
3573{
3574	const struct extern_desc *a = _a;
3575	const struct extern_desc *b = _b;
3576
3577	if (a->type != b->type)
3578		return a->type < b->type ? -1 : 1;
3579
3580	if (a->type == EXT_KCFG) {
3581		/* descending order by alignment requirements */
3582		if (a->kcfg.align != b->kcfg.align)
3583			return a->kcfg.align > b->kcfg.align ? -1 : 1;
3584		/* ascending order by size, within same alignment class */
3585		if (a->kcfg.sz != b->kcfg.sz)
3586			return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3587	}
3588
3589	/* resolve ties by name */
3590	return strcmp(a->name, b->name);
3591}
3592
3593static int find_int_btf_id(const struct btf *btf)
3594{
3595	const struct btf_type *t;
3596	int i, n;
3597
3598	n = btf__type_cnt(btf);
3599	for (i = 1; i < n; i++) {
3600		t = btf__type_by_id(btf, i);
3601
3602		if (btf_is_int(t) && btf_int_bits(t) == 32)
3603			return i;
3604	}
3605
3606	return 0;
3607}
3608
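/* If the .ksyms DATASEC contains any extern function entry, add a placeholder
 * int VAR ("dummy_ksym"); bpf_object__collect_externs() later substitutes its
 * ID for those FUNC entries so the DATASEC passes kernel BTF validation.
 */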
3609static int add_dummy_ksym_var(struct btf *btf)
3610{
3611	int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3612	const struct btf_var_secinfo *vs;
3613	const struct btf_type *sec;
3614
3615	if (!btf)
3616		return 0;
3617
3618	sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
3619					    BTF_KIND_DATASEC);
3620	if (sec_btf_id < 0)
3621		return 0;
3622
3623	sec = btf__type_by_id(btf, sec_btf_id);
3624	vs = btf_var_secinfos(sec);
3625	for (i = 0; i < btf_vlen(sec); i++, vs++) {
3626		const struct btf_type *vt;
3627
3628		vt = btf__type_by_id(btf, vs->type);
3629		if (btf_is_func(vt))
3630			break;
3631	}
3632
3633	/* No func in ksyms sec.  No need to add dummy var. */
3634	if (i == btf_vlen(sec))
3635		return 0;
3636
3637	int_btf_id = find_int_btf_id(btf);
3638	dummy_var_btf_id = btf__add_var(btf,
3639					"dummy_ksym",
3640					BTF_VAR_GLOBAL_ALLOCATED,
3641					int_btf_id);
3642	if (dummy_var_btf_id < 0)
3643		pr_warn("cannot create a dummy_ksym var\n");
3644
3645	return dummy_var_btf_id;
3646}
3647
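/* Scan the ELF symbol table for extern symbols (undefined, global or weak,
 * type NOTYPE) and build obj->externs. Each extern is classified based on the
 * BTF DATASEC its VAR/FUNC belongs to: .kconfig externs become EXT_KCFG,
 * .ksyms externs become EXT_KSYM, anything else is rejected.
 */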
3648static int bpf_object__collect_externs(struct bpf_object *obj)
3649{
3650	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
3651	const struct btf_type *t;
3652	struct extern_desc *ext;
3653	int i, n, off, dummy_var_btf_id;
3654	const char *ext_name, *sec_name;
3655	Elf_Scn *scn;
3656	Elf64_Shdr *sh;
3657
3658	if (!obj->efile.symbols)
3659		return 0;
3660
3661	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3662	sh = elf_sec_hdr(obj, scn);
3663	if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
3664		return -LIBBPF_ERRNO__FORMAT;
3665
3666	dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
3667	if (dummy_var_btf_id < 0)
3668		return dummy_var_btf_id;
3669
3670	n = sh->sh_size / sh->sh_entsize;
3671	pr_debug("looking for externs among %d symbols...\n", n);
3672
3673	for (i = 0; i < n; i++) {
3674		Elf64_Sym *sym = elf_sym_by_idx(obj, i);
3675
3676		if (!sym)
3677			return -LIBBPF_ERRNO__FORMAT;
3678		if (!sym_is_extern(sym))
3679			continue;
3680		ext_name = elf_sym_str(obj, sym->st_name);
3681		if (!ext_name || !ext_name[0])
3682			continue;
3683
3684		ext = obj->externs;
3685		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
3686		if (!ext)
3687			return -ENOMEM;
3688		obj->externs = ext;
3689		ext = &ext[obj->nr_extern];
3690		memset(ext, 0, sizeof(*ext));
3691		obj->nr_extern++;
3692
3693		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3694		if (ext->btf_id <= 0) {
3695			pr_warn("failed to find BTF for extern '%s': %d\n",
3696				ext_name, ext->btf_id);
3697			return ext->btf_id;
3698		}
3699		t = btf__type_by_id(obj->btf, ext->btf_id);
3700		ext->name = btf__name_by_offset(obj->btf, t->name_off);
3701		ext->sym_idx = i;
3702		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
3703
3704		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3705		if (ext->sec_btf_id <= 0) {
3706			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3707				ext_name, ext->btf_id, ext->sec_btf_id);
3708			return ext->sec_btf_id;
3709		}
3710		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3711		sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3712
3713		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3714			if (btf_is_func(t)) {
3715				pr_warn("extern function %s is unsupported under %s section\n",
3716					ext->name, KCONFIG_SEC);
3717				return -ENOTSUP;
3718			}
3719			kcfg_sec = sec;
3720			ext->type = EXT_KCFG;
3721			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3722			if (ext->kcfg.sz <= 0) {
3723				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3724					ext_name, ext->kcfg.sz);
3725				return ext->kcfg.sz;
3726			}
3727			ext->kcfg.align = btf__align_of(obj->btf, t->type);
3728			if (ext->kcfg.align <= 0) {
3729				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3730					ext_name, ext->kcfg.align);
3731				return -EINVAL;
3732			}
3733			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3734						        &ext->kcfg.is_signed);
3735			if (ext->kcfg.type == KCFG_UNKNOWN) {
3736				pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
3737				return -ENOTSUP;
3738			}
3739		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
3740			ksym_sec = sec;
3741			ext->type = EXT_KSYM;
3742			skip_mods_and_typedefs(obj->btf, t->type,
3743					       &ext->ksym.type_id);
3744		} else {
3745			pr_warn("unrecognized extern section '%s'\n", sec_name);
3746			return -ENOTSUP;
3747		}
3748	}
3749	pr_debug("collected %d externs total\n", obj->nr_extern);
3750
3751	if (!obj->nr_extern)
3752		return 0;
3753
3754	/* sort externs by type, for kcfg ones also by (align, size, name) */
3755	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
3756
3757	/* for .ksyms section, we need to turn all externs into allocated
3758	 * variables in BTF to pass kernel verification; we do this by
3759	 * pretending that each extern is an int-sized (4-byte) variable
3760	 */
3761	if (ksym_sec) {
3762		/* find existing 4-byte integer type in BTF to use for fake
3763		 * extern variables in DATASEC
3764		 */
3765		int int_btf_id = find_int_btf_id(obj->btf);
3766		/* For an extern function, the dummy_var added earlier
3767		 * is used to replace vs->type, and its name
3768		 * string is reused to fill in any missing
3769		 * parameter names.
3770		 */
3771		const struct btf_type *dummy_var;
3772
3773		dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
3774		for (i = 0; i < obj->nr_extern; i++) {
3775			ext = &obj->externs[i];
3776			if (ext->type != EXT_KSYM)
3777				continue;
3778			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3779				 i, ext->sym_idx, ext->name);
3780		}
3781
3782		sec = ksym_sec;
3783		n = btf_vlen(sec);
3784		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3785			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3786			struct btf_type *vt;
3787
3788			vt = (void *)btf__type_by_id(obj->btf, vs->type);
3789			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3790			ext = find_extern_by_name(obj, ext_name);
3791			if (!ext) {
3792				pr_warn("failed to find extern definition for BTF %s '%s'\n",
3793					btf_kind_str(vt), ext_name);
3794				return -ESRCH;
3795			}
3796			if (btf_is_func(vt)) {
3797				const struct btf_type *func_proto;
3798				struct btf_param *param;
3799				int j;
3800
3801				func_proto = btf__type_by_id(obj->btf,
3802							     vt->type);
3803				param = btf_params(func_proto);
3804				/* Reuse the dummy_var name string for any
3805				 * unnamed func proto parameter.
3806				 */
3807				for (j = 0; j < btf_vlen(func_proto); j++)
3808					if (param[j].type && !param[j].name_off)
3809						param[j].name_off =
3810							dummy_var->name_off;
3811				vs->type = dummy_var_btf_id;
3812				vt->info &= ~0xffff;
3813				vt->info |= BTF_FUNC_GLOBAL;
3814			} else {
3815				btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3816				vt->type = int_btf_id;
3817			}
3818			vs->offset = off;
3819			vs->size = sizeof(int);
3820		}
3821		sec->size = off;
3822	}
3823
3824	if (kcfg_sec) {
3825		sec = kcfg_sec;
3826		/* for kcfg externs calculate their offsets within a .kconfig map */
3827		off = 0;
3828		for (i = 0; i < obj->nr_extern; i++) {
3829			ext = &obj->externs[i];
3830			if (ext->type != EXT_KCFG)
3831				continue;
3832
3833			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3834			off = ext->kcfg.data_off + ext->kcfg.sz;
3835			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
3836				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3837		}
3838		sec->size = off;
3839		n = btf_vlen(sec);
3840		for (i = 0; i < n; i++) {
3841			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3842
3843			t = btf__type_by_id(obj->btf, vs->type);
3844			ext_name = btf__name_by_offset(obj->btf, t->name_off);
3845			ext = find_extern_by_name(obj, ext_name);
3846			if (!ext) {
3847				pr_warn("failed to find extern definition for BTF var '%s'\n",
3848					ext_name);
3849				return -ESRCH;
3850			}
3851			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3852			vs->offset = ext->kcfg.data_off;
3853		}
3854	}
3855	return 0;
3856}
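
/* For reference, the externs collected above typically originate from BPF C
 * code such as the following (illustrative declarations, not part of this
 * file):
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;	   (EXT_KCFG, .kconfig)
 *	extern const struct rq runqueues __ksym;	   (EXT_KSYM var, .ksyms)
 *	extern void bpf_rcu_read_lock(void) __ksym;	   (EXT_KSYM func, .ksyms)
 */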
3857
3858static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
3859{
3860	return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
3861}
3862
3863struct bpf_program *
3864bpf_object__find_program_by_name(const struct bpf_object *obj,
3865				 const char *name)
3866{
3867	struct bpf_program *prog;
3868
3869	bpf_object__for_each_program(prog, obj) {
3870		if (prog_is_subprog(obj, prog))
3871			continue;
3872		if (!strcmp(prog->name, name))
3873			return prog;
3874	}
3875	return errno = ENOENT, NULL;
3876}
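
/* Usage sketch (illustrative only; "prog.bpf.o" and "handle_exec" are
 * hypothetical names):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_exec");
 *	if (!prog)
 *		fprintf(stderr, "program not found: %d\n", -errno);
 *
 * Subprograms are intentionally skipped, so only entry-point programs are
 * returned.
 */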
3877
3878static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3879				      int shndx)
3880{
3881	switch (obj->efile.secs[shndx].sec_type) {
3882	case SEC_BSS:
3883	case SEC_DATA:
3884	case SEC_RODATA:
3885		return true;
3886	default:
3887		return false;
3888	}
3889}
3890
3891static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3892				      int shndx)
3893{
3894	return shndx == obj->efile.maps_shndx ||
3895	       shndx == obj->efile.btf_maps_shndx;
3896}
3897
3898static enum libbpf_map_type
3899bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3900{
3901	if (shndx == obj->efile.symbols_shndx)
3902		return LIBBPF_MAP_KCONFIG;
3903
3904	switch (obj->efile.secs[shndx].sec_type) {
3905	case SEC_BSS:
3906		return LIBBPF_MAP_BSS;
3907	case SEC_DATA:
3908		return LIBBPF_MAP_DATA;
3909	case SEC_RODATA:
3910		return LIBBPF_MAP_RODATA;
3911	default:
3912		return LIBBPF_MAP_UNSPEC;
3913	}
3914}
3915
3916static int bpf_program__record_reloc(struct bpf_program *prog,
3917				     struct reloc_desc *reloc_desc,
3918				     __u32 insn_idx, const char *sym_name,
3919				     const Elf64_Sym *sym, const Elf64_Rel *rel)
3920{
3921	struct bpf_insn *insn = &prog->insns[insn_idx];
3922	size_t map_idx, nr_maps = prog->obj->nr_maps;
3923	struct bpf_object *obj = prog->obj;
3924	__u32 shdr_idx = sym->st_shndx;
3925	enum libbpf_map_type type;
3926	const char *sym_sec_name;
3927	struct bpf_map *map;
3928
3929	if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
3930		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3931			prog->name, sym_name, insn_idx, insn->code);
3932		return -LIBBPF_ERRNO__RELOC;
3933	}
3934
3935	if (sym_is_extern(sym)) {
3936		int sym_idx = ELF64_R_SYM(rel->r_info);
3937		int i, n = obj->nr_extern;
3938		struct extern_desc *ext;
3939
3940		for (i = 0; i < n; i++) {
3941			ext = &obj->externs[i];
3942			if (ext->sym_idx == sym_idx)
3943				break;
3944		}
3945		if (i >= n) {
3946			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3947				prog->name, sym_name, sym_idx);
3948			return -LIBBPF_ERRNO__RELOC;
3949		}
3950		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3951			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
3952		if (insn->code == (BPF_JMP | BPF_CALL))
3953			reloc_desc->type = RELO_EXTERN_FUNC;
3954		else
3955			reloc_desc->type = RELO_EXTERN_VAR;
3956		reloc_desc->insn_idx = insn_idx;
3957		reloc_desc->sym_off = i; /* sym_off stores extern index */
3958		return 0;
3959	}
3960
3961	/* sub-program call relocation */
3962	if (is_call_insn(insn)) {
3963		if (insn->src_reg != BPF_PSEUDO_CALL) {
3964			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
3965			return -LIBBPF_ERRNO__RELOC;
3966		}
3967		/* text_shndx can be 0, if no default "main" program exists */
3968		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
3969			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3970			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3971				prog->name, sym_name, sym_sec_name);
3972			return -LIBBPF_ERRNO__RELOC;
3973		}
3974		if (sym->st_value % BPF_INSN_SZ) {
3975			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3976				prog->name, sym_name, (size_t)sym->st_value);
3977			return -LIBBPF_ERRNO__RELOC;
3978		}
3979		reloc_desc->type = RELO_CALL;
3980		reloc_desc->insn_idx = insn_idx;
3981		reloc_desc->sym_off = sym->st_value;
3982		return 0;
3983	}
3984
3985	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
3986		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3987			prog->name, sym_name, shdr_idx);
3988		return -LIBBPF_ERRNO__RELOC;
3989	}
3990
3991	/* loading subprog addresses */
3992	if (sym_is_subprog(sym, obj->efile.text_shndx)) {
3993		/* global_func: sym->st_value = offset in the section, insn->imm = 0.
3994		 * local_func: sym->st_value = 0, insn->imm = offset in the section.
3995		 */
3996		if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3997			pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3998				prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3999			return -LIBBPF_ERRNO__RELOC;
4000		}
4001
4002		reloc_desc->type = RELO_SUBPROG_ADDR;
4003		reloc_desc->insn_idx = insn_idx;
4004		reloc_desc->sym_off = sym->st_value;
4005		return 0;
4006	}
4007
4008	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4009	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4010
4011	/* generic map reference relocation */
4012	if (type == LIBBPF_MAP_UNSPEC) {
4013		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4014			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4015				prog->name, sym_name, sym_sec_name);
4016			return -LIBBPF_ERRNO__RELOC;
4017		}
4018		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4019			map = &obj->maps[map_idx];
4020			if (map->libbpf_type != type ||
4021			    map->sec_idx != sym->st_shndx ||
4022			    map->sec_offset != sym->st_value)
4023				continue;
4024			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4025				 prog->name, map_idx, map->name, map->sec_idx,
4026				 map->sec_offset, insn_idx);
4027			break;
4028		}
4029		if (map_idx >= nr_maps) {
4030			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4031				prog->name, sym_sec_name, (size_t)sym->st_value);
4032			return -LIBBPF_ERRNO__RELOC;
4033		}
4034		reloc_desc->type = RELO_LD64;
4035		reloc_desc->insn_idx = insn_idx;
4036		reloc_desc->map_idx = map_idx;
4037		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4038		return 0;
4039	}
4040
4041	/* global data map relocation */
4042	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4043		pr_warn("prog '%s': bad data relo against section '%s'\n",
4044			prog->name, sym_sec_name);
4045		return -LIBBPF_ERRNO__RELOC;
4046	}
4047	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4048		map = &obj->maps[map_idx];
4049		if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4050			continue;
4051		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4052			 prog->name, map_idx, map->name, map->sec_idx,
4053			 map->sec_offset, insn_idx);
4054		break;
4055	}
4056	if (map_idx >= nr_maps) {
4057		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4058			prog->name, sym_sec_name);
4059		return -LIBBPF_ERRNO__RELOC;
4060	}
4061
4062	reloc_desc->type = RELO_DATA;
4063	reloc_desc->insn_idx = insn_idx;
4064	reloc_desc->map_idx = map_idx;
4065	reloc_desc->sym_off = sym->st_value;
4066	return 0;
4067}
4068
4069static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4070{
4071	return insn_idx >= prog->sec_insn_off &&
4072	       insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4073}
4074
4075static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4076						 size_t sec_idx, size_t insn_idx)
4077{
4078	int l = 0, r = obj->nr_programs - 1, m;
4079	struct bpf_program *prog;
4080
4081	while (l < r) {
4082		m = l + (r - l + 1) / 2;
4083		prog = &obj->programs[m];
4084
4085		if (prog->sec_idx < sec_idx ||
4086		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4087			l = m;
4088		else
4089			r = m - 1;
4090	}
4091	/* matching program could be at index l, but it still might be the
4092	 * wrong one, so we need to double check conditions for the last time
4093	 */
4094	prog = &obj->programs[l];
4095	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4096		return prog;
4097	return NULL;
4098}
4099
4100static int
4101bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4102{
4103	const char *relo_sec_name, *sec_name;
4104	size_t sec_idx = shdr->sh_info, sym_idx;
4105	struct bpf_program *prog;
4106	struct reloc_desc *relos;
4107	int err, i, nrels;
4108	const char *sym_name;
4109	__u32 insn_idx;
4110	Elf_Scn *scn;
4111	Elf_Data *scn_data;
4112	Elf64_Sym *sym;
4113	Elf64_Rel *rel;
4114
4115	if (sec_idx >= obj->efile.sec_cnt)
4116		return -EINVAL;
4117
4118	scn = elf_sec_by_idx(obj, sec_idx);
4119	scn_data = elf_sec_data(obj, scn);
4120
4121	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4122	sec_name = elf_sec_name(obj, scn);
4123	if (!relo_sec_name || !sec_name)
4124		return -EINVAL;
4125
4126	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4127		 relo_sec_name, sec_idx, sec_name);
4128	nrels = shdr->sh_size / shdr->sh_entsize;
4129
4130	for (i = 0; i < nrels; i++) {
4131		rel = elf_rel_by_idx(data, i);
4132		if (!rel) {
4133			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4134			return -LIBBPF_ERRNO__FORMAT;
4135		}
4136
4137		sym_idx = ELF64_R_SYM(rel->r_info);
4138		sym = elf_sym_by_idx(obj, sym_idx);
4139		if (!sym) {
4140			pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4141				relo_sec_name, sym_idx, i);
4142			return -LIBBPF_ERRNO__FORMAT;
4143		}
4144
4145		if (sym->st_shndx >= obj->efile.sec_cnt) {
4146			pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4147				relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4148			return -LIBBPF_ERRNO__FORMAT;
4149		}
4150
4151		if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4152			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4153				relo_sec_name, (size_t)rel->r_offset, i);
4154			return -LIBBPF_ERRNO__FORMAT;
4155		}
4156
4157		insn_idx = rel->r_offset / BPF_INSN_SZ;
4158		/* relocations against static functions are recorded as
4159		 * relocations against the section that contains a function;
4160		 * in such a case, the symbol will be STT_SECTION and sym.st_name
4161		 * will point to an empty string (0), so fetch the section name
4162		 * instead
4163		 */
4164		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4165			sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4166		else
4167			sym_name = elf_sym_str(obj, sym->st_name);
4168		sym_name = sym_name ?: "<?";
4169
4170		pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4171			 relo_sec_name, i, insn_idx, sym_name);
4172
4173		prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4174		if (!prog) {
4175			pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4176				relo_sec_name, i, sec_name, insn_idx);
4177			continue;
4178		}
4179
4180		relos = libbpf_reallocarray(prog->reloc_desc,
4181					    prog->nr_reloc + 1, sizeof(*relos));
4182		if (!relos)
4183			return -ENOMEM;
4184		prog->reloc_desc = relos;
4185
4186		/* adjust insn_idx to local BPF program frame of reference */
4187		insn_idx -= prog->sec_insn_off;
4188		err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4189						insn_idx, sym_name, sym, rel);
4190		if (err)
4191			return err;
4192
4193		prog->nr_reloc++;
4194	}
4195	return 0;
4196}
4197
4198static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
4199{
4200	int id;
4201
4202	if (!obj->btf)
4203		return -ENOENT;
4204
4205	/* if it's a BTF-defined map, we don't need to search for type IDs.
4206	 * A struct_ops map does not need btf_key_type_id and
4207	 * btf_value_type_id either.
4208	 */
4209	if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4210		return 0;
4211
4212	/*
4213	 * LLVM annotates global data differently in BTF, that is,
4214	 * only as '.data', '.bss' or '.rodata'.
4215	 */
4216	if (!bpf_map__is_internal(map))
4217		return -ENOENT;
4218
4219	id = btf__find_by_name(obj->btf, map->real_name);
4220	if (id < 0)
4221		return id;
4222
4223	map->btf_key_type_id = 0;
4224	map->btf_value_type_id = id;
4225	return 0;
4226}
4227
4228static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4229{
4230	char file[PATH_MAX], buff[4096];
4231	FILE *fp;
4232	__u32 val;
4233	int err;
4234
4235	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4236	memset(info, 0, sizeof(*info));
4237
4238	fp = fopen(file, "r");
4239	if (!fp) {
4240		err = -errno;
4241		pr_warn("failed to open %s: %d. No procfs support?\n", file,
4242			err);
4243		return err;
4244	}
4245
4246	while (fgets(buff, sizeof(buff), fp)) {
4247		if (sscanf(buff, "map_type:\t%u", &val) == 1)
4248			info->type = val;
4249		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4250			info->key_size = val;
4251		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4252			info->value_size = val;
4253		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4254			info->max_entries = val;
4255		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4256			info->map_flags = val;
4257	}
4258
4259	fclose(fp);
4260
4261	return 0;
4262}
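
/* For reference, the parsed fdinfo file looks roughly like this (values are
 * illustrative):
 *
 *	pos:	0
 *	flags:	02000002
 *	mnt_id:	15
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 *
 * Only the fields needed for the reuse-compatibility checks below are
 * extracted.
 */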
4263
4264bool bpf_map__autocreate(const struct bpf_map *map)
4265{
4266	return map->autocreate;
4267}
4268
4269int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4270{
4271	if (map->obj->loaded)
4272		return libbpf_err(-EBUSY);
4273
4274	map->autocreate = autocreate;
4275	return 0;
4276}
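
/* Usage sketch (illustrative; "unused_map" is a hypothetical map name). Must
 * be called after opening the object but before bpf_object__load():
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "unused_map");
 *
 *	if (map)
 *		bpf_map__set_autocreate(map, false);
 */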
4277
4278int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4279{
4280	struct bpf_map_info info = {};
4281	__u32 len = sizeof(info), name_len;
4282	int new_fd, err;
4283	char *new_name;
4284
4285	err = bpf_obj_get_info_by_fd(fd, &info, &len);
4286	if (err && errno == EINVAL)
4287		err = bpf_get_map_info_from_fdinfo(fd, &info);
4288	if (err)
4289		return libbpf_err(err);
4290
4291	name_len = strlen(info.name);
4292	if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4293		new_name = strdup(map->name);
4294	else
4295		new_name = strdup(info.name);
4296
4297	if (!new_name)
4298		return libbpf_err(-errno);
4299
4300	new_fd = open("/", O_RDONLY | O_CLOEXEC);
4301	if (new_fd < 0) {
4302		err = -errno;
4303		goto err_free_new_name;
4304	}
4305
4306	new_fd = dup3(fd, new_fd, O_CLOEXEC);
4307	if (new_fd < 0) {
4308		err = -errno;
4309		goto err_close_new_fd;
4310	}
4311
4312	err = zclose(map->fd);
4313	if (err) {
4314		err = -errno;
4315		goto err_close_new_fd;
4316	}
4317	free(map->name);
4318
4319	map->fd = new_fd;
4320	map->name = new_name;
4321	map->def.type = info.type;
4322	map->def.key_size = info.key_size;
4323	map->def.value_size = info.value_size;
4324	map->def.max_entries = info.max_entries;
4325	map->def.map_flags = info.map_flags;
4326	map->btf_key_type_id = info.btf_key_type_id;
4327	map->btf_value_type_id = info.btf_value_type_id;
4328	map->reused = true;
4329	map->map_extra = info.map_extra;
4330
4331	return 0;
4332
4333err_close_new_fd:
4334	close(new_fd);
4335err_free_new_name:
4336	free(new_name);
4337	return libbpf_err(err);
4338}
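
/* Usage sketch (illustrative; "/sys/fs/bpf/my_map" is a hypothetical pin
 * path). Reuse must happen before bpf_object__load():
 *
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *	if (pin_fd >= 0) {
 *		err = bpf_map__reuse_fd(map, pin_fd);
 *		close(pin_fd);
 *	}
 *
 * On success the map is marked as reused and won't be re-created at load
 * time.
 */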
4339
4340__u32 bpf_map__max_entries(const struct bpf_map *map)
4341{
4342	return map->def.max_entries;
4343}
4344
4345struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4346{
4347	if (!bpf_map_type__is_map_in_map(map->def.type))
4348		return errno = EINVAL, NULL;
4349
4350	return map->inner_map;
4351}
4352
4353int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4354{
4355	if (map->obj->loaded)
4356		return libbpf_err(-EBUSY);
4357
4358	map->def.max_entries = max_entries;
4359
4360	/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4361	if (map->def.type == BPF_MAP_TYPE_RINGBUF)
4362		map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4363
4364	return 0;
4365}
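
/* Usage sketch (illustrative): for a BPF_MAP_TYPE_RINGBUF map the requested
 * size is adjusted to a valid ringbuf size (a power-of-2 multiple of the
 * page size), so e.g.
 *
 *	bpf_map__set_max_entries(ringbuf_map, 1000);
 *
 * may end up as 4096 on a system with 4KB pages.
 */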
4366
4367static int
4368bpf_object__probe_loading(struct bpf_object *obj)
4369{
4370	char *cp, errmsg[STRERR_BUFSIZE];
4371	struct bpf_insn insns[] = {
4372		BPF_MOV64_IMM(BPF_REG_0, 0),
4373		BPF_EXIT_INSN(),
4374	};
4375	int ret, insn_cnt = ARRAY_SIZE(insns);
4376
4377	if (obj->gen_loader)
4378		return 0;
4379
4380	ret = bump_rlimit_memlock();
4381	if (ret)
4382		pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4383
4384	/* make sure basic loading works */
4385	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4386	if (ret < 0)
4387		ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4388	if (ret < 0) {
4389		ret = errno;
4390		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4391		pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4392			"program. Make sure your kernel supports BPF "
4393			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4394			"set to a big enough value.\n", __func__, cp, ret);
4395		return -ret;
4396	}
4397	close(ret);
4398
4399	return 0;
4400}
4401
4402static int probe_fd(int fd)
4403{
4404	if (fd >= 0)
4405		close(fd);
4406	return fd >= 0;
4407}
4408
4409static int probe_kern_prog_name(void)
4410{
4411	struct bpf_insn insns[] = {
4412		BPF_MOV64_IMM(BPF_REG_0, 0),
4413		BPF_EXIT_INSN(),
4414	};
4415	int ret, insn_cnt = ARRAY_SIZE(insns);
4416
4417	/* make sure loading with name works */
4418	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
4419	return probe_fd(ret);
4420}
4421
4422static int probe_kern_global_data(void)
4423{
4424	char *cp, errmsg[STRERR_BUFSIZE];
4425	struct bpf_insn insns[] = {
4426		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4427		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4428		BPF_MOV64_IMM(BPF_REG_0, 0),
4429		BPF_EXIT_INSN(),
4430	};
4431	int ret, map, insn_cnt = ARRAY_SIZE(insns);
4432
4433	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4434	if (map < 0) {
4435		ret = -errno;
4436		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4437		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4438			__func__, cp, -ret);
4439		return ret;
4440	}
4441
4442	insns[0].imm = map;
4443
4444	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4445	close(map);
4446	return probe_fd(ret);
4447}
4448
4449static int probe_kern_btf(void)
4450{
4451	static const char strs[] = "\0int";
4452	__u32 types[] = {
4453		/* int */
4454		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4455	};
4456
4457	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4458					     strs, sizeof(strs)));
4459}
4460
4461static int probe_kern_btf_func(void)
4462{
4463	static const char strs[] = "\0int\0x\0a";
4464	/* void x(int a) {} */
4465	__u32 types[] = {
4466		/* int */
4467		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4468		/* FUNC_PROTO */                                /* [2] */
4469		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4470		BTF_PARAM_ENC(7, 1),
4471		/* FUNC x */                                    /* [3] */
4472		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
4473	};
4474
4475	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4476					     strs, sizeof(strs)));
4477}
4478
4479static int probe_kern_btf_func_global(void)
4480{
4481	static const char strs[] = "\0int\0x\0a";
4482	/* void x(int a) {} (with global linkage) */
4483	__u32 types[] = {
4484		/* int */
4485		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4486		/* FUNC_PROTO */                                /* [2] */
4487		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
4488		BTF_PARAM_ENC(7, 1),
4489		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
4490		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
4491	};
4492
4493	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4494					     strs, sizeof(strs)));
4495}
4496
4497static int probe_kern_btf_datasec(void)
4498{
4499	static const char strs[] = "\0x\0.data";
4500	/* static int x; */
4501	__u32 types[] = {
4502		/* int */
4503		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4504		/* VAR x */                                     /* [2] */
4505		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4506		BTF_VAR_STATIC,
4507		/* DATASEC val */                               /* [3] */
4508		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
4509		BTF_VAR_SECINFO_ENC(2, 0, 4),
4510	};
4511
4512	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4513					     strs, sizeof(strs)));
4514}
4515
4516static int probe_kern_btf_float(void)
4517{
4518	static const char strs[] = "\0float";
4519	__u32 types[] = {
4520		/* float */
4521		BTF_TYPE_FLOAT_ENC(1, 4),
4522	};
4523
4524	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4525					     strs, sizeof(strs)));
4526}
4527
4528static int probe_kern_btf_decl_tag(void)
4529{
4530	static const char strs[] = "\0tag";
4531	__u32 types[] = {
4532		/* int */
4533		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
4534		/* VAR x */                                     /* [2] */
4535		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
4536		BTF_VAR_STATIC,
4537		/* attr */
4538		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
4539	};
4540
4541	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4542					     strs, sizeof(strs)));
4543}
4544
4545static int probe_kern_btf_type_tag(void)
4546{
4547	static const char strs[] = "\0tag";
4548	__u32 types[] = {
4549		/* int */
4550		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
4551		/* attr */
4552		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
4553		/* ptr */
4554		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
4555	};
4556
4557	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4558					     strs, sizeof(strs)));
4559}
4560
4561static int probe_kern_array_mmap(void)
4562{
4563	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
4564	int fd;
4565
4566	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
4567	return probe_fd(fd);
4568}
4569
4570static int probe_kern_exp_attach_type(void)
4571{
4572	LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
4573	struct bpf_insn insns[] = {
4574		BPF_MOV64_IMM(BPF_REG_0, 0),
4575		BPF_EXIT_INSN(),
4576	};
4577	int fd, insn_cnt = ARRAY_SIZE(insns);
4578
4579	/* use any valid combination of program type and (optional)
4580	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
4581	 * to see if kernel supports expected_attach_type field for
4582	 * BPF_PROG_LOAD command
4583	 */
4584	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
4585	return probe_fd(fd);
4586}
4587
4588static int probe_kern_probe_read_kernel(void)
4589{
4590	struct bpf_insn insns[] = {
4591		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
4592		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
4593		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
4594		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
4595		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
4596		BPF_EXIT_INSN(),
4597	};
4598	int fd, insn_cnt = ARRAY_SIZE(insns);
4599
4600	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4601	return probe_fd(fd);
4602}
4603
4604static int probe_prog_bind_map(void)
4605{
4606	char *cp, errmsg[STRERR_BUFSIZE];
4607	struct bpf_insn insns[] = {
4608		BPF_MOV64_IMM(BPF_REG_0, 0),
4609		BPF_EXIT_INSN(),
4610	};
4611	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
4612
4613	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
4614	if (map < 0) {
4615		ret = -errno;
4616		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4617		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4618			__func__, cp, -ret);
4619		return ret;
4620	}
4621
4622	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4623	if (prog < 0) {
4624		close(map);
4625		return 0;
4626	}
4627
4628	ret = bpf_prog_bind_map(prog, map, NULL);
4629
4630	close(map);
4631	close(prog);
4632
4633	return ret >= 0;
4634}
4635
4636static int probe_module_btf(void)
4637{
4638	static const char strs[] = "\0int";
4639	__u32 types[] = {
4640		/* int */
4641		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4642	};
4643	struct bpf_btf_info info;
4644	__u32 len = sizeof(info);
4645	char name[16];
4646	int fd, err;
4647
4648	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4649	if (fd < 0)
4650		return 0; /* BTF not supported at all */
4651
4652	memset(&info, 0, sizeof(info));
4653	info.name = ptr_to_u64(name);
4654	info.name_len = sizeof(name);
4655
4656	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
4657	 * kernel's module BTF support coincides with support for
4658	 * name/name_len fields in struct bpf_btf_info.
4659	 */
4660	err = bpf_obj_get_info_by_fd(fd, &info, &len);
4661	close(fd);
4662	return !err;
4663}
4664
4665static int probe_perf_link(void)
4666{
4667	struct bpf_insn insns[] = {
4668		BPF_MOV64_IMM(BPF_REG_0, 0),
4669		BPF_EXIT_INSN(),
4670	};
4671	int prog_fd, link_fd, err;
4672
4673	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
4674				insns, ARRAY_SIZE(insns), NULL);
4675	if (prog_fd < 0)
4676		return -errno;
4677
4678	/* use invalid perf_event FD to get EBADF, if link is supported;
4679	 * otherwise EINVAL should be returned
4680	 */
4681	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
4682	err = -errno; /* close() can clobber errno */
4683
4684	if (link_fd >= 0)
4685		close(link_fd);
4686	close(prog_fd);
4687
4688	return link_fd < 0 && err == -EBADF;
4689}
4690
4691static int probe_kern_bpf_cookie(void)
4692{
4693	struct bpf_insn insns[] = {
4694		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
4695		BPF_EXIT_INSN(),
4696	};
4697	int ret, insn_cnt = ARRAY_SIZE(insns);
4698
4699	ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
4700	return probe_fd(ret);
4701}
4702
4703static int probe_kern_btf_enum64(void)
4704{
4705	static const char strs[] = "\0enum64";
4706	__u32 types[] = {
4707		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
4708	};
4709
4710	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
4711					     strs, sizeof(strs)));
4712}
4713
4714static int probe_kern_syscall_wrapper(void);
4715
4716enum kern_feature_result {
4717	FEAT_UNKNOWN = 0,
4718	FEAT_SUPPORTED = 1,
4719	FEAT_MISSING = 2,
4720};
4721
4722typedef int (*feature_probe_fn)(void);
4723
4724static struct kern_feature_desc {
4725	const char *desc;
4726	feature_probe_fn probe;
4727	enum kern_feature_result res;
4728} feature_probes[__FEAT_CNT] = {
4729	[FEAT_PROG_NAME] = {
4730		"BPF program name", probe_kern_prog_name,
4731	},
4732	[FEAT_GLOBAL_DATA] = {
4733		"global variables", probe_kern_global_data,
4734	},
4735	[FEAT_BTF] = {
4736		"minimal BTF", probe_kern_btf,
4737	},
4738	[FEAT_BTF_FUNC] = {
4739		"BTF functions", probe_kern_btf_func,
4740	},
4741	[FEAT_BTF_GLOBAL_FUNC] = {
4742		"BTF global function", probe_kern_btf_func_global,
4743	},
4744	[FEAT_BTF_DATASEC] = {
4745		"BTF data section and variable", probe_kern_btf_datasec,
4746	},
4747	[FEAT_ARRAY_MMAP] = {
4748		"ARRAY map mmap()", probe_kern_array_mmap,
4749	},
4750	[FEAT_EXP_ATTACH_TYPE] = {
4751		"BPF_PROG_LOAD expected_attach_type attribute",
4752		probe_kern_exp_attach_type,
4753	},
4754	[FEAT_PROBE_READ_KERN] = {
4755		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
4756	},
4757	[FEAT_PROG_BIND_MAP] = {
4758		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4759	},
4760	[FEAT_MODULE_BTF] = {
4761		"module BTF support", probe_module_btf,
4762	},
4763	[FEAT_BTF_FLOAT] = {
4764		"BTF_KIND_FLOAT support", probe_kern_btf_float,
4765	},
4766	[FEAT_PERF_LINK] = {
4767		"BPF perf link support", probe_perf_link,
4768	},
4769	[FEAT_BTF_DECL_TAG] = {
4770		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
4771	},
4772	[FEAT_BTF_TYPE_TAG] = {
4773		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
4774	},
4775	[FEAT_MEMCG_ACCOUNT] = {
4776		"memcg-based memory accounting", probe_memcg_account,
4777	},
4778	[FEAT_BPF_COOKIE] = {
4779		"BPF cookie support", probe_kern_bpf_cookie,
4780	},
4781	[FEAT_BTF_ENUM64] = {
4782		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
4783	},
4784	[FEAT_SYSCALL_WRAPPER] = {
4785		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
4786	},
4787};
4788
4789bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4790{
4791	struct kern_feature_desc *feat = &feature_probes[feat_id];
4792	int ret;
4793
4794	if (obj && obj->gen_loader)
4795		/* To generate the loader program, assume the latest kernel
4796		 * to avoid doing extra prog_load and map_create syscalls.
4797		 */
4798		return true;
4799
4800	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4801		ret = feat->probe();
4802		if (ret > 0) {
4803			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4804		} else if (ret == 0) {
4805			WRITE_ONCE(feat->res, FEAT_MISSING);
4806		} else {
4807			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4808			WRITE_ONCE(feat->res, FEAT_MISSING);
4809		}
4810	}
4811
4812	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
4813}
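
/* Example of how the cached result is consumed elsewhere in this file (see
 * bpf_object__create_maps()):
 *
 *	if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
 *		map->autocreate = false;
 *
 * The first call for a given feature runs its probe; subsequent calls reuse
 * the recorded FEAT_SUPPORTED/FEAT_MISSING result.
 */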
4814
4815static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4816{
4817	struct bpf_map_info map_info = {};
4818	char msg[STRERR_BUFSIZE];
4819	__u32 map_info_len;
4820	int err;
4821
4822	map_info_len = sizeof(map_info);
4823
4824	err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
4825	if (err && errno == EINVAL)
4826		err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4827	if (err) {
4828		pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4829			libbpf_strerror_r(errno, msg, sizeof(msg)));
4830		return false;
4831	}
4832
4833	return (map_info.type == map->def.type &&
4834		map_info.key_size == map->def.key_size &&
4835		map_info.value_size == map->def.value_size &&
4836		map_info.max_entries == map->def.max_entries &&
4837		map_info.map_flags == map->def.map_flags &&
4838		map_info.map_extra == map->map_extra);
4839}
4840
4841static int
4842bpf_object__reuse_map(struct bpf_map *map)
4843{
4844	char *cp, errmsg[STRERR_BUFSIZE];
4845	int err, pin_fd;
4846
4847	pin_fd = bpf_obj_get(map->pin_path);
4848	if (pin_fd < 0) {
4849		err = -errno;
4850		if (err == -ENOENT) {
4851			pr_debug("found no pinned map to reuse at '%s'\n",
4852				 map->pin_path);
4853			return 0;
4854		}
4855
4856		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4857		pr_warn("couldn't retrieve pinned map '%s': %s\n",
4858			map->pin_path, cp);
4859		return err;
4860	}
4861
4862	if (!map_is_reuse_compat(map, pin_fd)) {
4863		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4864			map->pin_path);
4865		close(pin_fd);
4866		return -EINVAL;
4867	}
4868
4869	err = bpf_map__reuse_fd(map, pin_fd);
4870	close(pin_fd);
4871	if (err)
4872		return err;
4873
4874	map->pinned = true;
4875	pr_debug("reused pinned map at '%s'\n", map->pin_path);
4876
4877	return 0;
4878}
4879
4880static int
4881bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4882{
4883	enum libbpf_map_type map_type = map->libbpf_type;
4884	char *cp, errmsg[STRERR_BUFSIZE];
4885	int err, zero = 0;
4886
4887	if (obj->gen_loader) {
4888		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4889					 map->mmaped, map->def.value_size);
4890		if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4891			bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4892		return 0;
4893	}
4894	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4895	if (err) {
4896		err = -errno;
4897		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4898		pr_warn("Error setting initial map(%s) contents: %s\n",
4899			map->name, cp);
4900		return err;
4901	}
4902
4903	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
4904	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4905		err = bpf_map_freeze(map->fd);
4906		if (err) {
4907			err = -errno;
4908			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4909			pr_warn("Error freezing map(%s) as read-only: %s\n",
4910				map->name, cp);
4911			return err;
4912		}
4913	}
4914	return 0;
4915}
4916
4917static void bpf_map__destroy(struct bpf_map *map);
4918
4919static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4920{
4921	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
4922	struct bpf_map_def *def = &map->def;
4923	const char *map_name = NULL;
4924	int err = 0;
4925
4926	if (kernel_supports(obj, FEAT_PROG_NAME))
4927		map_name = map->name;
4928	create_attr.map_ifindex = map->map_ifindex;
4929	create_attr.map_flags = def->map_flags;
4930	create_attr.numa_node = map->numa_node;
4931	create_attr.map_extra = map->map_extra;
4932
4933	if (bpf_map__is_struct_ops(map))
4934		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4935
4936	if (obj->btf && btf__fd(obj->btf) >= 0) {
4937		create_attr.btf_fd = btf__fd(obj->btf);
4938		create_attr.btf_key_type_id = map->btf_key_type_id;
4939		create_attr.btf_value_type_id = map->btf_value_type_id;
4940	}
4941
4942	if (bpf_map_type__is_map_in_map(def->type)) {
4943		if (map->inner_map) {
4944			err = bpf_object__create_map(obj, map->inner_map, true);
4945			if (err) {
4946				pr_warn("map '%s': failed to create inner map: %d\n",
4947					map->name, err);
4948				return err;
4949			}
4950			map->inner_map_fd = bpf_map__fd(map->inner_map);
4951		}
4952		if (map->inner_map_fd >= 0)
4953			create_attr.inner_map_fd = map->inner_map_fd;
4954	}
4955
4956	switch (def->type) {
4957	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
4958	case BPF_MAP_TYPE_CGROUP_ARRAY:
4959	case BPF_MAP_TYPE_STACK_TRACE:
4960	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
4961	case BPF_MAP_TYPE_HASH_OF_MAPS:
4962	case BPF_MAP_TYPE_DEVMAP:
4963	case BPF_MAP_TYPE_DEVMAP_HASH:
4964	case BPF_MAP_TYPE_CPUMAP:
4965	case BPF_MAP_TYPE_XSKMAP:
4966	case BPF_MAP_TYPE_SOCKMAP:
4967	case BPF_MAP_TYPE_SOCKHASH:
4968	case BPF_MAP_TYPE_QUEUE:
4969	case BPF_MAP_TYPE_STACK:
4970		create_attr.btf_fd = 0;
4971		create_attr.btf_key_type_id = 0;
4972		create_attr.btf_value_type_id = 0;
4973		map->btf_key_type_id = 0;
4974		map->btf_value_type_id = 0;
4975	default:
4976		break;
4977	}
4978
4979	if (obj->gen_loader) {
4980		bpf_gen__map_create(obj->gen_loader, def->type, map_name,
4981				    def->key_size, def->value_size, def->max_entries,
4982				    &create_attr, is_inner ? -1 : map - obj->maps);
4983		/* Pretend to have valid FD to pass various fd >= 0 checks.
4984		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
4985		 */
4986		map->fd = 0;
4987	} else {
4988		map->fd = bpf_map_create(def->type, map_name,
4989					 def->key_size, def->value_size,
4990					 def->max_entries, &create_attr);
4991	}
4992	if (map->fd < 0 && (create_attr.btf_key_type_id ||
4993			    create_attr.btf_value_type_id)) {
4994		char *cp, errmsg[STRERR_BUFSIZE];
4995
4996		err = -errno;
4997		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4998		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4999			map->name, cp, err);
5000		create_attr.btf_fd = 0;
5001		create_attr.btf_key_type_id = 0;
5002		create_attr.btf_value_type_id = 0;
5003		map->btf_key_type_id = 0;
5004		map->btf_value_type_id = 0;
5005		map->fd = bpf_map_create(def->type, map_name,
5006					 def->key_size, def->value_size,
5007					 def->max_entries, &create_attr);
5008	}
5009
5010	err = map->fd < 0 ? -errno : 0;
5011
5012	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5013		if (obj->gen_loader)
5014			map->inner_map->fd = -1;
5015		bpf_map__destroy(map->inner_map);
5016		zfree(&map->inner_map);
5017	}
5018
5019	return err;
5020}
5021
5022static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5023{
5024	const struct bpf_map *targ_map;
5025	unsigned int i;
5026	int fd, err = 0;
5027
5028	for (i = 0; i < map->init_slots_sz; i++) {
5029		if (!map->init_slots[i])
5030			continue;
5031
5032		targ_map = map->init_slots[i];
5033		fd = bpf_map__fd(targ_map);
5034
5035		if (obj->gen_loader) {
5036			bpf_gen__populate_outer_map(obj->gen_loader,
5037						    map - obj->maps, i,
5038						    targ_map - obj->maps);
5039		} else {
5040			err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5041		}
5042		if (err) {
5043			err = -errno;
5044			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5045				map->name, i, targ_map->name, fd, err);
5046			return err;
5047		}
5048		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5049			 map->name, i, targ_map->name, fd);
5050	}
5051
5052	zfree(&map->init_slots);
5053	map->init_slots_sz = 0;
5054
5055	return 0;
5056}
5057
5058static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5059{
5060	const struct bpf_program *targ_prog;
5061	unsigned int i;
5062	int fd, err;
5063
5064	if (obj->gen_loader)
5065		return -ENOTSUP;
5066
5067	for (i = 0; i < map->init_slots_sz; i++) {
5068		if (!map->init_slots[i])
5069			continue;
5070
5071		targ_prog = map->init_slots[i];
5072		fd = bpf_program__fd(targ_prog);
5073
5074		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5075		if (err) {
5076			err = -errno;
5077			pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5078				map->name, i, targ_prog->name, fd, err);
5079			return err;
5080		}
5081		pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5082			 map->name, i, targ_prog->name, fd);
5083	}
5084
5085	zfree(&map->init_slots);
5086	map->init_slots_sz = 0;
5087
5088	return 0;
5089}
5090
5091static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5092{
5093	struct bpf_map *map;
5094	int i, err;
5095
5096	for (i = 0; i < obj->nr_maps; i++) {
5097		map = &obj->maps[i];
5098
5099		if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5100			continue;
5101
5102		err = init_prog_array_slots(obj, map);
5103		if (err < 0) {
5104			zclose(map->fd);
5105			return err;
5106		}
5107	}
5108	return 0;
5109}
5110
5111static int map_set_def_max_entries(struct bpf_map *map)
5112{
5113	if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5114		int nr_cpus;
5115
5116		nr_cpus = libbpf_num_possible_cpus();
5117		if (nr_cpus < 0) {
5118			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5119				map->name, nr_cpus);
5120			return nr_cpus;
5121		}
5122		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5123		map->def.max_entries = nr_cpus;
5124	}
5125
5126	return 0;
5127}
5128
5129static int
5130bpf_object__create_maps(struct bpf_object *obj)
5131{
5132	struct bpf_map *map;
5133	char *cp, errmsg[STRERR_BUFSIZE];
5134	unsigned int i, j;
5135	int err;
5136	bool retried;
5137
5138	for (i = 0; i < obj->nr_maps; i++) {
5139		map = &obj->maps[i];
5140
5141		/* To support old kernels, we skip creating global data maps
5142		 * (.rodata, .data, .kconfig, etc); later on, during program
5143		 * loading, if we detect that at least one of the to-be-loaded
5144		 * programs is referencing any global data map, we'll error
5145		 * out with program name and relocation index logged.
5146		 * This approach allows us to accommodate Clang emitting
5147		 * unnecessary .rodata.str1.1 sections for string literals,
5148		 * and also allows CO-RE applications to use global variables
5149		 * in some BPF programs but not others.
5150		 * If those global variable-using programs are not loaded at
5151		 * runtime due to bpf_program__set_autoload(prog, false),
5152		 * bpf_object loading will succeed just fine even on old
5153		 * kernels.
5154		 */
5155		if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5156			map->autocreate = false;
5157
5158		if (!map->autocreate) {
5159			pr_debug("map '%s': skipped auto-creating...\n", map->name);
5160			continue;
5161		}
5162
5163		err = map_set_def_max_entries(map);
5164		if (err)
5165			goto err_out;
5166
5167		retried = false;
5168retry:
5169		if (map->pin_path) {
5170			err = bpf_object__reuse_map(map);
5171			if (err) {
5172				pr_warn("map '%s': error reusing pinned map\n",
5173					map->name);
5174				goto err_out;
5175			}
5176			if (retried && map->fd < 0) {
5177				pr_warn("map '%s': cannot find pinned map\n",
5178					map->name);
5179				err = -ENOENT;
5180				goto err_out;
5181			}
5182		}
5183
5184		if (map->fd >= 0) {
5185			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5186				 map->name, map->fd);
5187		} else {
5188			err = bpf_object__create_map(obj, map, false);
5189			if (err)
5190				goto err_out;
5191
5192			pr_debug("map '%s': created successfully, fd=%d\n",
5193				 map->name, map->fd);
5194
5195			if (bpf_map__is_internal(map)) {
5196				err = bpf_object__populate_internal_map(obj, map);
5197				if (err < 0) {
5198					zclose(map->fd);
5199					goto err_out;
5200				}
5201			}
5202
5203			if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5204				err = init_map_in_map_slots(obj, map);
5205				if (err < 0) {
5206					zclose(map->fd);
5207					goto err_out;
5208				}
5209			}
5210		}
5211
5212		if (map->pin_path && !map->pinned) {
5213			err = bpf_map__pin(map, NULL);
5214			if (err) {
5215				zclose(map->fd);
5216				if (!retried && err == -EEXIST) {
5217					retried = true;
5218					goto retry;
5219				}
5220				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5221					map->name, map->pin_path, err);
5222				goto err_out;
5223			}
5224		}
5225	}
5226
5227	return 0;
5228
5229err_out:
5230	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5231	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5232	pr_perm_msg(err);
5233	for (j = 0; j < i; j++)
5234		zclose(obj->maps[j].fd);
5235	return err;
5236}
5237
5238static bool bpf_core_is_flavor_sep(const char *s)
5239{
5240	/* check X___Y name pattern, where X and Y are not underscores */
5241	return s[0] != '_' &&				      /* X */
5242	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
5243	       s[4] != '_';				      /* Y */
5244}
5245
5246/* Given 'some_struct_name___with_flavor', return the length of the name
5247 * prefix before the last triple underscore. The struct name part after the
5248 * last triple underscore is ignored during BPF CO-RE relocation matching.
5249 */
5250size_t bpf_core_essential_name_len(const char *name)
5251{
5252	size_t n = strlen(name);
5253	int i;
5254
5255	for (i = n - 5; i >= 0; i--) {
5256		if (bpf_core_is_flavor_sep(name + i))
5257			return i + 1;
5258	}
5259	return n;
5260}
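
/* Example (illustrative): a local flavored type and the target kernel type
 * share the same essential name, so they can match during CO-RE candidate
 * search:
 *
 *	bpf_core_essential_name_len("task_struct___old") == 11
 *	bpf_core_essential_name_len("task_struct") == 11
 *	strncmp("task_struct___old", "task_struct", 11) == 0
 */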
5261
5262void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5263{
5264	if (!cands)
5265		return;
5266
5267	free(cands->cands);
5268	free(cands);
5269}
5270
5271int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5272		       size_t local_essent_len,
5273		       const struct btf *targ_btf,
5274		       const char *targ_btf_name,
5275		       int targ_start_id,
5276		       struct bpf_core_cand_list *cands)
5277{
5278	struct bpf_core_cand *new_cands, *cand;
5279	const struct btf_type *t, *local_t;
5280	const char *targ_name, *local_name;
5281	size_t targ_essent_len;
5282	int n, i;
5283
5284	local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5285	local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5286
5287	n = btf__type_cnt(targ_btf);
5288	for (i = targ_start_id; i < n; i++) {
5289		t = btf__type_by_id(targ_btf, i);
5290		if (!btf_kind_core_compat(t, local_t))
5291			continue;
5292
5293		targ_name = btf__name_by_offset(targ_btf, t->name_off);
5294		if (str_is_empty(targ_name))
5295			continue;
5296
5297		targ_essent_len = bpf_core_essential_name_len(targ_name);
5298		if (targ_essent_len != local_essent_len)
5299			continue;
5300
5301		if (strncmp(local_name, targ_name, local_essent_len) != 0)
5302			continue;
5303
5304		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5305			 local_cand->id, btf_kind_str(local_t),
5306			 local_name, i, btf_kind_str(t), targ_name,
5307			 targ_btf_name);
5308		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5309					      sizeof(*cands->cands));
5310		if (!new_cands)
5311			return -ENOMEM;
5312
5313		cand = &new_cands[cands->len];
5314		cand->btf = targ_btf;
5315		cand->id = i;
5316
5317		cands->cands = new_cands;
5318		cands->len++;
5319	}
5320	return 0;
5321}
5322
5323static int load_module_btfs(struct bpf_object *obj)
5324{
5325	struct bpf_btf_info info;
5326	struct module_btf *mod_btf;
5327	struct btf *btf;
5328	char name[64];
5329	__u32 id = 0, len;
5330	int err, fd;
5331
5332	if (obj->btf_modules_loaded)
5333		return 0;
5334
5335	if (obj->gen_loader)
5336		return 0;
5337
5338	/* don't do this again, even if we find no module BTFs */
5339	obj->btf_modules_loaded = true;
5340
5341	/* kernel too old to support module BTFs */
5342	if (!kernel_supports(obj, FEAT_MODULE_BTF))
5343		return 0;
5344
5345	while (true) {
5346		err = bpf_btf_get_next_id(id, &id);
5347		if (err && errno == ENOENT)
5348			return 0;
5349		if (err) {
5350			err = -errno;
5351			pr_warn("failed to iterate BTF objects: %d\n", err);
5352			return err;
5353		}
5354
5355		fd = bpf_btf_get_fd_by_id(id);
5356		if (fd < 0) {
5357			if (errno == ENOENT)
5358				continue; /* expected race: BTF was unloaded */
5359			err = -errno;
5360			pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5361			return err;
5362		}
5363
5364		len = sizeof(info);
5365		memset(&info, 0, sizeof(info));
5366		info.name = ptr_to_u64(name);
5367		info.name_len = sizeof(name);
5368
5369		err = bpf_obj_get_info_by_fd(fd, &info, &len);
5370		if (err) {
5371			err = -errno;
5372			pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5373			goto err_out;
5374		}
5375
5376		/* ignore non-module BTFs */
5377		if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5378			close(fd);
5379			continue;
5380		}
5381
5382		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5383		err = libbpf_get_error(btf);
5384		if (err) {
5385			pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5386				name, id, err);
5387			goto err_out;
5388		}
5389
5390		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5391				        sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5392		if (err)
5393			goto err_out;
5394
5395		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5396
5397		mod_btf->btf = btf;
5398		mod_btf->id = id;
5399		mod_btf->fd = fd;
5400		mod_btf->name = strdup(name);
5401		if (!mod_btf->name) {
5402			err = -ENOMEM;
5403			goto err_out;
5404		}
5405		continue;
5406
5407err_out:
5408		close(fd);
5409		return err;
5410	}
5411
5412	return 0;
5413}
5414
5415static struct bpf_core_cand_list *
5416bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5417{
5418	struct bpf_core_cand local_cand = {};
5419	struct bpf_core_cand_list *cands;
5420	const struct btf *main_btf;
5421	const struct btf_type *local_t;
5422	const char *local_name;
5423	size_t local_essent_len;
5424	int err, i;
5425
5426	local_cand.btf = local_btf;
5427	local_cand.id = local_type_id;
5428	local_t = btf__type_by_id(local_btf, local_type_id);
5429	if (!local_t)
5430		return ERR_PTR(-EINVAL);
5431
5432	local_name = btf__name_by_offset(local_btf, local_t->name_off);
5433	if (str_is_empty(local_name))
5434		return ERR_PTR(-EINVAL);
5435	local_essent_len = bpf_core_essential_name_len(local_name);
5436
5437	cands = calloc(1, sizeof(*cands));
5438	if (!cands)
5439		return ERR_PTR(-ENOMEM);
5440
5441	/* Attempt to find target candidates in vmlinux BTF first */
5442	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5443	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5444	if (err)
5445		goto err_out;
5446
5447	/* if vmlinux BTF has any candidate, don't go for module BTFs */
5448	if (cands->len)
5449		return cands;
5450
5451	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5452	if (obj->btf_vmlinux_override)
5453		return cands;
5454
5455	/* now look through module BTFs, trying to still find candidates */
5456	err = load_module_btfs(obj);
5457	if (err)
5458		goto err_out;
5459
5460	for (i = 0; i < obj->btf_module_cnt; i++) {
5461		err = bpf_core_add_cands(&local_cand, local_essent_len,
5462					 obj->btf_modules[i].btf,
5463					 obj->btf_modules[i].name,
5464					 btf__type_cnt(obj->btf_vmlinux),
5465					 cands);
5466		if (err)
5467			goto err_out;
5468	}
5469
5470	return cands;
5471err_out:
5472	bpf_core_free_cands(cands);
5473	return ERR_PTR(err);
5474}
5475
5476/* Check local and target types for compatibility. This check is used for
5477 * type-based CO-RE relocations and follows slightly different rules than
5478 * field-based relocations. This function assumes that root types were already
5479 * checked for name match. Beyond that initial root-level name check, names
5480 * are completely ignored. Compatibility rules are as follows:
5481 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5482 *     kind should match for local and target types (i.e., STRUCT is not
5483 *     compatible with UNION);
5484 *   - for ENUMs, the size is ignored;
5485 *   - for INT, size and signedness are ignored;
5486 *   - for ARRAY, dimensionality is ignored, element types are checked for
5487 *     compatibility recursively;
5488 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
5489 *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5490 *   - FUNC_PROTOs are compatible if they have compatible signature: same
5491 *     number of input args and compatible return and argument types.
5492 * These rules are not set in stone and probably will be adjusted as we get
5493 * more experience with using BPF CO-RE relocations.
5494 */
5495int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5496			      const struct btf *targ_btf, __u32 targ_id)
5497{
5498	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5499}
5500
5501int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5502			 const struct btf *targ_btf, __u32 targ_id)
5503{
5504	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5505}
5506
5507static size_t bpf_core_hash_fn(const void *key, void *ctx)
5508{
5509	return (size_t)key;
5510}
5511
5512static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5513{
5514	return k1 == k2;
5515}
5516
5517static void *u32_as_hash_key(__u32 x)
5518{
5519	return (void *)(uintptr_t)x;
5520}
5521
5522static int record_relo_core(struct bpf_program *prog,
5523			    const struct bpf_core_relo *core_relo, int insn_idx)
5524{
5525	struct reloc_desc *relos, *relo;
5526
5527	relos = libbpf_reallocarray(prog->reloc_desc,
5528				    prog->nr_reloc + 1, sizeof(*relos));
5529	if (!relos)
5530		return -ENOMEM;
5531	relo = &relos[prog->nr_reloc];
5532	relo->type = RELO_CORE;
5533	relo->insn_idx = insn_idx;
5534	relo->core_relo = core_relo;
5535	prog->reloc_desc = relos;
5536	prog->nr_reloc++;
5537	return 0;
5538}
5539
5540static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5541{
5542	struct reloc_desc *relo;
5543	int i;
5544
5545	for (i = 0; i < prog->nr_reloc; i++) {
5546		relo = &prog->reloc_desc[i];
5547		if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5548			continue;
5549
5550		return relo->core_relo;
5551	}
5552
5553	return NULL;
5554}
5555
5556static int bpf_core_resolve_relo(struct bpf_program *prog,
5557				 const struct bpf_core_relo *relo,
5558				 int relo_idx,
5559				 const struct btf *local_btf,
5560				 struct hashmap *cand_cache,
5561				 struct bpf_core_relo_res *targ_res)
5562{
5563	struct bpf_core_spec specs_scratch[3] = {};
5564	const void *type_key = u32_as_hash_key(relo->type_id);
5565	struct bpf_core_cand_list *cands = NULL;
5566	const char *prog_name = prog->name;
5567	const struct btf_type *local_type;
5568	const char *local_name;
5569	__u32 local_id = relo->type_id;
5570	int err;
5571
5572	local_type = btf__type_by_id(local_btf, local_id);
5573	if (!local_type)
5574		return -EINVAL;
5575
5576	local_name = btf__name_by_offset(local_btf, local_type->name_off);
5577	if (!local_name)
5578		return -EINVAL;
5579
5580	if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5581	    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
5582		cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5583		if (IS_ERR(cands)) {
5584			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5585				prog_name, relo_idx, local_id, btf_kind_str(local_type),
5586				local_name, PTR_ERR(cands));
5587			return PTR_ERR(cands);
5588		}
5589		err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
5590		if (err) {
5591			bpf_core_free_cands(cands);
5592			return err;
5593		}
5594	}
5595
5596	return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5597				       targ_res);
5598}
5599
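/* Apply all CO-RE relocations described in the object's .BTF.ext info.
 * If targ_btf_path is given, BTF parsed from that file overrides vmlinux BTF
 * as the relocation target; module BTFs are not consulted in that case.
 */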
5600static int
5601bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5602{
5603	const struct btf_ext_info_sec *sec;
5604	struct bpf_core_relo_res targ_res;
5605	const struct bpf_core_relo *rec;
5606	const struct btf_ext_info *seg;
5607	struct hashmap_entry *entry;
5608	struct hashmap *cand_cache = NULL;
5609	struct bpf_program *prog;
5610	struct bpf_insn *insn;
5611	const char *sec_name;
5612	int i, err = 0, insn_idx, sec_idx, sec_num;
5613
5614	if (obj->btf_ext->core_relo_info.len == 0)
5615		return 0;
5616
5617	if (targ_btf_path) {
5618		obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5619		err = libbpf_get_error(obj->btf_vmlinux_override);
5620		if (err) {
5621			pr_warn("failed to parse target BTF: %d\n", err);
5622			return err;
5623		}
5624	}
5625
5626	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5627	if (IS_ERR(cand_cache)) {
5628		err = PTR_ERR(cand_cache);
5629		goto out;
5630	}
5631
5632	seg = &obj->btf_ext->core_relo_info;
5633	sec_num = 0;
5634	for_each_btf_ext_sec(seg, sec) {
5635		sec_idx = seg->sec_idxs[sec_num];
5636		sec_num++;
5637
5638		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5639		if (str_is_empty(sec_name)) {
5640			err = -EINVAL;
5641			goto out;
5642		}
5643
5644		pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5645
5646		for_each_btf_ext_rec(seg, sec, i, rec) {
			if (rec->insn_off % BPF_INSN_SZ) {
				err = -EINVAL;
				goto out;
			}
5649			insn_idx = rec->insn_off / BPF_INSN_SZ;
5650			prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5651			if (!prog) {
				/* When a __weak subprog is "overridden" by another instance
				 * of the subprog from a different object file, the linker
				 * still appends all the .BTF.ext info that used to belong
				 * to that eliminated subprogram.
				 * This is similar to what the x86-64 linker does for
				 * relocations. So we ignore such relocations, just like we
				 * ignore subprog instructions when discovering subprograms.
				 */
5660				pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5661					 sec_name, i, insn_idx);
5662				continue;
5663			}
5664			/* no need to apply CO-RE relocation if the program is
5665			 * not going to be loaded
5666			 */
5667			if (!prog->autoload)
5668				continue;
5669
5670			/* adjust insn_idx from section frame of reference to the local
5671			 * program's frame of reference; (sub-)program code is not yet
5672			 * relocated, so it's enough to just subtract in-section offset
5673			 */
5674			insn_idx = insn_idx - prog->sec_insn_off;
			if (insn_idx >= prog->insns_cnt) {
				err = -EINVAL;
				goto out;
			}
5677			insn = &prog->insns[insn_idx];
5678
5679			err = record_relo_core(prog, rec, insn_idx);
5680			if (err) {
5681				pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5682					prog->name, i, err);
5683				goto out;
5684			}
5685
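			/* for light skeleton (gen_loader), the relocation is
			 * only recorded here; it is resolved later by the
			 * light skeleton loading path instead of being
			 * patched in place now
			 */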
5686			if (prog->obj->gen_loader)
5687				continue;
5688
5689			err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5690			if (err) {
5691				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5692					prog->name, i, err);
5693				goto out;
5694			}
5695
5696			err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5697			if (err) {
5698				pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5699					prog->name, i, insn_idx, err);
5700				goto out;
5701			}
5702		}
5703	}
5704
5705out:
5706	/* obj->btf_vmlinux and module BTFs are freed after object load */
5707	btf__free(obj->btf_vmlinux_override);
5708	obj->btf_vmlinux_override = NULL;
5709
5710	if (!IS_ERR_OR_NULL(cand_cache)) {
5711		hashmap__for_each_entry(cand_cache, entry, i) {
5712			bpf_core_free_cands(entry->value);
5713		}
5714		hashmap__free(cand_cache);
5715	}
5716	return err;
5717}
5718
/* base special constant poisoned into map-load ldimm64 instructions; also
 * used by the log fixup logic
 */
5720#define MAP_LDIMM64_POISON_BASE 2001000000
5721#define MAP_LDIMM64_POISON_PFX "200100"
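/* The numeric prefix above lets the verifier log fixup code recognize
 * poisoned map loads ("invalid func unknown#200100...") and report which map
 * wasn't created, instead of the raw verifier error.
 */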
5722
5723static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5724			       int insn_idx, struct bpf_insn *insn,
5725			       int map_idx, const struct bpf_map *map)
5726{
5727	int i;
5728
5729	pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5730		 prog->name, relo_idx, insn_idx, map_idx, map->name);
5731
	/* we turn a single ldimm64 into two identical invalid calls: ldimm64
	 * is a double-sized (16-byte) instruction occupying two struct
	 * bpf_insn slots, so patching both keeps instruction offsets intact
	 */
5733	for (i = 0; i < 2; i++) {
5734		insn->code = BPF_JMP | BPF_CALL;
5735		insn->dst_reg = 0;
5736		insn->src_reg = 0;
5737		insn->off = 0;
		/* if this instruction is reachable (not dead code), the
		 * verifier will complain with something like:
		 * invalid func unknown#2001000123
		 * where the lower 123 is the map index into the obj->maps[] array
		 */
5743		insn->imm = MAP_LDIMM64_POISON_BASE + map_idx;
5744
5745		insn++;
5746	}
5747}
5748
5749/* Relocate data references within program code:
5750 *  - map references;
5751 *  - global variable references;
5752 *  - extern references.
5753 */
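/* Note that a ldimm64 instruction spans two struct bpf_insn slots. Depending
 * on the relocation kind below, insn[0].imm holds a map FD (or map index for
 * gen_loader), a BTF type ID, or the low 32 bits of a ksym address, while
 * insn[1].imm holds an offset into the map value, a BTF object FD, or the
 * high 32 bits of that address, respectively.
 */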
5754static int
5755bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5756{
5757	int i;
5758
5759	for (i = 0; i < prog->nr_reloc; i++) {
5760		struct reloc_desc *relo = &prog->reloc_desc[i];
5761		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5762		const struct bpf_map *map;
5763		struct extern_desc *ext;
5764
5765		switch (relo->type) {
5766		case RELO_LD64:
5767			map = &obj->maps[relo->map_idx];
5768			if (obj->gen_loader) {
5769				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5770				insn[0].imm = relo->map_idx;
5771			} else if (map->autocreate) {
5772				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5773				insn[0].imm = map->fd;
5774			} else {
5775				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5776						   relo->map_idx, map);
5777			}
5778			break;
5779		case RELO_DATA:
5780			map = &obj->maps[relo->map_idx];
5781			insn[1].imm = insn[0].imm + relo->sym_off;
5782			if (obj->gen_loader) {
5783				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5784				insn[0].imm = relo->map_idx;
5785			} else if (map->autocreate) {
5786				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5787				insn[0].imm = map->fd;
5788			} else {
5789				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5790						   relo->map_idx, map);
5791			}
5792			break;
5793		case RELO_EXTERN_VAR:
5794			ext = &obj->externs[relo->sym_off];
5795			if (ext->type == EXT_KCFG) {
5796				if (obj->gen_loader) {
5797					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5798					insn[0].imm = obj->kconfig_map_idx;
5799				} else {
5800					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5801					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5802				}
5803				insn[1].imm = ext->kcfg.data_off;
5804			} else /* EXT_KSYM */ {
5805				if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5806					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5807					insn[0].imm = ext->ksym.kernel_btf_id;
5808					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5809				} else { /* typeless ksyms or unresolved typed ksyms */
5810					insn[0].imm = (__u32)ext->ksym.addr;
5811					insn[1].imm = ext->ksym.addr >> 32;
5812				}
5813			}
5814			break;
5815		case RELO_EXTERN_FUNC:
5816			ext = &obj->externs[relo->sym_off];
5817			insn[0].src_reg =