// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

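/* Layout of the fd_array area reserved in the data blob by bpf_gen__init():
 * the first MAX_USED_MAPS slots hold map FDs (indexed by map_idx), the
 * following MAX_KFUNC_DESCS slots hold module BTF FDs for kfunc relocations.
 */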
static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);

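/* Emit the loader program prologue: save ctx in R6, zero-initialize the
 * stack area described by struct loader_stack, and lay down the common
 * error-cleanup epilogue (close all prog/map FDs collected so far) that
 * emit_check_err() branches back to on failure.
 */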
void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}

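/* Append bytes to the data blob (rounded up to 8 bytes) and return the
 * offset of the copy relative to the start of the blob. These offsets are
 * later converted into runtime pointers by ld_imm64 instructions carrying
 * BPF_PSEUDO_MAP_IDX_VALUE relocations against the loader's data map.
 */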
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}

/* Get index for a map_fd/btf_fd slot, either within the reserved fd_array
 * blob (first MAX_USED_MAPS slots for maps, next MAX_KFUNC_DESCS slots for
 * kfunc module BTFs) or, once those are exhausted, in freshly added data
 * whose index is computed relative to the start of fd_array. Caller can
 * decide if the resulting index is usable or not.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
				   bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

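/* Emit a sys_bpf(cmd, attr, attr_size) helper call, where attr is a blob
 * offset of a union bpf_attr prepared at generation time. The syscall
 * result is preserved in R7 for emit_check_err()/debug_ret().
 */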
static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

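/* If the last sys_bpf() result in R7 is negative, jump backwards to the
 * cleanup label emitted by bpf_gen__init(). The jump offset must fit into
 * the 16-bit offset field of a conditional jump, otherwise generation fails
 * with -ERANGE.
 */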
static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

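/* Close the FD currently in R1, skipping the sys_close call (and the
 * optional debug print) when R1 does not hold a valid FD (<= 0).
 */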
static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns,
			       * plus 6 additional insns emitted by debug_regs
			       * when logging is enabled
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

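/* Finalize the loader program: close the temporary btf_fd, copy prog FDs
 * from the stack and map FDs from fd_array into the user-visible
 * bpf_loader_ctx descriptors, and hand the generated insns/data buffers
 * back to the caller through gen->opts.
 */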
int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

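/* Record a BPF_BTF_LOAD command: the raw BTF is copied into the data blob
 * and the resulting btf_fd is remembered on the loader stack for use in
 * subsequent map and prog creation.
 */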
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

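/* Record a BPF_MAP_CREATE command. For regular maps (map_idx >= 0) the
 * resulting FD is stored into the fd_array slot for that index; map_idx < 0
 * is used to create an inner map prototype, whose FD is kept on the stack
 * and closed after the enclosing map-in-map has been created.
 */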
void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.map_flags = map_attr->map_flags;
	attr.map_extra = map_attr->map_extra;
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = max_entries;
	attr.btf_key_type_id = map_attr->btf_key_type_id;
	attr.btf_value_type_id = map_attr->btf_value_type_id;

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_type, value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, bool is_ld64, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->is_ld64 = is_ld64;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
		    !strcmp(kdesc->name, relo->name)) {
			kdesc->ref++;
			return kdesc;
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	kdesc->is_ld64 = relo->is_ld64;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot, 0 for vmlinux */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* jump to insn[insn_idx].off store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
		      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
		 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

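/* Emit the load-time relocation logic for one extern ksym/kfunc: point R8 at
 * the instruction to patch inside the insns blob, then let the ld64
 * (variable) or call (kfunc) specific handler fill in imm/off at load time.
 */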
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d %s\n",
		 relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	if (relo->is_ld64) {
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
	} else {
		emit_relo_kfunc_btf(gen, relo, insn);
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	struct ksym_desc *kdesc;
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		/* only close fds for typed ksyms and kfuncs */
		if (kdesc->is_ld64 && !kdesc->typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = kdesc->insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (!kdesc->is_ld64) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
			if (kdesc->off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

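/* Record a BPF_PROG_LOAD command: license, instructions, func/line info and
 * CO-RE relocations are copied into the data blob, extern relocations are
 * emitted against the copied instructions, the attach target (if any) is
 * resolved at load time via bpf_btf_find_by_name_kind(), and the resulting
 * prog FD is saved on the loader stack.
 */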
void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd prog_idx %d\n",
		 prog_type, insn_cnt, prog_idx);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
	attr.core_relo_cnt = gen->core_relo_cnt;
	core_relos = add_data(gen, gen->core_relos,
			     attr.core_relo_cnt * attr.core_relo_rec_size);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

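/* Record a BPF_MAP_UPDATE_ELEM for key 0 of an internal (global data) map.
 * If user space provided an initial_value pointer in the map descriptor,
 * the value is fetched at load time with bpf_copy_from_user() (or
 * bpf_probe_read_kernel() when the BPF_SKEL_KERNEL flag is set) before the
 * update command is issued.
 */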
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value) {
	 *    if (ctx->flags & BPF_SKEL_KERNEL)
	 *        bpf_probe_read_kernel(value, value_size, initial_value);
	 *    else
	 *        bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

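/* Record a BPF_MAP_UPDATE_ELEM that places an inner map FD into one slot of
 * an outer map-in-map; both the outer map FD and the inner map FD are taken
 * from fd_array at load time.
 */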
void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
		 outer_map_idx, slot, inner_map_idx);

	key = add_data(gen, &slot, sizeof(slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

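/* Record a BPF_MAP_FREEZE command for the map whose FD sits in fd_array
 * slot map_idx (used for read-only internal maps such as .rodata).
 */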
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}

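/* Usage sketch (illustrative, not part of the generator itself): the
 * insns/data buffers returned through gen->opts by bpf_gen__finish() are
 * typically embedded into a light skeleton (.lskel.h) and executed with
 * bpf_load_and_run() from skel_internal.h, roughly:
 *
 *	struct bpf_load_and_run_opts opts = {};
 *
 *	opts.ctx = (struct bpf_loader_ctx *)skel;	// generated skeleton struct
 *	opts.insns = skel_insns;			// loader program emitted above
 *	opts.insns_sz = skel_insns_sz;
 *	opts.data = skel_data;				// blob of attrs/strings/insns/map data
 *	opts.data_sz = skel_data_sz;
 *	err = bpf_load_and_run(&opts);
 *
 * The skel_* identifiers are placeholders; real light skeletons generate
 * their own names and sizes.
 */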