1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8#ifndef _UAPI__LINUX_BPF_H__
9#define _UAPI__LINUX_BPF_H__
10
11#include <linux/types.h>
12#include <linux/bpf_common.h>
13
14/* Extended instruction set based on top of classic BPF */
15
16/* instruction classes */
17#define BPF_JMP32	0x06	/* jmp mode in word width */
18#define BPF_ALU64	0x07	/* alu mode in double word width */
19
20/* ld/ldx fields */
21#define BPF_DW		0x18	/* double word (64-bit) */
22#define BPF_MEMSX	0x80	/* load with sign extension */
23#define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
24#define BPF_XADD	0xc0	/* exclusive add - legacy name */
25
26/* alu/jmp fields */
27#define BPF_MOV		0xb0	/* mov reg to reg */
28#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */
29
30/* change endianness of a register */
31#define BPF_END		0xd0	/* flags for endianness conversion: */
32#define BPF_TO_LE	0x00	/* convert to little-endian */
33#define BPF_TO_BE	0x08	/* convert to big-endian */
34#define BPF_FROM_LE	BPF_TO_LE
35#define BPF_FROM_BE	BPF_TO_BE
36
37/* jmp encodings */
38#define BPF_JNE		0x50	/* jump != */
39#define BPF_JLT		0xa0	/* LT is unsigned, '<' */
40#define BPF_JLE		0xb0	/* LE is unsigned, '<=' */
41#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
42#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
43#define BPF_JSLT	0xc0	/* SLT is signed, '<' */
44#define BPF_JSLE	0xd0	/* SLE is signed, '<=' */
45#define BPF_JCOND	0xe0	/* conditional pseudo jumps: may_goto, goto_or_nop */
46#define BPF_CALL	0x80	/* function call */
47#define BPF_EXIT	0x90	/* function return */
48
49/* atomic op type fields (stored in immediate) */
50#define BPF_FETCH	0x01	/* not an opcode on its own, used to build others */
51#define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
52#define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */
53
54enum bpf_cond_pseudo_jmp {
55	BPF_MAY_GOTO = 0,
56};
57
58/* Register numbers */
59enum {
60	BPF_REG_0 = 0,
61	BPF_REG_1,
62	BPF_REG_2,
63	BPF_REG_3,
64	BPF_REG_4,
65	BPF_REG_5,
66	BPF_REG_6,
67	BPF_REG_7,
68	BPF_REG_8,
69	BPF_REG_9,
70	BPF_REG_10,
71	__MAX_BPF_REG,
72};
73
74/* BPF has 10 general purpose 64-bit registers and stack frame. */
75#define MAX_BPF_REG	__MAX_BPF_REG
76
77struct bpf_insn {
78	__u8	code;		/* opcode */
79	__u8	dst_reg:4;	/* dest register */
80	__u8	src_reg:4;	/* source register */
81	__s16	off;		/* signed offset */
82	__s32	imm;		/* signed immediate constant */
83};
84
85/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
86 * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
87 * the trailing flexible array member) instead.
88 */
89struct bpf_lpm_trie_key {
90	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
91	__u8	data[0];	/* Arbitrary size */
92};
93
94/* Header for bpf_lpm_trie_key structs */
95struct bpf_lpm_trie_key_hdr {
96	__u32	prefixlen;
97};
98
99/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
100struct bpf_lpm_trie_key_u8 {
101	union {
102		struct bpf_lpm_trie_key_hdr	hdr;
103		__u32				prefixlen;
104	};
105	__u8	data[];		/* Arbitrary size */
106};
107
108struct bpf_cgroup_storage_key {
109	__u64	cgroup_inode_id;	/* cgroup inode id */
110	__u32	attach_type;		/* program attach type (enum bpf_attach_type) */
111};
112
113enum bpf_cgroup_iter_order {
114	BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
115	BPF_CGROUP_ITER_SELF_ONLY,		/* process only a single object. */
116	BPF_CGROUP_ITER_DESCENDANTS_PRE,	/* walk descendants in pre-order. */
117	BPF_CGROUP_ITER_DESCENDANTS_POST,	/* walk descendants in post-order. */
118	BPF_CGROUP_ITER_ANCESTORS_UP,		/* walk ancestors upward. */
119};
120
121union bpf_iter_link_info {
122	struct {
123		__u32	map_fd;
124	} map;
125	struct {
126		enum bpf_cgroup_iter_order order;
127
128		/* At most one of cgroup_fd and cgroup_id can be non-zero. If
129		 * both are zero, the walk starts from the default cgroup v2
130		 * root. For walking v1 hierarchy, one should always explicitly
131		 * specify cgroup_fd.
132		 */
133		__u32	cgroup_fd;
134		__u64	cgroup_id;
135	} cgroup;
136	/* Parameters of task iterators. */
137	struct {
138		__u32	tid;
139		__u32	pid;
140		__u32	pid_fd;
141	} task;
142};
143
144/* BPF syscall commands, see bpf(2) man-page for more details. */
145/**
146 * DOC: eBPF Syscall Preamble
147 *
148 * The operation to be performed by the **bpf**\ () system call is determined
149 * by the *cmd* argument. Each operation takes an accompanying argument,
150 * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
151 * below). The size argument is the size of the union pointed to by *attr*.
152 */
153/**
154 * DOC: eBPF Syscall Commands
155 *
156 * BPF_MAP_CREATE
157 *	Description
158 *		Create a map and return a file descriptor that refers to the
159 *		map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
160 *		is automatically enabled for the new file descriptor.
161 *
162 *		Applying **close**\ (2) to the file descriptor returned by
163 *		**BPF_MAP_CREATE** will delete the map (but see NOTES).
164 *
165 *	Return
166 *		A new file descriptor (a nonnegative integer), or -1 if an
167 *		error occurred (in which case, *errno* is set appropriately).
168 *
169 * BPF_MAP_LOOKUP_ELEM
170 *	Description
171 *		Look up an element with a given *key* in the map referred to
172 *		by the file descriptor *map_fd*.
173 *
174 *		The *flags* argument may be specified as one of the
175 *		following:
176 *
177 *		**BPF_F_LOCK**
178 *			Look up the value of a spin-locked map without
179 *			returning the lock. This must be specified if the
180 *			elements contain a spinlock.
181 *
182 *	Return
183 *		Returns zero on success. On error, -1 is returned and *errno*
184 *		is set appropriately.
185 *
186 * BPF_MAP_UPDATE_ELEM
187 *	Description
188 *		Create or update an element (key/value pair) in a specified map.
189 *
190 *		The *flags* argument should be specified as one of the
191 *		following:
192 *
193 *		**BPF_ANY**
194 *			Create a new element or update an existing element.
195 *		**BPF_NOEXIST**
196 *			Create a new element only if it did not exist.
197 *		**BPF_EXIST**
198 *			Update an existing element.
199 *		**BPF_F_LOCK**
200 *			Update a spin_lock-ed map element.
201 *
202 *	Return
203 *		Returns zero on success. On error, -1 is returned and *errno*
204 *		is set appropriately.
205 *
206 *		May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
207 *		**E2BIG**, **EEXIST**, or **ENOENT**.
208 *
209 *		**E2BIG**
210 *			The number of elements in the map reached the
211 *			*max_entries* limit specified at map creation time.
212 *		**EEXIST**
213 *			If *flags* specifies **BPF_NOEXIST** and the element
214 *			with *key* already exists in the map.
215 *		**ENOENT**
216 *			If *flags* specifies **BPF_EXIST** and the element with
217 *			*key* does not exist in the map.
218 *
219 * BPF_MAP_DELETE_ELEM
220 *	Description
221 *		Look up and delete an element by key in a specified map.
222 *
223 *	Return
224 *		Returns zero on success. On error, -1 is returned and *errno*
225 *		is set appropriately.
226 *
227 * BPF_MAP_GET_NEXT_KEY
228 *	Description
229 *		Look up an element by key in a specified map and return the key
230 *		of the next element. Can be used to iterate over all elements
231 *		in the map.
232 *
233 *	Return
234 *		Returns zero on success. On error, -1 is returned and *errno*
235 *		is set appropriately.
236 *
237 *		The following cases can be used to iterate over all elements of
238 *		the map:
239 *
240 *		* If *key* is not found, the operation returns zero and sets
241 *		  the *next_key* pointer to the key of the first element.
242 *		* If *key* is found, the operation returns zero and sets the
243 *		  *next_key* pointer to the key of the next element.
244 *		* If *key* is the last element, returns -1 and *errno* is set
245 *		  to **ENOENT**.
246 *
247 *		May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
248 *		**EINVAL** on error.
249 *
250 * BPF_PROG_LOAD
251 *	Description
252 *		Verify and load an eBPF program, returning a new file
253 *		descriptor associated with the program.
254 *
255 *		Applying **close**\ (2) to the file descriptor returned by
256 *		**BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
257 *
258 *		The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
259 *		automatically enabled for the new file descriptor.
260 *
261 *	Return
262 *		A new file descriptor (a nonnegative integer), or -1 if an
263 *		error occurred (in which case, *errno* is set appropriately).
264 *
265 * BPF_OBJ_PIN
266 *	Description
267 *		Pin an eBPF program or map referred by the specified *bpf_fd*
268 *		to the provided *pathname* on the filesystem.
269 *
270 *		The *pathname* argument must not contain a dot (".").
271 *
272 *		On success, *pathname* retains a reference to the eBPF object,
273 *		preventing deallocation of the object when the original
274 *		*bpf_fd* is closed. This allow the eBPF object to live beyond
275 *		**close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
276 *		process.
277 *
278 *		Applying **unlink**\ (2) or similar calls to the *pathname*
279 *		unpins the object from the filesystem, removing the reference.
280 *		If no other file descriptors or filesystem nodes refer to the
281 *		same object, it will be deallocated (see NOTES).
282 *
283 *		The filesystem type for the parent directory of *pathname* must
284 *		be **BPF_FS_MAGIC**.
285 *
286 *	Return
287 *		Returns zero on success. On error, -1 is returned and *errno*
288 *		is set appropriately.
289 *
290 * BPF_OBJ_GET
291 *	Description
292 *		Open a file descriptor for the eBPF object pinned to the
293 *		specified *pathname*.
294 *
295 *	Return
296 *		A new file descriptor (a nonnegative integer), or -1 if an
297 *		error occurred (in which case, *errno* is set appropriately).
298 *
299 * BPF_PROG_ATTACH
300 *	Description
301 *		Attach an eBPF program to a *target_fd* at the specified
302 *		*attach_type* hook.
303 *
304 *		The *attach_type* specifies the eBPF attachment point to
305 *		attach the program to, and must be one of *bpf_attach_type*
306 *		(see below).
307 *
308 *		The *attach_bpf_fd* must be a valid file descriptor for a
309 *		loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
310 *		or sock_ops type corresponding to the specified *attach_type*.
311 *
312 *		The *target_fd* must be a valid file descriptor for a kernel
313 *		object which depends on the attach type of *attach_bpf_fd*:
314 *
315 *		**BPF_PROG_TYPE_CGROUP_DEVICE**,
316 *		**BPF_PROG_TYPE_CGROUP_SKB**,
317 *		**BPF_PROG_TYPE_CGROUP_SOCK**,
318 *		**BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
319 *		**BPF_PROG_TYPE_CGROUP_SOCKOPT**,
320 *		**BPF_PROG_TYPE_CGROUP_SYSCTL**,
321 *		**BPF_PROG_TYPE_SOCK_OPS**
322 *
323 *			Control Group v2 hierarchy with the eBPF controller
324 *			enabled. Requires the kernel to be compiled with
325 *			**CONFIG_CGROUP_BPF**.
326 *
327 *		**BPF_PROG_TYPE_FLOW_DISSECTOR**
328 *
329 *			Network namespace (eg /proc/self/ns/net).
330 *
331 *		**BPF_PROG_TYPE_LIRC_MODE2**
332 *
333 *			LIRC device path (eg /dev/lircN). Requires the kernel
334 *			to be compiled with **CONFIG_BPF_LIRC_MODE2**.
335 *
336 *		**BPF_PROG_TYPE_SK_SKB**,
337 *		**BPF_PROG_TYPE_SK_MSG**
338 *
339 *			eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
340 *
341 *	Return
342 *		Returns zero on success. On error, -1 is returned and *errno*
343 *		is set appropriately.
344 *
345 * BPF_PROG_DETACH
346 *	Description
347 *		Detach the eBPF program associated with the *target_fd* at the
348 *		hook specified by *attach_type*. The program must have been
349 *		previously attached using **BPF_PROG_ATTACH**.
350 *
351 *	Return
352 *		Returns zero on success. On error, -1 is returned and *errno*
353 *		is set appropriately.
354 *
355 * BPF_PROG_TEST_RUN
356 *	Description
357 *		Run the eBPF program associated with the *prog_fd* a *repeat*
358 *		number of times against a provided program context *ctx_in* and
359 *		data *data_in*, and return the modified program context
360 *		*ctx_out*, *data_out* (for example, packet data), result of the
361 *		execution *retval*, and *duration* of the test run.
362 *
363 *		The sizes of the buffers provided as input and output
364 *		parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
365 *		be provided in the corresponding variables *ctx_size_in*,
366 *		*ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
367 *		of these parameters are not provided (ie set to NULL), the
368 *		corresponding size field must be zero.
369 *
370 *		Some program types have particular requirements:
371 *
372 *		**BPF_PROG_TYPE_SK_LOOKUP**
373 *			*data_in* and *data_out* must be NULL.
374 *
375 *		**BPF_PROG_TYPE_RAW_TRACEPOINT**,
376 *		**BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
377 *
378 *			*ctx_out*, *data_in* and *data_out* must be NULL.
379 *			*repeat* must be zero.
380 *
381 *		BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
382 *
383 *	Return
384 *		Returns zero on success. On error, -1 is returned and *errno*
385 *		is set appropriately.
386 *
387 *		**ENOSPC**
388 *			Either *data_size_out* or *ctx_size_out* is too small.
389 *		**ENOTSUPP**
390 *			This command is not supported by the program type of
391 *			the program referred to by *prog_fd*.
392 *
393 * BPF_PROG_GET_NEXT_ID
394 *	Description
395 *		Fetch the next eBPF program currently loaded into the kernel.
396 *
397 *		Looks for the eBPF program with an id greater than *start_id*
398 *		and updates *next_id* on success. If no other eBPF programs
399 *		remain with ids higher than *start_id*, returns -1 and sets
400 *		*errno* to **ENOENT**.
401 *
402 *	Return
403 *		Returns zero on success. On error, or when no id remains, -1
404 *		is returned and *errno* is set appropriately.
405 *
406 * BPF_MAP_GET_NEXT_ID
407 *	Description
408 *		Fetch the next eBPF map currently loaded into the kernel.
409 *
410 *		Looks for the eBPF map with an id greater than *start_id*
411 *		and updates *next_id* on success. If no other eBPF maps
412 *		remain with ids higher than *start_id*, returns -1 and sets
413 *		*errno* to **ENOENT**.
414 *
415 *	Return
416 *		Returns zero on success. On error, or when no id remains, -1
417 *		is returned and *errno* is set appropriately.
418 *
419 * BPF_PROG_GET_FD_BY_ID
420 *	Description
421 *		Open a file descriptor for the eBPF program corresponding to
422 *		*prog_id*.
423 *
424 *	Return
425 *		A new file descriptor (a nonnegative integer), or -1 if an
426 *		error occurred (in which case, *errno* is set appropriately).
427 *
428 * BPF_MAP_GET_FD_BY_ID
429 *	Description
430 *		Open a file descriptor for the eBPF map corresponding to
431 *		*map_id*.
432 *
433 *	Return
434 *		A new file descriptor (a nonnegative integer), or -1 if an
435 *		error occurred (in which case, *errno* is set appropriately).
436 *
437 * BPF_OBJ_GET_INFO_BY_FD
438 *	Description
439 *		Obtain information about the eBPF object corresponding to
440 *		*bpf_fd*.
441 *
442 *		Populates up to *info_len* bytes of *info*, which will be in
443 *		one of the following formats depending on the eBPF object type
444 *		of *bpf_fd*:
445 *
446 *		* **struct bpf_prog_info**
447 *		* **struct bpf_map_info**
448 *		* **struct bpf_btf_info**
449 *		* **struct bpf_link_info**
450 *
451 *	Return
452 *		Returns zero on success. On error, -1 is returned and *errno*
453 *		is set appropriately.
454 *
455 * BPF_PROG_QUERY
456 *	Description
457 *		Obtain information about eBPF programs associated with the
458 *		specified *attach_type* hook.
459 *
460 *		The *target_fd* must be a valid file descriptor for a kernel
461 *		object which depends on the attach type of *attach_bpf_fd*:
462 *
463 *		**BPF_PROG_TYPE_CGROUP_DEVICE**,
464 *		**BPF_PROG_TYPE_CGROUP_SKB**,
465 *		**BPF_PROG_TYPE_CGROUP_SOCK**,
466 *		**BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
467 *		**BPF_PROG_TYPE_CGROUP_SOCKOPT**,
468 *		**BPF_PROG_TYPE_CGROUP_SYSCTL**,
469 *		**BPF_PROG_TYPE_SOCK_OPS**
470 *
471 *			Control Group v2 hierarchy with the eBPF controller
472 *			enabled. Requires the kernel to be compiled with
473 *			**CONFIG_CGROUP_BPF**.
474 *
475 *		**BPF_PROG_TYPE_FLOW_DISSECTOR**
476 *
477 *			Network namespace (eg /proc/self/ns/net).
478 *
479 *		**BPF_PROG_TYPE_LIRC_MODE2**
480 *
481 *			LIRC device path (eg /dev/lircN). Requires the kernel
482 *			to be compiled with **CONFIG_BPF_LIRC_MODE2**.
483 *
484 *		**BPF_PROG_QUERY** always fetches the number of programs
485 *		attached and the *attach_flags* which were used to attach those
486 *		programs. Additionally, if *prog_ids* is nonzero and the number
487 *		of attached programs is less than *prog_cnt*, populates
488 *		*prog_ids* with the eBPF program ids of the programs attached
489 *		at *target_fd*.
490 *
491 *		The following flags may alter the result:
492 *
493 *		**BPF_F_QUERY_EFFECTIVE**
494 *			Only return information regarding programs which are
495 *			currently effective at the specified *target_fd*.
496 *
497 *	Return
498 *		Returns zero on success. On error, -1 is returned and *errno*
499 *		is set appropriately.
500 *
501 * BPF_RAW_TRACEPOINT_OPEN
502 *	Description
503 *		Attach an eBPF program to a tracepoint *name* to access kernel
504 *		internal arguments of the tracepoint in their raw form.
505 *
506 *		The *prog_fd* must be a valid file descriptor associated with
507 *		a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
508 *
509 *		No ABI guarantees are made about the content of tracepoint
510 *		arguments exposed to the corresponding eBPF program.
511 *
512 *		Applying **close**\ (2) to the file descriptor returned by
513 *		**BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES).
514 *
515 *	Return
516 *		A new file descriptor (a nonnegative integer), or -1 if an
517 *		error occurred (in which case, *errno* is set appropriately).
518 *
519 * BPF_BTF_LOAD
520 *	Description
521 *		Verify and load BPF Type Format (BTF) metadata into the kernel,
522 *		returning a new file descriptor associated with the metadata.
523 *		BTF is described in more detail at
524 *		https://www.kernel.org/doc/html/latest/bpf/btf.html.
525 *
526 *		The *btf* parameter must point to valid memory providing
527 *		*btf_size* bytes of BTF binary metadata.
528 *
529 *		The returned file descriptor can be passed to other **bpf**\ ()
530 *		subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
531 *		associate the BTF with those objects.
532 *
533 *		Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
534 *		parameters to specify a *btf_log_buf*, *btf_log_size* and
535 *		*btf_log_level* which allow the kernel to return freeform log
536 *		output regarding the BTF verification process.
537 *
538 *	Return
539 *		A new file descriptor (a nonnegative integer), or -1 if an
540 *		error occurred (in which case, *errno* is set appropriately).
541 *
542 * BPF_BTF_GET_FD_BY_ID
543 *	Description
544 *		Open a file descriptor for the BPF Type Format (BTF)
545 *		corresponding to *btf_id*.
546 *
547 *	Return
548 *		A new file descriptor (a nonnegative integer), or -1 if an
549 *		error occurred (in which case, *errno* is set appropriately).
550 *
551 * BPF_TASK_FD_QUERY
552 *	Description
553 *		Obtain information about eBPF programs associated with the
554 *		target process identified by *pid* and *fd*.
555 *
556 *		If the *pid* and *fd* are associated with a tracepoint, kprobe
557 *		or uprobe perf event, then the *prog_id* and *fd_type* will
558 *		be populated with the eBPF program id and file descriptor type
559 *		of type **bpf_task_fd_type**. If associated with a kprobe or
560 *		uprobe, the  *probe_offset* and *probe_addr* will also be
561 *		populated. Optionally, if *buf* is provided, then up to
562 *		*buf_len* bytes of *buf* will be populated with the name of
563 *		the tracepoint, kprobe or uprobe.
564 *
565 *		The resulting *prog_id* may be introspected in deeper detail
566 *		using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
567 *
568 *	Return
569 *		Returns zero on success. On error, -1 is returned and *errno*
570 *		is set appropriately.
571 *
572 * BPF_MAP_LOOKUP_AND_DELETE_ELEM
573 *	Description
574 *		Look up an element with the given *key* in the map referred to
575 *		by the file descriptor *fd*, and if found, delete the element.
576 *
577 *		For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
578 *		types, the *flags* argument needs to be set to 0, but for other
579 *		map types, it may be specified as:
580 *
581 *		**BPF_F_LOCK**
582 *			Look up and delete the value of a spin-locked map
583 *			without returning the lock. This must be specified if
584 *			the elements contain a spinlock.
585 *
586 *		The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
587 *		implement this command as a "pop" operation, deleting the top
588 *		element rather than one corresponding to *key*.
589 *		The *key* and *key_len* parameters should be zeroed when
590 *		issuing this operation for these map types.
591 *
592 *		This command is only valid for the following map types:
593 *		* **BPF_MAP_TYPE_QUEUE**
594 *		* **BPF_MAP_TYPE_STACK**
595 *		* **BPF_MAP_TYPE_HASH**
596 *		* **BPF_MAP_TYPE_PERCPU_HASH**
597 *		* **BPF_MAP_TYPE_LRU_HASH**
598 *		* **BPF_MAP_TYPE_LRU_PERCPU_HASH**
599 *
600 *	Return
601 *		Returns zero on success. On error, -1 is returned and *errno*
602 *		is set appropriately.
603 *
604 * BPF_MAP_FREEZE
605 *	Description
606 *		Freeze the permissions of the specified map.
607 *
608 *		Write permissions may be frozen by passing zero *flags*.
609 *		Upon success, no future syscall invocations may alter the
610 *		map state of *map_fd*. Write operations from eBPF programs
611 *		are still possible for a frozen map.
612 *
613 *		Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
614 *
615 *	Return
616 *		Returns zero on success. On error, -1 is returned and *errno*
617 *		is set appropriately.
618 *
619 * BPF_BTF_GET_NEXT_ID
620 *	Description
621 *		Fetch the next BPF Type Format (BTF) object currently loaded
622 *		into the kernel.
623 *
624 *		Looks for the BTF object with an id greater than *start_id*
625 *		and updates *next_id* on success. If no other BTF objects
626 *		remain with ids higher than *start_id*, returns -1 and sets
627 *		*errno* to **ENOENT**.
628 *
629 *	Return
630 *		Returns zero on success. On error, or when no id remains, -1
631 *		is returned and *errno* is set appropriately.
632 *
633 * BPF_MAP_LOOKUP_BATCH
634 *	Description
635 *		Iterate and fetch multiple elements in a map.
636 *
637 *		Two opaque values are used to manage batch operations,
638 *		*in_batch* and *out_batch*. Initially, *in_batch* must be set
639 *		to NULL to begin the batched operation. After each subsequent
640 *		**BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
641 *		*out_batch* as the *in_batch* for the next operation to
642 *		continue iteration from the current point. Both *in_batch* and
643 *		*out_batch* must point to memory large enough to hold a key,
644 *		except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
645 *		LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
646 *		must be at least 4 bytes wide regardless of key size.
647 *
648 *		The *keys* and *values* are output parameters which must point
649 *		to memory large enough to hold *count* items based on the key
650 *		and value size of the map *map_fd*. The *keys* buffer must be
651 *		of *key_size* * *count*. The *values* buffer must be of
652 *		*value_size* * *count*.
653 *
654 *		The *elem_flags* argument may be specified as one of the
655 *		following:
656 *
657 *		**BPF_F_LOCK**
658 *			Look up the value of a spin-locked map without
659 *			returning the lock. This must be specified if the
660 *			elements contain a spinlock.
661 *
662 *		On success, *count* elements from the map are copied into the
663 *		user buffer, with the keys copied into *keys* and the values
664 *		copied into the corresponding indices in *values*.
665 *
666 *		If an error is returned and *errno* is not **EFAULT**, *count*
667 *		is set to the number of successfully processed elements.
668 *
669 *	Return
670 *		Returns zero on success. On error, -1 is returned and *errno*
671 *		is set appropriately.
672 *
673 *		May set *errno* to **ENOSPC** to indicate that *keys* or
674 *		*values* is too small to dump an entire bucket during
675 *		iteration of a hash-based map type.
676 *
677 * BPF_MAP_LOOKUP_AND_DELETE_BATCH
678 *	Description
679 *		Iterate and delete all elements in a map.
680 *
681 *		This operation has the same behavior as
682 *		**BPF_MAP_LOOKUP_BATCH** with two exceptions:
683 *
684 *		* Every element that is successfully returned is also deleted
685 *		  from the map. This is at least *count* elements. Note that
686 *		  *count* is both an input and an output parameter.
687 *		* Upon returning with *errno* set to **EFAULT**, up to
688 *		  *count* elements may be deleted without returning the keys
689 *		  and values of the deleted elements.
690 *
691 *	Return
692 *		Returns zero on success. On error, -1 is returned and *errno*
693 *		is set appropriately.
694 *
695 * BPF_MAP_UPDATE_BATCH
696 *	Description
697 *		Update multiple elements in a map by *key*.
698 *
699 *		The *keys* and *values* are input parameters which must point
700 *		to memory large enough to hold *count* items based on the key
701 *		and value size of the map *map_fd*. The *keys* buffer must be
702 *		of *key_size* * *count*. The *values* buffer must be of
703 *		*value_size* * *count*.
704 *
705 *		Each element specified in *keys* is sequentially updated to the
706 *		value in the corresponding index in *values*. The *in_batch*
707 *		and *out_batch* parameters are ignored and should be zeroed.
708 *
709 *		The *elem_flags* argument should be specified as one of the
710 *		following:
711 *
712 *		**BPF_ANY**
713 *			Create new elements or update a existing elements.
714 *		**BPF_NOEXIST**
715 *			Create new elements only if they do not exist.
716 *		**BPF_EXIST**
717 *			Update existing elements.
718 *		**BPF_F_LOCK**
719 *			Update spin_lock-ed map elements. This must be
720 *			specified if the map value contains a spinlock.
721 *
722 *		On success, *count* elements from the map are updated.
723 *
724 *		If an error is returned and *errno* is not **EFAULT**, *count*
725 *		is set to the number of successfully processed elements.
726 *
727 *	Return
728 *		Returns zero on success. On error, -1 is returned and *errno*
729 *		is set appropriately.
730 *
731 *		May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
732 *		**E2BIG**. **E2BIG** indicates that the number of elements in
733 *		the map reached the *max_entries* limit specified at map
734 *		creation time.
735 *
736 *		May set *errno* to one of the following error codes under
737 *		specific circumstances:
738 *
739 *		**EEXIST**
740 *			If *flags* specifies **BPF_NOEXIST** and the element
741 *			with *key* already exists in the map.
742 *		**ENOENT**
743 *			If *flags* specifies **BPF_EXIST** and the element with
744 *			*key* does not exist in the map.
745 *
746 * BPF_MAP_DELETE_BATCH
747 *	Description
748 *		Delete multiple elements in a map by *key*.
749 *
750 *		The *keys* parameter is an input parameter which must point
751 *		to memory large enough to hold *count* items based on the key
752 *		size of the map *map_fd*, that is, *key_size* * *count*.
753 *
754 *		Each element specified in *keys* is sequentially deleted. The
755 *		*in_batch*, *out_batch*, and *values* parameters are ignored
756 *		and should be zeroed.
757 *
758 *		The *elem_flags* argument may be specified as one of the
759 *		following:
760 *
761 *		**BPF_F_LOCK**
762 *			Look up the value of a spin-locked map without
763 *			returning the lock. This must be specified if the
764 *			elements contain a spinlock.
765 *
766 *		On success, *count* elements from the map are updated.
767 *
768 *		If an error is returned and *errno* is not **EFAULT**, *count*
769 *		is set to the number of successfully processed elements. If
770 *		*errno* is **EFAULT**, up to *count* elements may be been
771 *		deleted.
772 *
773 *	Return
774 *		Returns zero on success. On error, -1 is returned and *errno*
775 *		is set appropriately.
776 *
777 * BPF_LINK_CREATE
778 *	Description
779 *		Attach an eBPF program to a *target_fd* at the specified
780 *		*attach_type* hook and return a file descriptor handle for
781 *		managing the link.
782 *
783 *	Return
784 *		A new file descriptor (a nonnegative integer), or -1 if an
785 *		error occurred (in which case, *errno* is set appropriately).
786 *
787 * BPF_LINK_UPDATE
788 *	Description
789 *		Update the eBPF program in the specified *link_fd* to
790 *		*new_prog_fd*.
791 *
792 *	Return
793 *		Returns zero on success. On error, -1 is returned and *errno*
794 *		is set appropriately.
795 *
796 * BPF_LINK_GET_FD_BY_ID
797 *	Description
798 *		Open a file descriptor for the eBPF Link corresponding to
799 *		*link_id*.
800 *
801 *	Return
802 *		A new file descriptor (a nonnegative integer), or -1 if an
803 *		error occurred (in which case, *errno* is set appropriately).
804 *
805 * BPF_LINK_GET_NEXT_ID
806 *	Description
807 *		Fetch the next eBPF link currently loaded into the kernel.
808 *
809 *		Looks for the eBPF link with an id greater than *start_id*
810 *		and updates *next_id* on success. If no other eBPF links
811 *		remain with ids higher than *start_id*, returns -1 and sets
812 *		*errno* to **ENOENT**.
813 *
814 *	Return
815 *		Returns zero on success. On error, or when no id remains, -1
816 *		is returned and *errno* is set appropriately.
817 *
818 * BPF_ENABLE_STATS
819 *	Description
820 *		Enable eBPF runtime statistics gathering.
821 *
822 *		Runtime statistics gathering for the eBPF runtime is disabled
823 *		by default to minimize the corresponding performance overhead.
824 *		This command enables statistics globally.
825 *
826 *		Multiple programs may independently enable statistics.
827 *		After gathering the desired statistics, eBPF runtime statistics
828 *		may be disabled again by calling **close**\ (2) for the file
829 *		descriptor returned by this function. Statistics will only be
830 *		disabled system-wide when all outstanding file descriptors
831 *		returned by prior calls for this subcommand are closed.
832 *
833 *	Return
834 *		A new file descriptor (a nonnegative integer), or -1 if an
835 *		error occurred (in which case, *errno* is set appropriately).
836 *
837 * BPF_ITER_CREATE
838 *	Description
839 *		Create an iterator on top of the specified *link_fd* (as
840 *		previously created using **BPF_LINK_CREATE**) and return a
841 *		file descriptor that can be used to trigger the iteration.
842 *
843 *		If the resulting file descriptor is pinned to the filesystem
844 *		using  **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
845 *		for that path will trigger the iterator to read kernel state
846 *		using the eBPF program attached to *link_fd*.
847 *
848 *	Return
849 *		A new file descriptor (a nonnegative integer), or -1 if an
850 *		error occurred (in which case, *errno* is set appropriately).
851 *
852 * BPF_LINK_DETACH
853 *	Description
854 *		Forcefully detach the specified *link_fd* from its
855 *		corresponding attachment point.
856 *
857 *	Return
858 *		Returns zero on success. On error, -1 is returned and *errno*
859 *		is set appropriately.
860 *
861 * BPF_PROG_BIND_MAP
862 *	Description
863 *		Bind a map to the lifetime of an eBPF program.
864 *
865 *		The map identified by *map_fd* is bound to the program
866 *		identified by *prog_fd* and only released when *prog_fd* is
867 *		released. This may be used in cases where metadata should be
868 *		associated with a program which otherwise does not contain any
869 *		references to the map (for example, embedded in the eBPF
870 *		program instructions).
871 *
872 *	Return
873 *		Returns zero on success. On error, -1 is returned and *errno*
874 *		is set appropriately.
875 *
876 * BPF_TOKEN_CREATE
877 *	Description
878 *		Create BPF token with embedded information about what
879 *		BPF-related functionality it allows:
880 *		- a set of allowed bpf() syscall commands;
881 *		- a set of allowed BPF map types to be created with
882 *		BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
883 *		- a set of allowed BPF program types and BPF program attach
884 *		types to be loaded with BPF_PROG_LOAD command, if
885 *		BPF_PROG_LOAD itself is allowed.
886 *
887 *		BPF token is created (derived) from an instance of BPF FS,
888 *		assuming it has necessary delegation mount options specified.
889 *		This BPF token can be passed as an extra parameter to various
890 *		bpf() syscall commands to grant BPF subsystem functionality to
891 *		unprivileged processes.
892 *
893 *		When created, BPF token is "associated" with the owning
894 *		user namespace of BPF FS instance (super block) that it was
895 *		derived from, and subsequent BPF operations performed with
896 *		BPF token would be performing capabilities checks (i.e.,
897 *		CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
898 *		that user namespace. Without BPF token, such capabilities
899 *		have to be granted in init user namespace, making bpf()
900 *		syscall incompatible with user namespace, for the most part.
901 *
902 *	Return
903 *		A new file descriptor (a nonnegative integer), or -1 if an
904 *		error occurred (in which case, *errno* is set appropriately).
905 *
906 * NOTES
907 *	eBPF objects (maps and programs) can be shared between processes.
908 *
909 *	* After **fork**\ (2), the child inherits file descriptors
910 *	  referring to the same eBPF objects.
911 *	* File descriptors referring to eBPF objects can be transferred over
912 *	  **unix**\ (7) domain sockets.
913 *	* File descriptors referring to eBPF objects can be duplicated in the
914 *	  usual way, using **dup**\ (2) and similar calls.
915 *	* File descriptors referring to eBPF objects can be pinned to the
916 *	  filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
917 *
918 *	An eBPF object is deallocated only after all file descriptors referring
919 *	to the object have been closed and no references remain pinned to the
920 *	filesystem or attached (for example, bound to a program or device).
921 */
922enum bpf_cmd {
923	BPF_MAP_CREATE,
924	BPF_MAP_LOOKUP_ELEM,
925	BPF_MAP_UPDATE_ELEM,
926	BPF_MAP_DELETE_ELEM,
927	BPF_MAP_GET_NEXT_KEY,
928	BPF_PROG_LOAD,
929	BPF_OBJ_PIN,
930	BPF_OBJ_GET,
931	BPF_PROG_ATTACH,
932	BPF_PROG_DETACH,
933	BPF_PROG_TEST_RUN,
934	BPF_PROG_RUN = BPF_PROG_TEST_RUN,
935	BPF_PROG_GET_NEXT_ID,
936	BPF_MAP_GET_NEXT_ID,
937	BPF_PROG_GET_FD_BY_ID,
938	BPF_MAP_GET_FD_BY_ID,
939	BPF_OBJ_GET_INFO_BY_FD,
940	BPF_PROG_QUERY,
941	BPF_RAW_TRACEPOINT_OPEN,
942	BPF_BTF_LOAD,
943	BPF_BTF_GET_FD_BY_ID,
944	BPF_TASK_FD_QUERY,
945	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
946	BPF_MAP_FREEZE,
947	BPF_BTF_GET_NEXT_ID,
948	BPF_MAP_LOOKUP_BATCH,
949	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
950	BPF_MAP_UPDATE_BATCH,
951	BPF_MAP_DELETE_BATCH,
952	BPF_LINK_CREATE,
953	BPF_LINK_UPDATE,
954	BPF_LINK_GET_FD_BY_ID,
955	BPF_LINK_GET_NEXT_ID,
956	BPF_ENABLE_STATS,
957	BPF_ITER_CREATE,
958	BPF_LINK_DETACH,
959	BPF_PROG_BIND_MAP,
960	BPF_TOKEN_CREATE,
961	__MAX_BPF_CMD,
962};
963
964enum bpf_map_type {
965	BPF_MAP_TYPE_UNSPEC,
966	BPF_MAP_TYPE_HASH,
967	BPF_MAP_TYPE_ARRAY,
968	BPF_MAP_TYPE_PROG_ARRAY,
969	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
970	BPF_MAP_TYPE_PERCPU_HASH,
971	BPF_MAP_TYPE_PERCPU_ARRAY,
972	BPF_MAP_TYPE_STACK_TRACE,
973	BPF_MAP_TYPE_CGROUP_ARRAY,
974	BPF_MAP_TYPE_LRU_HASH,
975	BPF_MAP_TYPE_LRU_PERCPU_HASH,
976	BPF_MAP_TYPE_LPM_TRIE,
977	BPF_MAP_TYPE_ARRAY_OF_MAPS,
978	BPF_MAP_TYPE_HASH_OF_MAPS,
979	BPF_MAP_TYPE_DEVMAP,
980	BPF_MAP_TYPE_SOCKMAP,
981	BPF_MAP_TYPE_CPUMAP,
982	BPF_MAP_TYPE_XSKMAP,
983	BPF_MAP_TYPE_SOCKHASH,
984	BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
985	/* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
986	 * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
987	 * both cgroup-attached and other progs and supports all functionality
988	 * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
989	 * BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
990	 */
991	BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
992	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
993	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
994	/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
995	 * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
996	 * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
997	 * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
998	 * deprecated.
999	 */
1000	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
1001	BPF_MAP_TYPE_QUEUE,
1002	BPF_MAP_TYPE_STACK,
1003	BPF_MAP_TYPE_SK_STORAGE,
1004	BPF_MAP_TYPE_DEVMAP_HASH,
1005	BPF_MAP_TYPE_STRUCT_OPS,
1006	BPF_MAP_TYPE_RINGBUF,
1007	BPF_MAP_TYPE_INODE_STORAGE,
1008	BPF_MAP_TYPE_TASK_STORAGE,
1009	BPF_MAP_TYPE_BLOOM_FILTER,
1010	BPF_MAP_TYPE_USER_RINGBUF,
1011	BPF_MAP_TYPE_CGRP_STORAGE,
1012	BPF_MAP_TYPE_ARENA,
1013	__MAX_BPF_MAP_TYPE
1014};
1015
1016/* Note that tracing related programs such as
1017 * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
1018 * are not subject to a stable API since kernel internal data
1019 * structures can change from release to release and may
1020 * therefore break existing tracing BPF programs. Tracing BPF
1021 * programs correspond to /a/ specific kernel which is to be
1022 * analyzed, and not /a/ specific kernel /and/ all future ones.
1023 */
1024enum bpf_prog_type {
1025	BPF_PROG_TYPE_UNSPEC,
1026	BPF_PROG_TYPE_SOCKET_FILTER,
1027	BPF_PROG_TYPE_KPROBE,
1028	BPF_PROG_TYPE_SCHED_CLS,
1029	BPF_PROG_TYPE_SCHED_ACT,
1030	BPF_PROG_TYPE_TRACEPOINT,
1031	BPF_PROG_TYPE_XDP,
1032	BPF_PROG_TYPE_PERF_EVENT,
1033	BPF_PROG_TYPE_CGROUP_SKB,
1034	BPF_PROG_TYPE_CGROUP_SOCK,
1035	BPF_PROG_TYPE_LWT_IN,
1036	BPF_PROG_TYPE_LWT_OUT,
1037	BPF_PROG_TYPE_LWT_XMIT,
1038	BPF_PROG_TYPE_SOCK_OPS,
1039	BPF_PROG_TYPE_SK_SKB,
1040	BPF_PROG_TYPE_CGROUP_DEVICE,
1041	BPF_PROG_TYPE_SK_MSG,
1042	BPF_PROG_TYPE_RAW_TRACEPOINT,
1043	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
1044	BPF_PROG_TYPE_LWT_SEG6LOCAL,
1045	BPF_PROG_TYPE_LIRC_MODE2,
1046	BPF_PROG_TYPE_SK_REUSEPORT,
1047	BPF_PROG_TYPE_FLOW_DISSECTOR,
1048	BPF_PROG_TYPE_CGROUP_SYSCTL,
1049	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
1050	BPF_PROG_TYPE_CGROUP_SOCKOPT,
1051	BPF_PROG_TYPE_TRACING,
1052	BPF_PROG_TYPE_STRUCT_OPS,
1053	BPF_PROG_TYPE_EXT,
1054	BPF_PROG_TYPE_LSM,
1055	BPF_PROG_TYPE_SK_LOOKUP,
1056	BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
1057	BPF_PROG_TYPE_NETFILTER,
1058	__MAX_BPF_PROG_TYPE
1059};
1060
1061enum bpf_attach_type {
1062	BPF_CGROUP_INET_INGRESS,
1063	BPF_CGROUP_INET_EGRESS,
1064	BPF_CGROUP_INET_SOCK_CREATE,
1065	BPF_CGROUP_SOCK_OPS,
1066	BPF_SK_SKB_STREAM_PARSER,
1067	BPF_SK_SKB_STREAM_VERDICT,
1068	BPF_CGROUP_DEVICE,
1069	BPF_SK_MSG_VERDICT,
1070	BPF_CGROUP_INET4_BIND,
1071	BPF_CGROUP_INET6_BIND,
1072	BPF_CGROUP_INET4_CONNECT,
1073	BPF_CGROUP_INET6_CONNECT,
1074	BPF_CGROUP_INET4_POST_BIND,
1075	BPF_CGROUP_INET6_POST_BIND,
1076	BPF_CGROUP_UDP4_SENDMSG,
1077	BPF_CGROUP_UDP6_SENDMSG,
1078	BPF_LIRC_MODE2,
1079	BPF_FLOW_DISSECTOR,
1080	BPF_CGROUP_SYSCTL,
1081	BPF_CGROUP_UDP4_RECVMSG,
1082	BPF_CGROUP_UDP6_RECVMSG,
1083	BPF_CGROUP_GETSOCKOPT,
1084	BPF_CGROUP_SETSOCKOPT,
1085	BPF_TRACE_RAW_TP,
1086	BPF_TRACE_FENTRY,
1087	BPF_TRACE_FEXIT,
1088	BPF_MODIFY_RETURN,
1089	BPF_LSM_MAC,
1090	BPF_TRACE_ITER,
1091	BPF_CGROUP_INET4_GETPEERNAME,
1092	BPF_CGROUP_INET6_GETPEERNAME,
1093	BPF_CGROUP_INET4_GETSOCKNAME,
1094	BPF_CGROUP_INET6_GETSOCKNAME,
1095	BPF_XDP_DEVMAP,
1096	BPF_CGROUP_INET_SOCK_RELEASE,
1097	BPF_XDP_CPUMAP,
1098	BPF_SK_LOOKUP,
1099	BPF_XDP,
1100	BPF_SK_SKB_VERDICT,
1101	BPF_SK_REUSEPORT_SELECT,
1102	BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
1103	BPF_PERF_EVENT,
1104	BPF_TRACE_KPROBE_MULTI,
1105	BPF_LSM_CGROUP,
1106	BPF_STRUCT_OPS,
1107	BPF_NETFILTER,
1108	BPF_TCX_INGRESS,
1109	BPF_TCX_EGRESS,
1110	BPF_TRACE_UPROBE_MULTI,
1111	BPF_CGROUP_UNIX_CONNECT,
1112	BPF_CGROUP_UNIX_SENDMSG,
1113	BPF_CGROUP_UNIX_RECVMSG,
1114	BPF_CGROUP_UNIX_GETPEERNAME,
1115	BPF_CGROUP_UNIX_GETSOCKNAME,
1116	BPF_NETKIT_PRIMARY,
1117	BPF_NETKIT_PEER,
1118	__MAX_BPF_ATTACH_TYPE
1119};
1120
1121#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
1122
1123enum bpf_link_type {
1124	BPF_LINK_TYPE_UNSPEC = 0,
1125	BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
1126	BPF_LINK_TYPE_TRACING = 2,
1127	BPF_LINK_TYPE_CGROUP = 3,
1128	BPF_LINK_TYPE_ITER = 4,
1129	BPF_LINK_TYPE_NETNS = 5,
1130	BPF_LINK_TYPE_XDP = 6,
1131	BPF_LINK_TYPE_PERF_EVENT = 7,
1132	BPF_LINK_TYPE_KPROBE_MULTI = 8,
1133	BPF_LINK_TYPE_STRUCT_OPS = 9,
1134	BPF_LINK_TYPE_NETFILTER = 10,
1135	BPF_LINK_TYPE_TCX = 11,
1136	BPF_LINK_TYPE_UPROBE_MULTI = 12,
1137	BPF_LINK_TYPE_NETKIT = 13,
1138	__MAX_BPF_LINK_TYPE,
1139};
1140
1141#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
1142
1143enum bpf_perf_event_type {
1144	BPF_PERF_EVENT_UNSPEC = 0,
1145	BPF_PERF_EVENT_UPROBE = 1,
1146	BPF_PERF_EVENT_URETPROBE = 2,
1147	BPF_PERF_EVENT_KPROBE = 3,
1148	BPF_PERF_EVENT_KRETPROBE = 4,
1149	BPF_PERF_EVENT_TRACEPOINT = 5,
1150	BPF_PERF_EVENT_EVENT = 6,
1151};
1152
1153/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
1154 *
1155 * NONE(default): No further bpf programs allowed in the subtree.
1156 *
1157 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
1158 * the program in this cgroup yields to sub-cgroup program.
1159 *
1160 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
1161 * that cgroup program gets run in addition to the program in this cgroup.
1162 *
1163 * Only one program is allowed to be attached to a cgroup with
1164 * NONE or BPF_F_ALLOW_OVERRIDE flag.
1165 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
1166 * release old program and attach the new one. Attach flags has to match.
1167 *
1168 * Multiple programs are allowed to be attached to a cgroup with
1169 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
1170 * (those that were attached first, run first)
1171 * The programs of sub-cgroup are executed first, then programs of
1172 * this cgroup and then programs of parent cgroup.
1173 * When children program makes decision (like picking TCP CA or sock bind)
1174 * parent program has a chance to override it.
1175 *
1176 * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
1177 * programs for a cgroup. Though it's possible to replace an old program at
1178 * any position by also specifying BPF_F_REPLACE flag and position itself in
1179 * replace_bpf_fd attribute. Old program at this position will be released.
1180 *
1181 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
1182 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
1183 * Ex1:
1184 * cgrp1 (MULTI progs A, B) ->
1185 *    cgrp2 (OVERRIDE prog C) ->
1186 *      cgrp3 (MULTI prog D) ->
1187 *        cgrp4 (OVERRIDE prog E) ->
1188 *          cgrp5 (NONE prog F)
1189 * the event in cgrp5 triggers execution of F,D,A,B in that order.
1190 * if prog F is detached, the execution is E,D,A,B
1191 * if prog F and D are detached, the execution is E,A,B
1192 * if prog F, E and D are detached, the execution is C,A,B
1193 *
1194 * All eligible programs are executed regardless of return code from
1195 * earlier programs.
1196 */
1197#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
1198#define BPF_F_ALLOW_MULTI	(1U << 1)
1199/* Generic attachment flags. */
1200#define BPF_F_REPLACE		(1U << 2)
1201#define BPF_F_BEFORE		(1U << 3)
1202#define BPF_F_AFTER		(1U << 4)
1203#define BPF_F_ID		(1U << 5)
1204#define BPF_F_LINK		BPF_F_LINK /* 1 << 13 */
1205
1206/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
1207 * verifier will perform strict alignment checking as if the kernel
1208 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
1209 * and NET_IP_ALIGN defined to 2.
1210 */
1211#define BPF_F_STRICT_ALIGNMENT	(1U << 0)
1212
1213/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
1214 * verifier will allow any alignment whatsoever.  On platforms
1215 * with strict alignment requirements for loads ands stores (such
1216 * as sparc and mips) the verifier validates that all loads and
1217 * stores provably follow this requirement.  This flag turns that
1218 * checking and enforcement off.
1219 *
1220 * It is mostly used for testing when we want to validate the
1221 * context and memory access aspects of the verifier, but because
1222 * of an unaligned access the alignment check would trigger before
1223 * the one we are interested in.
1224 */
1225#define BPF_F_ANY_ALIGNMENT	(1U << 1)
1226
1227/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
1228 * Verifier does sub-register def/use analysis and identifies instructions whose
1229 * def only matters for low 32-bit, high 32-bit is never referenced later
1230 * through implicit zero extension. Therefore verifier notifies JIT back-ends
1231 * that it is safe to ignore clearing high 32-bit for these instructions. This
1232 * saves some back-ends a lot of code-gen. However such optimization is not
1233 * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
1234 * hence hasn't used verifier's analysis result. But, we really want to have a
1235 * way to be able to verify the correctness of the described optimization on
1236 * x86_64 on which testsuites are frequently exercised.
1237 *
1238 * So, this flag is introduced. Once it is set, verifier will randomize high
1239 * 32-bit for those instructions who has been identified as safe to ignore them.
1240 * Then, if verifier is not doing correct analysis, such randomization will
1241 * regress tests to expose bugs.
1242 */
1243#define BPF_F_TEST_RND_HI32	(1U << 2)
1244
1245/* The verifier internal test flag. Behavior is undefined */
1246#define BPF_F_TEST_STATE_FREQ	(1U << 3)
1247
1248/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
1249 * restrict map and helper usage for such programs. Sleepable BPF programs can
1250 * only be attached to hooks where kernel execution context allows sleeping.
1251 * Such programs are allowed to use helpers that may sleep like
1252 * bpf_copy_from_user().
1253 */
1254#define BPF_F_SLEEPABLE		(1U << 4)
1255
1256/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
1257 * fully support xdp frags.
1258 */
1259#define BPF_F_XDP_HAS_FRAGS	(1U << 5)
1260
1261/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
1262 * program becomes device-bound but can access XDP metadata.
1263 */
1264#define BPF_F_XDP_DEV_BOUND_ONLY	(1U << 6)
1265
1266/* The verifier internal test flag. Behavior is undefined */
1267#define BPF_F_TEST_REG_INVARIANTS	(1U << 7)
1268
1269/* link_create.kprobe_multi.flags used in LINK_CREATE command for
1270 * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
1271 */
1272enum {
1273	BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
1274};
1275
1276/* link_create.uprobe_multi.flags used in LINK_CREATE command for
1277 * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
1278 */
1279enum {
1280	BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
1281};
1282
1283/* link_create.netfilter.flags used in LINK_CREATE command for
1284 * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
1285 */
1286#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
1287
1288/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
1289 * the following extensions:
1290 *
1291 * insn[0].src_reg:  BPF_PSEUDO_MAP_[FD|IDX]
1292 * insn[0].imm:      map fd or fd_idx
1293 * insn[1].imm:      0
1294 * insn[0].off:      0
1295 * insn[1].off:      0
1296 * ldimm64 rewrite:  address of map
1297 * verifier type:    CONST_PTR_TO_MAP
1298 */
1299#define BPF_PSEUDO_MAP_FD	1
1300#define BPF_PSEUDO_MAP_IDX	5
1301
1302/* insn[0].src_reg:  BPF_PSEUDO_MAP_[IDX_]VALUE
1303 * insn[0].imm:      map fd or fd_idx
1304 * insn[1].imm:      offset into value
1305 * insn[0].off:      0
1306 * insn[1].off:      0
1307 * ldimm64 rewrite:  address of map[0]+offset
1308 * verifier type:    PTR_TO_MAP_VALUE
1309 */
1310#define BPF_PSEUDO_MAP_VALUE		2
1311#define BPF_PSEUDO_MAP_IDX_VALUE	6
1312
1313/* insn[0].src_reg:  BPF_PSEUDO_BTF_ID
1314 * insn[0].imm:      kernel btd id of VAR
1315 * insn[1].imm:      0
1316 * insn[0].off:      0
1317 * insn[1].off:      0
1318 * ldimm64 rewrite:  address of the kernel variable
1319 * verifier type:    PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
1320 *                   is struct/union.
1321 */
1322#define BPF_PSEUDO_BTF_ID	3
1323/* insn[0].src_reg:  BPF_PSEUDO_FUNC
1324 * insn[0].imm:      insn offset to the func
1325 * insn[1].imm:      0
1326 * insn[0].off:      0
1327 * insn[1].off:      0
1328 * ldimm64 rewrite:  address of the function
1329 * verifier type:    PTR_TO_FUNC.
1330 */
1331#define BPF_PSEUDO_FUNC		4
1332
1333/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
1334 * offset to another bpf function
1335 */
1336#define BPF_PSEUDO_CALL		1
1337/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
1338 * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
1339 */
1340#define BPF_PSEUDO_KFUNC_CALL	2
1341
1342enum bpf_addr_space_cast {
1343	BPF_ADDR_SPACE_CAST = 1,
1344};
1345
1346/* flags for BPF_MAP_UPDATE_ELEM command */
1347enum {
1348	BPF_ANY		= 0, /* create new element or update existing */
1349	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
1350	BPF_EXIST	= 2, /* update existing element */
1351	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
1352};
1353
1354/* flags for BPF_MAP_CREATE command */
1355enum {
1356	BPF_F_NO_PREALLOC	= (1U << 0),
1357/* Instead of having one common LRU list in the
1358 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
1359 * which can scale and perform better.
1360 * Note, the LRU nodes (including free nodes) cannot be moved
1361 * across different LRU lists.
1362 */
1363	BPF_F_NO_COMMON_LRU	= (1U << 1),
1364/* Specify numa node during map creation */
1365	BPF_F_NUMA_NODE		= (1U << 2),
1366
1367/* Flags for accessing BPF object from syscall side. */
1368	BPF_F_RDONLY		= (1U << 3),
1369	BPF_F_WRONLY		= (1U << 4),
1370
1371/* Flag for stack_map, store build_id+offset instead of pointer */
1372	BPF_F_STACK_BUILD_ID	= (1U << 5),
1373
1374/* Zero-initialize hash function seed. This should only be used for testing. */
1375	BPF_F_ZERO_SEED		= (1U << 6),
1376
1377/* Flags for accessing BPF object from program side. */
1378	BPF_F_RDONLY_PROG	= (1U << 7),
1379	BPF_F_WRONLY_PROG	= (1U << 8),
1380
1381/* Clone map from listener for newly accepted socket */
1382	BPF_F_CLONE		= (1U << 9),
1383
1384/* Enable memory-mapping BPF map */
1385	BPF_F_MMAPABLE		= (1U << 10),
1386
1387/* Share perf_event among processes */
1388	BPF_F_PRESERVE_ELEMS	= (1U << 11),
1389
1390/* Create a map that is suitable to be an inner map with dynamic max entries */
1391	BPF_F_INNER_MAP		= (1U << 12),
1392
1393/* Create a map that will be registered/unregesitered by the backed bpf_link */
1394	BPF_F_LINK		= (1U << 13),
1395
1396/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
1397	BPF_F_PATH_FD		= (1U << 14),
1398
1399/* Flag for value_type_btf_obj_fd, the fd is available */
1400	BPF_F_VTYPE_BTF_OBJ_FD	= (1U << 15),
1401
1402/* BPF token FD is passed in a corresponding command's token_fd field */
1403	BPF_F_TOKEN_FD          = (1U << 16),
1404
1405/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
1406	BPF_F_SEGV_ON_FAULT	= (1U << 17),
1407
1408/* Do not translate kernel bpf_arena pointers to user pointers */
1409	BPF_F_NO_USER_CONV	= (1U << 18),
1410};
1411
1412/* Flags for BPF_PROG_QUERY. */
1413
1414/* Query effective (directly attached + inherited from ancestor cgroups)
1415 * programs that will be executed for events within a cgroup.
1416 * attach_flags with this flag are always returned 0.
1417 */
1418#define BPF_F_QUERY_EFFECTIVE	(1U << 0)
1419
1420/* Flags for BPF_PROG_TEST_RUN */
1421
1422/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
1423#define BPF_F_TEST_RUN_ON_CPU	(1U << 0)
1424/* If set, XDP frames will be transmitted after processing */
1425#define BPF_F_TEST_XDP_LIVE_FRAMES	(1U << 1)
1426
1427/* type for BPF_ENABLE_STATS */
1428enum bpf_stats_type {
1429	/* enabled run_time_ns and run_cnt */
1430	BPF_STATS_RUN_TIME = 0,
1431};
1432
1433enum bpf_stack_build_id_status {
1434	/* user space need an empty entry to identify end of a trace */
1435	BPF_STACK_BUILD_ID_EMPTY = 0,
1436	/* with valid build_id and offset */
1437	BPF_STACK_BUILD_ID_VALID = 1,
1438	/* couldn't get build_id, fallback to ip */
1439	BPF_STACK_BUILD_ID_IP = 2,
1440};
1441
1442#define BPF_BUILD_ID_SIZE 20
1443struct bpf_stack_build_id {
1444	__s32		status;
1445	unsigned char	build_id[BPF_BUILD_ID_SIZE];
1446	union {
1447		__u64	offset;
1448		__u64	ip;
1449	};
1450};
1451
1452#define BPF_OBJ_NAME_LEN 16U
1453
1454union bpf_attr {
1455	struct { /* anonymous struct used by BPF_MAP_CREATE command */
1456		__u32	map_type;	/* one of enum bpf_map_type */
1457		__u32	key_size;	/* size of key in bytes */
1458		__u32	value_size;	/* size of value in bytes */
1459		__u32	max_entries;	/* max number of entries in a map */
1460		__u32	map_flags;	/* BPF_MAP_CREATE related
1461					 * flags defined above.
1462					 */
1463		__u32	inner_map_fd;	/* fd pointing to the inner map */
1464		__u32	numa_node;	/* numa node (effective only if
1465					 * BPF_F_NUMA_NODE is set).
1466					 */
1467		char	map_name[BPF_OBJ_NAME_LEN];
1468		__u32	map_ifindex;	/* ifindex of netdev to create on */
1469		__u32	btf_fd;		/* fd pointing to a BTF type data */
1470		__u32	btf_key_type_id;	/* BTF type_id of the key */
1471		__u32	btf_value_type_id;	/* BTF type_id of the value */
1472		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
1473						   * struct stored as the
1474						   * map value
1475						   */
1476		/* Any per-map-type extra fields
1477		 *
1478		 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
1479		 * number of hash functions (if 0, the bloom filter will default
1480		 * to using 5 hash functions).
1481		 *
1482		 * BPF_MAP_TYPE_ARENA - contains the address where user space
1483		 * is going to mmap() the arena. It has to be page aligned.
1484		 */
1485		__u64	map_extra;
1486
1487		__s32   value_type_btf_obj_fd;	/* fd pointing to a BTF
1488						 * type data for
1489						 * btf_vmlinux_value_type_id.
1490						 */
1491		/* BPF token FD to use with BPF_MAP_CREATE operation.
1492		 * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
1493		 */
1494		__s32	map_token_fd;
1495	};
1496
1497	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
1498		__u32		map_fd;
1499		__aligned_u64	key;
1500		union {
1501			__aligned_u64 value;
1502			__aligned_u64 next_key;
1503		};
1504		__u64		flags;
1505	};
1506
1507	struct { /* struct used by BPF_MAP_*_BATCH commands */
1508		__aligned_u64	in_batch;	/* start batch,
1509						 * NULL to start from beginning
1510						 */
1511		__aligned_u64	out_batch;	/* output: next start batch */
1512		__aligned_u64	keys;
1513		__aligned_u64	values;
1514		__u32		count;		/* input/output:
1515						 * input: # of key/value
1516						 * elements
1517						 * output: # of filled elements
1518						 */
1519		__u32		map_fd;
1520		__u64		elem_flags;
1521		__u64		flags;
1522	} batch;
1523
1524	struct { /* anonymous struct used by BPF_PROG_LOAD command */
1525		__u32		prog_type;	/* one of enum bpf_prog_type */
1526		__u32		insn_cnt;
1527		__aligned_u64	insns;
1528		__aligned_u64	license;
1529		__u32		log_level;	/* verbosity level of verifier */
1530		__u32		log_size;	/* size of user buffer */
1531		__aligned_u64	log_buf;	/* user supplied buffer */
1532		__u32		kern_version;	/* not used */
1533		__u32		prog_flags;
1534		char		prog_name[BPF_OBJ_NAME_LEN];
1535		__u32		prog_ifindex;	/* ifindex of netdev to prep for */
1536		/* For some prog types expected attach type must be known at
1537		 * load time to verify attach type specific parts of prog
1538		 * (context accesses, allowed helpers, etc).
1539		 */
1540		__u32		expected_attach_type;
1541		__u32		prog_btf_fd;	/* fd pointing to BTF type data */
1542		__u32		func_info_rec_size;	/* userspace bpf_func_info size */
1543		__aligned_u64	func_info;	/* func info */
1544		__u32		func_info_cnt;	/* number of bpf_func_info records */
1545		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
1546		__aligned_u64	line_info;	/* line info */
1547		__u32		line_info_cnt;	/* number of bpf_line_info records */
1548		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
1549		union {
1550			/* valid prog_fd to attach to bpf prog */
1551			__u32		attach_prog_fd;
1552			/* or valid module BTF object fd or 0 to attach to vmlinux */
1553			__u32		attach_btf_obj_fd;
1554		};
1555		__u32		core_relo_cnt;	/* number of bpf_core_relo */
1556		__aligned_u64	fd_array;	/* array of FDs */
1557		__aligned_u64	core_relos;
1558		__u32		core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
1559		/* output: actual total log contents size (including termintaing zero).
1560		 * It could be both larger than original log_size (if log was
1561		 * truncated), or smaller (if log buffer wasn't filled completely).
1562		 */
1563		__u32		log_true_size;
1564		/* BPF token FD to use with BPF_PROG_LOAD operation.
1565		 * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
1566		 */
1567		__s32		prog_token_fd;
1568	};
1569
1570	struct { /* anonymous struct used by BPF_OBJ_* commands */
1571		__aligned_u64	pathname;
1572		__u32		bpf_fd;
1573		__u32		file_flags;
1574		/* Same as dirfd in openat() syscall; see openat(2)
1575		 * manpage for details of path FD and pathname semantics;
1576		 * path_fd should accompanied by BPF_F_PATH_FD flag set in
1577		 * file_flags field, otherwise it should be set to zero;
1578		 * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
1579		 */
1580		__s32		path_fd;
1581	};
1582
1583	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
1584		union {
1585			__u32	target_fd;	/* target object to attach to or ... */
1586			__u32	target_ifindex;	/* target ifindex */
1587		};
1588		__u32		attach_bpf_fd;
1589		__u32		attach_type;
1590		__u32		attach_flags;
1591		__u32		replace_bpf_fd;
1592		union {
1593			__u32	relative_fd;
1594			__u32	relative_id;
1595		};
1596		__u64		expected_revision;
1597	};
1598
1599	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
1600		__u32		prog_fd;
1601		__u32		retval;
1602		__u32		data_size_in;	/* input: len of data_in */
1603		__u32		data_size_out;	/* input/output: len of data_out
1604						 *   returns ENOSPC if data_out
1605						 *   is too small.
1606						 */
1607		__aligned_u64	data_in;
1608		__aligned_u64	data_out;
1609		__u32		repeat;
1610		__u32		duration;
1611		__u32		ctx_size_in;	/* input: len of ctx_in */
1612		__u32		ctx_size_out;	/* input/output: len of ctx_out
1613						 *   returns ENOSPC if ctx_out
1614						 *   is too small.
1615						 */
1616		__aligned_u64	ctx_in;
1617		__aligned_u64	ctx_out;
1618		__u32		flags;
1619		__u32		cpu;
1620		__u32		batch_size;
1621	} test;
1622
1623	struct { /* anonymous struct used by BPF_*_GET_*_ID */
1624		union {
1625			__u32		start_id;
1626			__u32		prog_id;
1627			__u32		map_id;
1628			__u32		btf_id;
1629			__u32		link_id;
1630		};
1631		__u32		next_id;
1632		__u32		open_flags;
1633	};
1634
1635	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
1636		__u32		bpf_fd;
1637		__u32		info_len;
1638		__aligned_u64	info;
1639	} info;
1640
1641	struct { /* anonymous struct used by BPF_PROG_QUERY command */
1642		union {
1643			__u32	target_fd;	/* target object to query or ... */
1644			__u32	target_ifindex;	/* target ifindex */
1645		};
1646		__u32		attach_type;
1647		__u32		query_flags;
1648		__u32		attach_flags;
1649		__aligned_u64	prog_ids;
1650		union {
1651			__u32	prog_cnt;
1652			__u32	count;
1653		};
1654		__u32		:32;
1655		/* output: per-program attach_flags.
1656		 * not allowed to be set during effective query.
1657		 */
1658		__aligned_u64	prog_attach_flags;
1659		__aligned_u64	link_ids;
1660		__aligned_u64	link_attach_flags;
1661		__u64		revision;
1662	} query;
1663
1664	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
1665		__u64 name;
1666		__u32 prog_fd;
1667	} raw_tracepoint;
1668
1669	struct { /* anonymous struct for BPF_BTF_LOAD */
1670		__aligned_u64	btf;
1671		__aligned_u64	btf_log_buf;
1672		__u32		btf_size;
1673		__u32		btf_log_size;
1674		__u32		btf_log_level;
1675		/* output: actual total log contents size (including termintaing zero).
1676		 * It could be both larger than original log_size (if log was
1677		 * truncated), or smaller (if log buffer wasn't filled completely).
1678		 */
1679		__u32		btf_log_true_size;
1680		__u32		btf_flags;
1681		/* BPF token FD to use with BPF_BTF_LOAD operation.
1682		 * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
1683		 */
1684		__s32		btf_token_fd;
1685	};
1686
1687	struct {
1688		__u32		pid;		/* input: pid */
1689		__u32		fd;		/* input: fd */
1690		__u32		flags;		/* input: flags */
1691		__u32		buf_len;	/* input/output: buf len */
1692		__aligned_u64	buf;		/* input/output:
1693						 *   tp_name for tracepoint
1694						 *   symbol for kprobe
1695						 *   filename for uprobe
1696						 */
1697		__u32		prog_id;	/* output: prod_id */
1698		__u32		fd_type;	/* output: BPF_FD_TYPE_* */
1699		__u64		probe_offset;	/* output: probe_offset */
1700		__u64		probe_addr;	/* output: probe_addr */
1701	} task_fd_query;
1702
1703	struct { /* struct used by BPF_LINK_CREATE command */
1704		union {
1705			__u32		prog_fd;	/* eBPF program to attach */
1706			__u32		map_fd;		/* struct_ops to attach */
1707		};
1708		union {
1709			__u32	target_fd;	/* target object to attach to or ... */
1710			__u32	target_ifindex; /* target ifindex */
1711		};
1712		__u32		attach_type;	/* attach type */
1713		__u32		flags;		/* extra flags */
1714		union {
1715			__u32	target_btf_id;	/* btf_id of target to attach to */
1716			struct {
1717				__aligned_u64	iter_info;	/* extra bpf_iter_link_info */
1718				__u32		iter_info_len;	/* iter_info length */
1719			};
1720			struct {
1721				/* black box user-provided value passed through
1722				 * to BPF program at the execution time and
1723				 * accessible through bpf_get_attach_cookie() BPF helper
1724				 */
1725				__u64		bpf_cookie;
1726			} perf_event;
1727			struct {
1728				__u32		flags;
1729				__u32		cnt;
1730				__aligned_u64	syms;
1731				__aligned_u64	addrs;
1732				__aligned_u64	cookies;
1733			} kprobe_multi;
1734			struct {
1735				/* this is overlaid with the target_btf_id above. */
1736				__u32		target_btf_id;
1737				/* black box user-provided value passed through
1738				 * to BPF program at the execution time and
1739				 * accessible through bpf_get_attach_cookie() BPF helper
1740				 */
1741				__u64		cookie;
1742			} tracing;
1743			struct {
1744				__u32		pf;
1745				__u32		hooknum;
1746				__s32		priority;
1747				__u32		flags;
1748			} netfilter;
1749			struct {
1750				union {
1751					__u32	relative_fd;
1752					__u32	relative_id;
1753				};
1754				__u64		expected_revision;
1755			} tcx;
1756			struct {
1757				__aligned_u64	path;
1758				__aligned_u64	offsets;
1759				__aligned_u64	ref_ctr_offsets;
1760				__aligned_u64	cookies;
1761				__u32		cnt;
1762				__u32		flags;
1763				__u32		pid;
1764			} uprobe_multi;
1765			struct {
1766				union {
1767					__u32	relative_fd;
1768					__u32	relative_id;
1769				};
1770				__u64		expected_revision;
1771			} netkit;
1772		};
1773	} link_create;
1774
1775	struct { /* struct used by BPF_LINK_UPDATE command */
1776		__u32		link_fd;	/* link fd */
1777		union {
1778			/* new program fd to update link with */
1779			__u32		new_prog_fd;
1780			/* new struct_ops map fd to update link with */
1781			__u32           new_map_fd;
1782		};
1783		__u32		flags;		/* extra flags */
1784		union {
1785			/* expected link's program fd; is specified only if
1786			 * BPF_F_REPLACE flag is set in flags.
1787			 */
1788			__u32		old_prog_fd;
1789			/* expected link's map fd; is specified only
1790			 * if BPF_F_REPLACE flag is set.
1791			 */
1792			__u32           old_map_fd;
1793		};
1794	} link_update;
1795
1796	struct {
1797		__u32		link_fd;
1798	} link_detach;
1799
1800	struct { /* struct used by BPF_ENABLE_STATS command */
1801		__u32		type;
1802	} enable_stats;
1803
1804	struct { /* struct used by BPF_ITER_CREATE command */
1805		__u32		link_fd;
1806		__u32		flags;
1807	} iter_create;
1808
1809	struct { /* struct used by BPF_PROG_BIND_MAP command */
1810		__u32		prog_fd;
1811		__u32		map_fd;
1812		__u32		flags;		/* extra flags */
1813	} prog_bind_map;
1814
1815	struct { /* struct used by BPF_TOKEN_CREATE command */
1816		__u32		flags;
1817		__u32		bpffs_fd;
1818	} token_create;
1819
1820} __attribute__((aligned(8)));
1821
1822/* The description below is an attempt at providing documentation to eBPF
1823 * developers about the multiple available eBPF helper functions. It can be
1824 * parsed and used to produce a manual page. The workflow is the following,
1825 * and requires the rst2man utility:
1826 *
1827 *     $ ./scripts/bpf_doc.py \
1828 *             --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
1829 *     $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
1830 *     $ man /tmp/bpf-helpers.7
1831 *
1832 * Note that in order to produce this external documentation, some RST
1833 * formatting is used in the descriptions to get "bold" and "italics" in
1834 * manual pages. Also note that the few trailing white spaces are
1835 * intentional, removing them would break paragraphs for rst2man.
1836 *
1837 * Start of BPF helper function descriptions:
1838 *
1839 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
1840 * 	Description
1841 * 		Perform a lookup in *map* for an entry associated to *key*.
1842 * 	Return
1843 * 		Map value associated to *key*, or **NULL** if no entry was
1844 * 		found.
1845 *
1846 * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
1847 * 	Description
1848 * 		Add or update the value of the entry associated to *key* in
1849 * 		*map* with *value*. *flags* is one of:
1850 *
1851 * 		**BPF_NOEXIST**
1852 * 			The entry for *key* must not exist in the map.
1853 * 		**BPF_EXIST**
1854 * 			The entry for *key* must already exist in the map.
1855 * 		**BPF_ANY**
1856 * 			No condition on the existence of the entry for *key*.
1857 *
1858 * 		Flag value **BPF_NOEXIST** cannot be used for maps of types
1859 * 		**BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY**  (all
1860 * 		elements always exist), the helper would return an error.
1861 * 	Return
1862 * 		0 on success, or a negative error in case of failure.
1863 *
1864 * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
1865 * 	Description
1866 * 		Delete entry with *key* from *map*.
1867 * 	Return
1868 * 		0 on success, or a negative error in case of failure.
1869 *
1870 * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
1871 * 	Description
1872 * 		For tracing programs, safely attempt to read *size* bytes from
1873 * 		kernel space address *unsafe_ptr* and store the data in *dst*.
1874 *
1875 * 		Generally, use **bpf_probe_read_user**\ () or
1876 * 		**bpf_probe_read_kernel**\ () instead.
1877 * 	Return
1878 * 		0 on success, or a negative error in case of failure.
1879 *
1880 * u64 bpf_ktime_get_ns(void)
1881 * 	Description
1882 * 		Return the time elapsed since system boot, in nanoseconds.
1883 * 		Does not include time the system was suspended.
1884 * 		See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
1885 * 	Return
1886 * 		Current *ktime*.
1887 *
1888 * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
1889 * 	Description
1890 * 		This helper is a "printk()-like" facility for debugging. It
1891 * 		prints a message defined by format *fmt* (of size *fmt_size*)
1892 * 		to file *\/sys/kernel/tracing/trace* from TraceFS, if
1893 * 		available. It can take up to three additional **u64**
1894 * 		arguments (as an eBPF helpers, the total number of arguments is
1895 * 		limited to five).
1896 *
1897 * 		Each time the helper is called, it appends a line to the trace.
1898 * 		Lines are discarded while *\/sys/kernel/tracing/trace* is
1899 * 		open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
1900 * 		The format of the trace is customizable, and the exact output
1901 * 		one will get depends on the options set in
1902 * 		*\/sys/kernel/tracing/trace_options* (see also the
1903 * 		*README* file under the same directory). However, it usually
1904 * 		defaults to something like:
1905 *
1906 * 		::
1907 *
1908 * 			telnet-470   [001] .N.. 419421.045894: 0x00000001: <formatted msg>
1909 *
1910 * 		In the above:
1911 *
1912 * 			* ``telnet`` is the name of the current task.
1913 * 			* ``470`` is the PID of the current task.
1914 * 			* ``001`` is the CPU number on which the task is
1915 * 			  running.
1916 * 			* In ``.N..``, each character refers to a set of
1917 * 			  options (whether irqs are enabled, scheduling
1918 * 			  options, whether hard/softirqs are running, level of
1919 * 			  preempt_disabled respectively). **N** means that
1920 * 			  **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
1921 * 			  are set.
1922 * 			* ``419421.045894`` is a timestamp.
1923 * 			* ``0x00000001`` is a fake value used by BPF for the
1924 * 			  instruction pointer register.
1925 * 			* ``<formatted msg>`` is the message formatted with
1926 * 			  *fmt*.
1927 *
1928 * 		The conversion specifiers supported by *fmt* are similar, but
1929 * 		more limited than for printk(). They are **%d**, **%i**,
1930 * 		**%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
1931 * 		**%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
1932 * 		of field, padding with zeroes, etc.) is available, and the
1933 * 		helper will return **-EINVAL** (but print nothing) if it
1934 * 		encounters an unknown specifier.
1935 *
1936 * 		Also, note that **bpf_trace_printk**\ () is slow, and should
1937 * 		only be used for debugging purposes. For this reason, a notice
1938 * 		block (spanning several lines) is printed to kernel logs and
1939 * 		states that the helper should not be used "for production use"
1940 * 		the first time this helper is used (or more precisely, when
1941 * 		**trace_printk**\ () buffers are allocated). For passing values
1942 * 		to user space, perf events should be preferred.
1943 * 	Return
1944 * 		The number of bytes written to the buffer, or a negative error
1945 * 		in case of failure.
1946 *
1947 * u32 bpf_get_prandom_u32(void)
1948 * 	Description
1949 * 		Get a pseudo-random number.
1950 *
1951 * 		From a security point of view, this helper uses its own
1952 * 		pseudo-random internal state, and cannot be used to infer the
1953 * 		seed of other random functions in the kernel. However, it is
1954 * 		essential to note that the generator used by the helper is not
1955 * 		cryptographically secure.
1956 * 	Return
1957 * 		A random 32-bit unsigned value.
1958 *
1959 * u32 bpf_get_smp_processor_id(void)
1960 * 	Description
1961 * 		Get the SMP (symmetric multiprocessing) processor id. Note that
1962 * 		all programs run with migration disabled, which means that the
1963 * 		SMP processor id is stable during all the execution of the
1964 * 		program.
1965 * 	Return
1966 * 		The SMP id of the processor running the program.
1967 *
1968 * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
1969 * 	Description
1970 * 		Store *len* bytes from address *from* into the packet
1971 * 		associated to *skb*, at *offset*. *flags* are a combination of
1972 * 		**BPF_F_RECOMPUTE_CSUM** (automatically recompute the
1973 * 		checksum for the packet after storing the bytes) and
1974 * 		**BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
1975 * 		**->swhash** and *skb*\ **->l4hash** to 0).
1976 *
1977 * 		A call to this helper is susceptible to change the underlying
1978 * 		packet buffer. Therefore, at load time, all checks on pointers
1979 * 		previously done by the verifier are invalidated and must be
1980 * 		performed again, if the helper is used in combination with
1981 * 		direct packet access.
1982 * 	Return
1983 * 		0 on success, or a negative error in case of failure.
1984 *
1985 * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
1986 * 	Description
1987 * 		Recompute the layer 3 (e.g. IP) checksum for the packet
1988 * 		associated to *skb*. Computation is incremental, so the helper
1989 * 		must know the former value of the header field that was
1990 * 		modified (*from*), the new value of this field (*to*), and the
1991 * 		number of bytes (2 or 4) for this field, stored in *size*.
1992 * 		Alternatively, it is possible to store the difference between
1993 * 		the previous and the new values of the header field in *to*, by
1994 * 		setting *from* and *size* to 0. For both methods, *offset*
1995 * 		indicates the location of the IP checksum within the packet.
1996 *
1997 * 		This helper works in combination with **bpf_csum_diff**\ (),
1998 * 		which does not update the checksum in-place, but offers more
1999 * 		flexibility and can handle sizes larger than 2 or 4 for the
2000 * 		checksum to update.
2001 *
2002 * 		A call to this helper is susceptible to change the underlying
2003 * 		packet buffer. Therefore, at load time, all checks on pointers
2004 * 		previously done by the verifier are invalidated and must be
2005 * 		performed again, if the helper is used in combination with
2006 * 		direct packet access.
2007 * 	Return
2008 * 		0 on success, or a negative error in case of failure.
2009 *
2010 * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
2011 * 	Description
2012 * 		Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
2013 * 		packet associated to *skb*. Computation is incremental, so the
2014 * 		helper must know the former value of the header field that was
2015 * 		modified (*from*), the new value of this field (*to*), and the
2016 * 		number of bytes (2 or 4) for this field, stored on the lowest
2017 * 		four bits of *flags*. Alternatively, it is possible to store
2018 * 		the difference between the previous and the new values of the
2019 * 		header field in *to*, by setting *from* and the four lowest
2020 * 		bits of *flags* to 0. For both methods, *offset* indicates the
2021 * 		location of the IP checksum within the packet. In addition to
2022 * 		the size of the field, *flags* can be added (bitwise OR) actual
2023 * 		flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
2024 * 		untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
2025 * 		for updates resulting in a null checksum the value is set to
2026 * 		**CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
2027 * 		the checksum is to be computed against a pseudo-header.
2028 *
2029 * 		This helper works in combination with **bpf_csum_diff**\ (),
2030 * 		which does not update the checksum in-place, but offers more
2031 * 		flexibility and can handle sizes larger than 2 or 4 for the
2032 * 		checksum to update.
2033 *
2034 * 		A call to this helper is susceptible to change the underlying
2035 * 		packet buffer. Therefore, at load time, all checks on pointers
2036 * 		previously done by the verifier are invalidated and must be
2037 * 		performed again, if the helper is used in combination with
2038 * 		direct packet access.
2039 * 	Return
2040 * 		0 on success, or a negative error in case of failure.
2041 *
2042 * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
2043 * 	Description
2044 * 		This special helper is used to trigger a "tail call", or in
2045 * 		other words, to jump into another eBPF program. The same stack
2046 * 		frame is used (but values on stack and in registers for the
2047 * 		caller are not accessible to the callee). This mechanism allows
2048 * 		for program chaining, either for raising the maximum number of
2049 * 		available eBPF instructions, or to execute given programs in
2050 * 		conditional blocks. For security reasons, there is an upper
2051 * 		limit to the number of successive tail calls that can be
2052 * 		performed.
2053 *
2054 * 		Upon call of this helper, the program attempts to jump into a
2055 * 		program referenced at index *index* in *prog_array_map*, a
2056 * 		special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
2057 * 		*ctx*, a pointer to the context.
2058 *
2059 * 		If the call succeeds, the kernel immediately runs the first
2060 * 		instruction of the new program. This is not a function call,
2061 * 		and it never returns to the previous program. If the call
2062 * 		fails, then the helper has no effect, and the caller continues
2063 * 		to run its subsequent instructions. A call can fail if the
2064 * 		destination program for the jump does not exist (i.e. *index*
2065 * 		is superior to the number of entries in *prog_array_map*), or
2066 * 		if the maximum number of tail calls has been reached for this
2067 * 		chain of programs. This limit is defined in the kernel by the
2068 * 		macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
2069 *		which is currently set to 33.
2070 * 	Return
2071 * 		0 on success, or a negative error in case of failure.
2072 *
2073 * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
2074 * 	Description
2075 * 		Clone and redirect the packet associated to *skb* to another
2076 * 		net device of index *ifindex*. Both ingress and egress
2077 * 		interfaces can be used for redirection. The **BPF_F_INGRESS**
2078 * 		value in *flags* is used to make the distinction (ingress path
2079 * 		is selected if the flag is present, egress path otherwise).
2080 * 		This is the only flag supported for now.
2081 *
2082 * 		In comparison with **bpf_redirect**\ () helper,
2083 * 		**bpf_clone_redirect**\ () has the associated cost of
2084 * 		duplicating the packet buffer, but this can be executed out of
2085 * 		the eBPF program. Conversely, **bpf_redirect**\ () is more
2086 * 		efficient, but it is handled through an action code where the
2087 * 		redirection happens only after the eBPF program has returned.
2088 *
2089 * 		A call to this helper is susceptible to change the underlying
2090 * 		packet buffer. Therefore, at load time, all checks on pointers
2091 * 		previously done by the verifier are invalidated and must be
2092 * 		performed again, if the helper is used in combination with
2093 * 		direct packet access.
2094 * 	Return
2095 * 		0 on success, or a negative error in case of failure. Positive
2096 * 		error indicates a potential drop or congestion in the target
2097 * 		device. The particular positive error codes are not defined.
2098 *
2099 * u64 bpf_get_current_pid_tgid(void)
2100 * 	Description
2101 * 		Get the current pid and tgid.
2102 * 	Return
2103 * 		A 64-bit integer containing the current tgid and pid, and
2104 * 		created as such:
2105 * 		*current_task*\ **->tgid << 32 \|**
2106 * 		*current_task*\ **->pid**.
2107 *
2108 * u64 bpf_get_current_uid_gid(void)
2109 * 	Description
2110 * 		Get the current uid and gid.
2111 * 	Return
2112 * 		A 64-bit integer containing the current GID and UID, and
2113 * 		created as such: *current_gid* **<< 32 \|** *current_uid*.
2114 *
2115 * long bpf_get_current_comm(void *buf, u32 size_of_buf)
2116 * 	Description
2117 * 		Copy the **comm** attribute of the current task into *buf* of
2118 * 		*size_of_buf*. The **comm** attribute contains the name of
2119 * 		the executable (excluding the path) for the current task. The
2120 * 		*size_of_buf* must be strictly positive. On success, the
2121 * 		helper makes sure that the *buf* is NUL-terminated. On failure,
2122 * 		it is filled with zeroes.
2123 * 	Return
2124 * 		0 on success, or a negative error in case of failure.
2125 *
2126 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
2127 * 	Description
2128 * 		Retrieve the classid for the current task, i.e. for the net_cls
2129 * 		cgroup to which *skb* belongs.
2130 *
2131 * 		This helper can be used on TC egress path, but not on ingress.
2132 *
2133 * 		The net_cls cgroup provides an interface to tag network packets
2134 * 		based on a user-provided identifier for all traffic coming from
2135 * 		the tasks belonging to the related cgroup. See also the related
2136 * 		kernel documentation, available from the Linux sources in file
2137 * 		*Documentation/admin-guide/cgroup-v1/net_cls.rst*.
2138 *
2139 * 		The Linux kernel has two versions for cgroups: there are
2140 * 		cgroups v1 and cgroups v2. Both are available to users, who can
2141 * 		use a mixture of them, but note that the net_cls cgroup is for
2142 * 		cgroup v1 only. This makes it incompatible with BPF programs
2143 * 		run on cgroups, which is a cgroup-v2-only feature (a socket can
2144 * 		only hold data for one version of cgroups at a time).
2145 *
2146 * 		This helper is only available is the kernel was compiled with
2147 * 		the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
2148 * 		"**y**" or to "**m**".
2149 * 	Return
2150 * 		The classid, or 0 for the default unconfigured classid.
2151 *
2152 * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
2153 * 	Description
2154 * 		Push a *vlan_tci* (VLAN tag control information) of protocol
2155 * 		*vlan_proto* to the packet associated to *skb*, then update
2156 * 		the checksum. Note that if *vlan_proto* is different from
2157 * 		**ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
2158 * 		be **ETH_P_8021Q**.
2159 *
2160 * 		A call to this helper is susceptible to change the underlying
2161 * 		packet buffer. Therefore, at load time, all checks on pointers
2162 * 		previously done by the verifier are invalidated and must be
2163 * 		performed again, if the helper is used in combination with
2164 * 		direct packet access.
2165 * 	Return
2166 * 		0 on success, or a negative error in case of failure.
2167 *
2168 * long bpf_skb_vlan_pop(struct sk_buff *skb)
2169 * 	Description
2170 * 		Pop a VLAN header from the packet associated to *skb*.
2171 *
2172 * 		A call to this helper is susceptible to change the underlying
2173 * 		packet buffer. Therefore, at load time, all checks on pointers
2174 * 		previously done by the verifier are invalidated and must be
2175 * 		performed again, if the helper is used in combination with
2176 * 		direct packet access.
2177 * 	Return
2178 * 		0 on success, or a negative error in case of failure.
2179 *
2180 * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
2181 * 	Description
2182 * 		Get tunnel metadata. This helper takes a pointer *key* to an
2183 * 		empty **struct bpf_tunnel_key** of **size**, that will be
2184 * 		filled with tunnel metadata for the packet associated to *skb*.
2185 * 		The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
2186 * 		indicates that the tunnel is based on IPv6 protocol instead of
2187 * 		IPv4.
2188 *
2189 * 		The **struct bpf_tunnel_key** is an object that generalizes the
2190 * 		principal parameters used by various tunneling protocols into a
2191 * 		single struct. This way, it can be used to easily make a
2192 * 		decision based on the contents of the encapsulation header,
2193 * 		"summarized" in this struct. In particular, it holds the IP
2194 * 		address of the remote end (IPv4 or IPv6, depending on the case)
2195 * 		in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
2196 * 		this struct exposes the *key*\ **->tunnel_id**, which is
2197 * 		generally mapped to a VNI (Virtual Network Identifier), making
2198 * 		it programmable together with the **bpf_skb_set_tunnel_key**\
2199 * 		() helper.
2200 *
2201 * 		Let's imagine that the following code is part of a program
2202 * 		attached to the TC ingress interface, on one end of a GRE
2203 * 		tunnel, and is supposed to filter out all messages coming from
2204 * 		remote ends with IPv4 address other than 10.0.0.1:
2205 *
2206 * 		::
2207 *
2208 * 			int ret;
2209 * 			struct bpf_tunnel_key key = {};
2210 *
2211 * 			ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
2212 * 			if (ret < 0)
2213 * 				return TC_ACT_SHOT;	// drop packet
2214 *
2215 * 			if (key.remote_ipv4 != 0x0a000001)
2216 * 				return TC_ACT_SHOT;	// drop packet
2217 *
2218 * 			return TC_ACT_OK;		// accept packet
2219 *
2220 * 		This interface can also be used with all encapsulation devices
2221 * 		that can operate in "collect metadata" mode: instead of having
2222 * 		one network device per specific configuration, the "collect
2223 * 		metadata" mode only requires a single device where the
2224 * 		configuration can be extracted from this helper.
2225 *
2226 * 		This can be used together with various tunnels such as VXLan,
2227 * 		Geneve, GRE or IP in IP (IPIP).
2228 * 	Return
2229 * 		0 on success, or a negative error in case of failure.
2230 *
2231 * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
2232 * 	Description
2233 * 		Populate tunnel metadata for packet associated to *skb.* The
2234 * 		tunnel metadata is set to the contents of *key*, of *size*. The
2235 * 		*flags* can be set to a combination of the following values:
2236 *
2237 * 		**BPF_F_TUNINFO_IPV6**
2238 * 			Indicate that the tunnel is based on IPv6 protocol
2239 * 			instead of IPv4.
2240 * 		**BPF_F_ZERO_CSUM_TX**
2241 * 			For IPv4 packets, add a flag to tunnel metadata
2242 * 			indicating that checksum computation should be skipped
2243 * 			and checksum set to zeroes.
2244 * 		**BPF_F_DONT_FRAGMENT**
2245 * 			Add a flag to tunnel metadata indicating that the
2246 * 			packet should not be fragmented.
2247 * 		**BPF_F_SEQ_NUMBER**
2248 * 			Add a flag to tunnel metadata indicating that a
2249 * 			sequence number should be added to tunnel header before
2250 * 			sending the packet. This flag was added for GRE
2251 * 			encapsulation, but might be used with other protocols
2252 * 			as well in the future.
2253 * 		**BPF_F_NO_TUNNEL_KEY**
2254 * 			Add a flag to tunnel metadata indicating that no tunnel
2255 * 			key should be set in the resulting tunnel header.
2256 *
2257 * 		Here is a typical usage on the transmit path:
2258 *
2259 * 		::
2260 *
2261 * 			struct bpf_tunnel_key key;
2262 * 			     populate key ...
2263 * 			bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
2264 * 			bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
2265 *
2266 * 		See also the description of the **bpf_skb_get_tunnel_key**\ ()
2267 * 		helper for additional information.
2268 * 	Return
2269 * 		0 on success, or a negative error in case of failure.
2270 *
2271 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
2272 * 	Description
2273 * 		Read the value of a perf event counter. This helper relies on a
2274 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
2275 * 		the perf event counter is selected when *map* is updated with
2276 * 		perf event file descriptors. The *map* is an array whose size
2277 * 		is the number of available CPUs, and each cell contains a value
2278 * 		relative to one CPU. The value to retrieve is indicated by
2279 * 		*flags*, that contains the index of the CPU to look up, masked
2280 * 		with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
2281 * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
2282 * 		current CPU should be retrieved.
2283 *
2284 * 		Note that before Linux 4.13, only hardware perf event can be
2285 * 		retrieved.
2286 *
2287 * 		Also, be aware that the newer helper
2288 * 		**bpf_perf_event_read_value**\ () is recommended over
2289 * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
2290 * 		quirks where error and counter value are used as a return code
2291 * 		(which is wrong to do since ranges may overlap). This issue is
2292 * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
2293 * 		time provides more features over the **bpf_perf_event_read**\
2294 * 		() interface. Please refer to the description of
2295 * 		**bpf_perf_event_read_value**\ () for details.
2296 * 	Return
2297 * 		The value of the perf event counter read from the map, or a
2298 * 		negative error code in case of failure.
2299 *
2300 * long bpf_redirect(u32 ifindex, u64 flags)
2301 * 	Description
2302 * 		Redirect the packet to another net device of index *ifindex*.
2303 * 		This helper is somewhat similar to **bpf_clone_redirect**\
2304 * 		(), except that the packet is not cloned, which provides
2305 * 		increased performance.
2306 *
2307 * 		Except for XDP, both ingress and egress interfaces can be used
2308 * 		for redirection. The **BPF_F_INGRESS** value in *flags* is used
2309 * 		to make the distinction (ingress path is selected if the flag
2310 * 		is present, egress path otherwise). Currently, XDP only
2311 * 		supports redirection to the egress interface, and accepts no
2312 * 		flag at all.
2313 *
2314 * 		The same effect can also be attained with the more generic
2315 * 		**bpf_redirect_map**\ (), which uses a BPF map to store the
2316 * 		redirect target instead of providing it directly to the helper.
2317 * 	Return
2318 * 		For XDP, the helper returns **XDP_REDIRECT** on success or
2319 * 		**XDP_ABORTED** on error. For other program types, the values
2320 * 		are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
2321 * 		error.
2322 *
2323 * u32 bpf_get_route_realm(struct sk_buff *skb)
2324 * 	Description
2325 * 		Retrieve the realm or the route, that is to say the
2326 * 		**tclassid** field of the destination for the *skb*. The
2327 * 		identifier retrieved is a user-provided tag, similar to the
2328 * 		one used with the net_cls cgroup (see description for
2329 * 		**bpf_get_cgroup_classid**\ () helper), but here this tag is
2330 * 		held by a route (a destination entry), not by a task.
2331 *
2332 * 		Retrieving this identifier works with the clsact TC egress hook
2333 * 		(see also **tc-bpf(8)**), or alternatively on conventional
2334 * 		classful egress qdiscs, but not on TC ingress path. In case of
2335 * 		clsact TC egress hook, this has the advantage that, internally,
2336 * 		the destination entry has not been dropped yet in the transmit
2337 * 		path. Therefore, the destination entry does not need to be
2338 * 		artificially held via **netif_keep_dst**\ () for a classful
2339 * 		qdisc until the *skb* is freed.
2340 *
2341 * 		This helper is available only if the kernel was compiled with
2342 * 		**CONFIG_IP_ROUTE_CLASSID** configuration option.
2343 * 	Return
2344 * 		The realm of the route for the packet associated to *skb*, or 0
2345 * 		if none was found.
2346 *
2347 * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
2348 * 	Description
2349 * 		Write raw *data* blob into a special BPF perf event held by
2350 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
2351 * 		event must have the following attributes: **PERF_SAMPLE_RAW**
2352 * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
2353 * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
2354 *
2355 * 		The *flags* are used to indicate the index in *map* for which
2356 * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
2357 * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
2358 * 		to indicate that the index of the current CPU core should be
2359 * 		used.
2360 *
2361 * 		The value to write, of *size*, is passed through eBPF stack and
2362 * 		pointed by *data*.
2363 *
2364 * 		The context of the program *ctx* needs also be passed to the
2365 * 		helper.
2366 *
2367 * 		On user space, a program willing to read the values needs to
2368 * 		call **perf_event_open**\ () on the perf event (either for
2369 * 		one or for all CPUs) and to store the file descriptor into the
2370 * 		*map*. This must be done before the eBPF program can send data
2371 * 		into it. An example is available in file
2372 * 		*samples/bpf/trace_output_user.c* in the Linux kernel source
2373 * 		tree (the eBPF program counterpart is in
2374 * 		*samples/bpf/trace_output_kern.c*).
2375 *
2376 * 		**bpf_perf_event_output**\ () achieves better performance
2377 * 		than **bpf_trace_printk**\ () for sharing data with user
2378 * 		space, and is much better suitable for streaming data from eBPF
2379 * 		programs.
2380 *
2381 * 		Note that this helper is not restricted to tracing use cases
2382 * 		and can be used with programs attached to TC or XDP as well,
2383 * 		where it allows for passing data to user space listeners. Data
2384 * 		can be:
2385 *
2386 * 		* Only custom structs,
2387 * 		* Only the packet payload, or
2388 * 		* A combination of both.
2389 * 	Return
2390 * 		0 on success, or a negative error in case of failure.
2391 *
2392 * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
2393 * 	Description
2394 * 		This helper was provided as an easy way to load data from a
2395 * 		packet. It can be used to load *len* bytes from *offset* from
2396 * 		the packet associated to *skb*, into the buffer pointed by
2397 * 		*to*.
2398 *
2399 * 		Since Linux 4.7, usage of this helper has mostly been replaced
2400 * 		by "direct packet access", enabling packet data to be
2401 * 		manipulated with *skb*\ **->data** and *skb*\ **->data_end**
2402 * 		pointing respectively to the first byte of packet data and to
2403 * 		the byte after the last byte of packet data. However, it
2404 * 		remains useful if one wishes to read large quantities of data
2405 * 		at once from a packet into the eBPF stack.
2406 * 	Return
2407 * 		0 on success, or a negative error in case of failure.
2408 *
2409 * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
2410 * 	Description
2411 * 		Walk a user or a kernel stack and return its id. To achieve
2412 * 		this, the helper needs *ctx*, which is a pointer to the context
2413 * 		on which the tracing program is executed, and a pointer to a
2414 * 		*map* of type **BPF_MAP_TYPE_STACK_TRACE**.
2415 *
2416 * 		The last argument, *flags*, holds the number of stack frames to
2417 * 		skip (from 0 to 255), masked with
2418 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
2419 * 		a combination of the following flags:
2420 *
2421 * 		**BPF_F_USER_STACK**
2422 * 			Collect a user space stack instead of a kernel stack.
2423 * 		**BPF_F_FAST_STACK_CMP**
2424 * 			Compare stacks by hash only.
2425 * 		**BPF_F_REUSE_STACKID**
2426 * 			If two different stacks hash into the same *stackid*,
2427 * 			discard the old one.
2428 *
2429 * 		The stack id retrieved is a 32 bit long integer handle which
2430 * 		can be further combined with other data (including other stack
2431 * 		ids) and used as a key into maps. This can be useful for
2432 * 		generating a variety of graphs (such as flame graphs or off-cpu
2433 * 		graphs).
2434 *
2435 * 		For walking a stack, this helper is an improvement over
2436 * 		**bpf_probe_read**\ (), which can be used with unrolled loops
2437 * 		but is not efficient and consumes a lot of eBPF instructions.
2438 * 		Instead, **bpf_get_stackid**\ () can collect up to
2439 * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
2440 * 		this limit can be controlled with the **sysctl** program, and
2441 * 		that it should be manually increased in order to profile long
2442 * 		user stacks (such as stacks for Java programs). To do so, use:
2443 *
2444 * 		::
2445 *
2446 * 			# sysctl kernel.perf_event_max_stack=<new value>
2447 * 	Return
2448 * 		The positive or null stack id on success, or a negative error
2449 * 		in case of failure.
2450 *
2451 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
2452 * 	Description
2453 * 		Compute a checksum difference, from the raw buffer pointed by
2454 * 		*from*, of length *from_size* (that must be a multiple of 4),
2455 * 		towards the raw buffer pointed by *to*, of size *to_size*
2456 * 		(same remark). An optional *seed* can be added to the value
2457 * 		(this can be cascaded, the seed may come from a previous call
2458 * 		to the helper).
2459 *
2460 * 		This is flexible enough to be used in several ways:
2461 *
2462 * 		* With *from_size* == 0, *to_size* > 0 and *seed* set to
2463 * 		  checksum, it can be used when pushing new data.
2464 * 		* With *from_size* > 0, *to_size* == 0 and *seed* set to
2465 * 		  checksum, it can be used when removing data from a packet.
2466 * 		* With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
2467 * 		  can be used to compute a diff. Note that *from_size* and
2468 * 		  *to_size* do not need to be equal.
2469 *
2470 * 		This helper can be used in combination with
2471 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
2472 * 		which one can feed in the difference computed with
2473 * 		**bpf_csum_diff**\ ().
2474 * 	Return
2475 * 		The checksum result, or a negative error code in case of
2476 * 		failure.
2477 *
2478 * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2479 * 	Description
2480 * 		Retrieve tunnel options metadata for the packet associated to
2481 * 		*skb*, and store the raw tunnel option data to the buffer *opt*
2482 * 		of *size*.
2483 *
2484 * 		This helper can be used with encapsulation devices that can
2485 * 		operate in "collect metadata" mode (please refer to the related
2486 * 		note in the description of **bpf_skb_get_tunnel_key**\ () for
2487 * 		more details). A particular example where this can be used is
2488 * 		in combination with the Geneve encapsulation protocol, where it
2489 * 		allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper)
2490 * 		and retrieving arbitrary TLVs (Type-Length-Value headers) from
2491 * 		the eBPF program. This allows for full customization of these
2492 * 		headers.
2493 * 	Return
2494 * 		The size of the option data retrieved.
2495 *
2496 * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
2497 * 	Description
2498 * 		Set tunnel options metadata for the packet associated to *skb*
2499 * 		to the option data contained in the raw buffer *opt* of *size*.
2500 *
2501 * 		See also the description of the **bpf_skb_get_tunnel_opt**\ ()
2502 * 		helper for additional information.
2503 * 	Return
2504 * 		0 on success, or a negative error in case of failure.
2505 *
2506 * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
2507 * 	Description
2508 * 		Change the protocol of the *skb* to *proto*. Currently
2509 * 		supported are transition from IPv4 to IPv6, and from IPv6 to
2510 * 		IPv4. The helper takes care of the groundwork for the
2511 * 		transition, including resizing the socket buffer. The eBPF
2512 * 		program is expected to fill the new headers, if any, via
2513 * 		**skb_store_bytes**\ () and to recompute the checksums with
2514 * 		**bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
2515 * 		(). The main case for this helper is to perform NAT64
2516 * 		operations out of an eBPF program.
2517 *
2518 * 		Internally, the GSO type is marked as dodgy so that headers are
2519 * 		checked and segments are recalculated by the GSO/GRO engine.
2520 * 		The size for GSO target is adapted as well.
2521 *
2522 * 		All values for *flags* are reserved for future usage, and must
2523 * 		be left at zero.
2524 *
2525 * 		A call to this helper is susceptible to change the underlying
2526 * 		packet buffer. Therefore, at load time, all checks on pointers
2527 * 		previously done by the verifier are invalidated and must be
2528 * 		performed again, if the helper is used in combination with
2529 * 		direct packet access.
2530 * 	Return
2531 * 		0 on success, or a negative error in case of failure.
2532 *
2533 * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
2534 * 	Description
2535 * 		Change the packet type for the packet associated to *skb*. This
2536 * 		comes down to setting *skb*\ **->pkt_type** to *type*, except
2537 * 		the eBPF program does not have a write access to *skb*\
2538 * 		**->pkt_type** beside this helper. Using a helper here allows
2539 * 		for graceful handling of errors.
2540 *
2541 * 		The major use case is to change incoming *skb*s to
2542 * 		**PACKET_HOST** in a programmatic way instead of having to
2543 * 		recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
2544 * 		example.
2545 *
2546 * 		Note that *type* only allows certain values. At this time, they
2547 * 		are:
2548 *
2549 * 		**PACKET_HOST**
2550 * 			Packet is for us.
2551 * 		**PACKET_BROADCAST**
2552 * 			Send packet to all.
2553 * 		**PACKET_MULTICAST**
2554 * 			Send packet to group.
2555 * 		**PACKET_OTHERHOST**
2556 * 			Send packet to someone else.
2557 * 	Return
2558 * 		0 on success, or a negative error in case of failure.
2559 *
2560 * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
2561 * 	Description
2562 * 		Check whether *skb* is a descendant of the cgroup2 held by
2563 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2564 * 	Return
2565 * 		The return value depends on the result of the test, and can be:
2566 *
2567 * 		* 0, if the *skb* failed the cgroup2 descendant test.
2568 * 		* 1, if the *skb* succeeded the cgroup2 descendant test.
2569 * 		* A negative error code, if an error occurred.
2570 *
2571 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
2572 * 	Description
2573 * 		Retrieve the hash of the packet, *skb*\ **->hash**. If it is
2574 * 		not set, in particular if the hash was cleared due to mangling,
2575 * 		recompute this hash. Later accesses to the hash can be done
2576 * 		directly with *skb*\ **->hash**.
2577 *
2578 * 		Calling **bpf_set_hash_invalid**\ (), changing a packet
2579 * 		prototype with **bpf_skb_change_proto**\ (), or calling
2580 * 		**bpf_skb_store_bytes**\ () with the
2581 * 		**BPF_F_INVALIDATE_HASH** are actions susceptible to clear
2582 * 		the hash and to trigger a new computation for the next call to
2583 * 		**bpf_get_hash_recalc**\ ().
2584 * 	Return
2585 * 		The 32-bit hash.
2586 *
2587 * u64 bpf_get_current_task(void)
2588 * 	Description
2589 * 		Get the current task.
2590 * 	Return
2591 * 		A pointer to the current task struct.
2592 *
2593 * long bpf_probe_write_user(void *dst, const void *src, u32 len)
2594 * 	Description
2595 * 		Attempt in a safe way to write *len* bytes from the buffer
2596 * 		*src* to *dst* in memory. It only works for threads that are in
2597 * 		user context, and *dst* must be a valid user space address.
2598 *
2599 * 		This helper should not be used to implement any kind of
2600 * 		security mechanism because of TOC-TOU attacks, but rather to
2601 * 		debug, divert, and manipulate execution of semi-cooperative
2602 * 		processes.
2603 *
2604 * 		Keep in mind that this feature is meant for experiments, and it
2605 * 		has a risk of crashing the system and running programs.
2606 * 		Therefore, when an eBPF program using this helper is attached,
2607 * 		a warning including PID and process name is printed to kernel
2608 * 		logs.
2609 * 	Return
2610 * 		0 on success, or a negative error in case of failure.
2611 *
2612 * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
2613 * 	Description
2614 * 		Check whether the probe is being run is the context of a given
2615 * 		subset of the cgroup2 hierarchy. The cgroup2 to test is held by
2616 * 		*map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
2617 * 	Return
2618 * 		The return value depends on the result of the test, and can be:
2619 *
2620 *		* 1, if current task belongs to the cgroup2.
2621 *		* 0, if current task does not belong to the cgroup2.
2622 * 		* A negative error code, if an error occurred.
2623 *
2624 * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
2625 * 	Description
2626 * 		Resize (trim or grow) the packet associated to *skb* to the
2627 * 		new *len*. The *flags* are reserved for future usage, and must
2628 * 		be left at zero.
2629 *
2630 * 		The basic idea is that the helper performs the needed work to
2631 * 		change the size of the packet, then the eBPF program rewrites
2632 * 		the rest via helpers like **bpf_skb_store_bytes**\ (),
2633 * 		**bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
2634 * 		and others. This helper is a slow path utility intended for
2635 * 		replies with control messages. And because it is targeted for
2636 * 		slow path, the helper itself can afford to be slow: it
2637 * 		implicitly linearizes, unclones and drops offloads from the
2638 * 		*skb*.
2639 *
2640 * 		A call to this helper is susceptible to change the underlying
2641 * 		packet buffer. Therefore, at load time, all checks on pointers
2642 * 		previously done by the verifier are invalidated and must be
2643 * 		performed again, if the helper is used in combination with
2644 * 		direct packet access.
2645 * 	Return
2646 * 		0 on success, or a negative error in case of failure.
2647 *
2648 * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
2649 * 	Description
2650 * 		Pull in non-linear data in case the *skb* is non-linear and not
2651 * 		all of *len* are part of the linear section. Make *len* bytes
2652 * 		from *skb* readable and writable. If a zero value is passed for
2653 *		*len*, then all bytes in the linear part of *skb* will be made
2654 *		readable and writable.
2655 *
2656 * 		This helper is only needed for reading and writing with direct
2657 * 		packet access.
2658 *
2659 * 		For direct packet access, testing that offsets to access
2660 * 		are within packet boundaries (test on *skb*\ **->data_end**) is
2661 * 		susceptible to fail if offsets are invalid, or if the requested
2662 * 		data is in non-linear parts of the *skb*. On failure the
2663 * 		program can just bail out, or in the case of a non-linear
2664 * 		buffer, use a helper to make the data available. The
2665 * 		**bpf_skb_load_bytes**\ () helper is a first solution to access
2666 * 		the data. Another one consists in using **bpf_skb_pull_data**
2667 * 		to pull in once the non-linear parts, then retesting and
2668 * 		eventually access the data.
2669 *
2670 * 		At the same time, this also makes sure the *skb* is uncloned,
2671 * 		which is a necessary condition for direct write. As this needs
2672 * 		to be an invariant for the write part only, the verifier
2673 * 		detects writes and adds a prologue that is calling
2674 * 		**bpf_skb_pull_data()** to effectively unclone the *skb* from
2675 * 		the very beginning in case it is indeed cloned.
2676 *
2677 * 		A call to this helper is susceptible to change the underlying
2678 * 		packet buffer. Therefore, at load time, all checks on pointers
2679 * 		previously done by the verifier are invalidated and must be
2680 * 		performed again, if the helper is used in combination with
2681 * 		direct packet access.
2682 * 	Return
2683 * 		0 on success, or a negative error in case of failure.
2684 *
2685 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
2686 * 	Description
2687 * 		Add the checksum *csum* into *skb*\ **->csum** in case the
2688 * 		driver has supplied a checksum for the entire packet into that
2689 * 		field. Return an error otherwise. This helper is intended to be
2690 * 		used in combination with **bpf_csum_diff**\ (), in particular
2691 * 		when the checksum needs to be updated after data has been
2692 * 		written into the packet through direct packet access.
2693 * 	Return
2694 * 		The checksum on success, or a negative error code in case of
2695 * 		failure.
2696 *
2697 * void bpf_set_hash_invalid(struct sk_buff *skb)
2698 * 	Description
2699 * 		Invalidate the current *skb*\ **->hash**. It can be used after
2700 * 		mangling on headers through direct packet access, in order to
2701 * 		indicate that the hash is outdated and to trigger a
2702 * 		recalculation the next time the kernel tries to access this
2703 * 		hash or when the **bpf_get_hash_recalc**\ () helper is called.
2704 * 	Return
2705 * 		void.
2706 *
2707 * long bpf_get_numa_node_id(void)
2708 * 	Description
2709 * 		Return the id of the current NUMA node. The primary use case
2710 * 		for this helper is the selection of sockets for the local NUMA
2711 * 		node, when the program is attached to sockets using the
2712 * 		**SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
2713 * 		but the helper is also available to other eBPF program types,
2714 * 		similarly to **bpf_get_smp_processor_id**\ ().
2715 * 	Return
2716 * 		The id of current NUMA node.
2717 *
2718 * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
2719 * 	Description
2720 * 		Grows headroom of packet associated to *skb* and adjusts the
2721 * 		offset of the MAC header accordingly, adding *len* bytes of
2722 * 		space. It automatically extends and reallocates memory as
2723 * 		required.
2724 *
2725 * 		This helper can be used on a layer 3 *skb* to push a MAC header
2726 * 		for redirection into a layer 2 device.
2727 *
2728 * 		All values for *flags* are reserved for future usage, and must
2729 * 		be left at zero.
2730 *
2731 * 		A call to this helper is susceptible to change the underlying
2732 * 		packet buffer. Therefore, at load time, all checks on pointers
2733 * 		previously done by the verifier are invalidated and must be
2734 * 		performed again, if the helper is used in combination with
2735 * 		direct packet access.
2736 * 	Return
2737 * 		0 on success, or a negative error in case of failure.
2738 *
2739 * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
2740 * 	Description
2741 * 		Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
2742 * 		it is possible to use a negative value for *delta*. This helper
2743 * 		can be used to prepare the packet for pushing or popping
2744 * 		headers.
2745 *
2746 * 		A call to this helper is susceptible to change the underlying
2747 * 		packet buffer. Therefore, at load time, all checks on pointers
2748 * 		previously done by the verifier are invalidated and must be
2749 * 		performed again, if the helper is used in combination with
2750 * 		direct packet access.
2751 * 	Return
2752 * 		0 on success, or a negative error in case of failure.
2753 *
2754 * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
2755 * 	Description
2756 * 		Copy a NUL terminated string from an unsafe kernel address
2757 * 		*unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
2758 * 		more details.
2759 *
2760 * 		Generally, use **bpf_probe_read_user_str**\ () or
2761 * 		**bpf_probe_read_kernel_str**\ () instead.
2762 * 	Return
2763 * 		On success, the strictly positive length of the string,
2764 * 		including the trailing NUL character. On error, a negative
2765 * 		value.
2766 *
2767 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
2768 * 	Description
2769 * 		If the **struct sk_buff** pointed by *skb* has a known socket,
2770 * 		retrieve the cookie (generated by the kernel) of this socket.
2771 * 		If no cookie has been set yet, generate a new cookie. Once
2772 * 		generated, the socket cookie remains stable for the life of the
2773 * 		socket. This helper can be useful for monitoring per socket
2774 * 		networking traffic statistics as it provides a global socket
2775 * 		identifier that can be assumed unique.
2776 * 	Return
2777 * 		A 8-byte long unique number on success, or 0 if the socket
2778 * 		field is missing inside *skb*.
2779 *
2780 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
2781 * 	Description
2782 * 		Equivalent to bpf_get_socket_cookie() helper that accepts
2783 * 		*skb*, but gets socket from **struct bpf_sock_addr** context.
2784 * 	Return
2785 * 		A 8-byte long unique number.
2786 *
2787 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
2788 * 	Description
2789 * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2790 * 		*skb*, but gets socket from **struct bpf_sock_ops** context.
2791 * 	Return
2792 * 		A 8-byte long unique number.
2793 *
2794 * u64 bpf_get_socket_cookie(struct sock *sk)
2795 * 	Description
2796 * 		Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
2797 * 		*sk*, but gets socket from a BTF **struct sock**. This helper
2798 * 		also works for sleepable programs.
2799 * 	Return
2800 * 		A 8-byte long unique number or 0 if *sk* is NULL.
2801 *
2802 * u32 bpf_get_socket_uid(struct sk_buff *skb)
2803 * 	Description
2804 * 		Get the owner UID of the socked associated to *skb*.
2805 * 	Return
2806 * 		The owner UID of the socket associated to *skb*. If the socket
2807 * 		is **NULL**, or if it is not a full socket (i.e. if it is a
2808 * 		time-wait or a request socket instead), **overflowuid** value
2809 * 		is returned (note that **overflowuid** might also be the actual
2810 * 		UID value for the socket).
2811 *
2812 * long bpf_set_hash(struct sk_buff *skb, u32 hash)
2813 * 	Description
2814 * 		Set the full hash for *skb* (set the field *skb*\ **->hash**)
2815 * 		to value *hash*.
2816 * 	Return
2817 * 		0
2818 *
2819 * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
2820 * 	Description
2821 * 		Emulate a call to **setsockopt()** on the socket associated to
2822 * 		*bpf_socket*, which must be a full socket. The *level* at
2823 * 		which the option resides and the name *optname* of the option
2824 * 		must be specified, see **setsockopt(2)** for more information.
2825 * 		The option value of length *optlen* is pointed by *optval*.
2826 *
2827 * 		*bpf_socket* should be one of the following:
2828 *
2829 * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
2830 *		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
2831 *		  **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
2832 *
2833 * 		This helper actually implements a subset of **setsockopt()**.
2834 * 		It supports the following *level*\ s:
2835 *
2836 * 		* **SOL_SOCKET**, which supports the following *optname*\ s:
2837 * 		  **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
2838 * 		  **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
2839 * 		  **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
2840 * 		  **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
2841 * 		* **IPPROTO_TCP**, which supports the following *optname*\ s:
2842 * 		  **TCP_CONGESTION**, **TCP_BPF_IW**,
2843 * 		  **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
2844 * 		  **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
2845 * 		  **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
2846 * 		  **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
2847 * 		  **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
2848 * 		  **TCP_BPF_RTO_MIN**.
2849 * 		* **IPPROTO_IP**, which supports *optname* **IP_TOS**.
2850 * 		* **IPPROTO_IPV6**, which supports the following *optname*\ s:
2851 * 		  **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
2852 * 	Return
2853 * 		0 on success, or a negative error in case of failure.
2854 *
2855 * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
2856 * 	Description
2857 * 		Grow or shrink the room for data in the packet associated to
2858 * 		*skb* by *len_diff*, and according to the selected *mode*.
2859 *
2860 * 		By default, the helper will reset any offloaded checksum
2861 * 		indicator of the skb to CHECKSUM_NONE. This can be avoided
2862 * 		by the following flag:
2863 *
2864 * 		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
2865 * 		  checksum data of the skb to CHECKSUM_NONE.
2866 *
2867 *		There are two supported modes at this time:
2868 *
2869 *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
2870 * 		  (room space is added or removed between the layer 2 and
2871 * 		  layer 3 headers).
2872 *
2873 * 		* **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
2874 * 		  (room space is added or removed between the layer 3 and
2875 * 		  layer 4 headers).
2876 *
2877 *		The following flags are supported at this time:
2878 *
2879 *		* **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
2880 *		  Adjusting mss in this way is not allowed for datagrams.
2881 *
2882 *		* **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
2883 *		  **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
2884 *		  Any new space is reserved to hold a tunnel header.
2885 *		  Configure skb offsets and other fields accordingly.
2886 *
2887 *		* **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
2888 *		  **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
2889 *		  Use with ENCAP_L3 flags to further specify the tunnel type.
2890 *
2891 *		* **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
2892 *		  Use with ENCAP_L3/L4 flags to further specify the tunnel
2893 *		  type; *len* is the length of the inner MAC header.
2894 *
2895 *		* **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
2896 *		  Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
2897 *		  L2 type as Ethernet.
2898 *
2899 *		* **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
2900 *		  **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
2901 *		  Indicate the new IP header version after decapsulating the outer
2902 *		  IP header. Used when the inner and outer IP versions are different.
2903 *
2904 * 		A call to this helper is susceptible to change the underlying
2905 * 		packet buffer. Therefore, at load time, all checks on pointers
2906 * 		previously done by the verifier are invalidated and must be
2907 * 		performed again, if the helper is used in combination with
2908 * 		direct packet access.
2909 * 	Return
2910 * 		0 on success, or a negative error in case of failure.
2911 *
2912 * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
2913 * 	Description
2914 * 		Redirect the packet to the endpoint referenced by *map* at
2915 * 		index *key*. Depending on its type, this *map* can contain
2916 * 		references to net devices (for forwarding packets through other
2917 * 		ports), or to CPUs (for redirecting XDP frames to another CPU;
2918 * 		but this is only implemented for native XDP (with driver
2919 * 		support) as of this writing).
2920 *
2921 * 		The lower two bits of *flags* are used as the return code if
2922 * 		the map lookup fails. This is so that the return value can be
2923 * 		one of the XDP program return codes up to **XDP_TX**, as chosen
2924 * 		by the caller. The higher bits of *flags* can be set to
2925 * 		BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
2926 *
2927 * 		With BPF_F_BROADCAST the packet will be broadcasted to all the
2928 * 		interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress
2929 * 		interface will be excluded when do broadcasting.
2930 *
2931 * 		See also **bpf_redirect**\ (), which only supports redirecting
2932 * 		to an ifindex, but doesn't require a map to do so.
2933 * 	Return
2934 * 		**XDP_REDIRECT** on success, or the value of the two lower bits
2935 * 		of the *flags* argument on error.
2936 *
2937 * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
2938 * 	Description
2939 * 		Redirect the packet to the socket referenced by *map* (of type
2940 * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
2941 * 		egress interfaces can be used for redirection. The
2942 * 		**BPF_F_INGRESS** value in *flags* is used to make the
2943 * 		distinction (ingress path is selected if the flag is present,
2944 * 		egress path otherwise). This is the only flag supported for now.
2945 * 	Return
2946 * 		**SK_PASS** on success, or **SK_DROP** on error.
2947 *
2948 * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
2949 * 	Description
2950 * 		Add an entry to, or update a *map* referencing sockets. The
2951 * 		*skops* is used as a new value for the entry associated to
2952 * 		*key*. *flags* is one of:
2953 *
2954 * 		**BPF_NOEXIST**
2955 * 			The entry for *key* must not exist in the map.
2956 * 		**BPF_EXIST**
2957 * 			The entry for *key* must already exist in the map.
2958 * 		**BPF_ANY**
2959 * 			No condition on the existence of the entry for *key*.
2960 *
2961 * 		If the *map* has eBPF programs (parser and verdict), those will
2962 * 		be inherited by the socket being added. If the socket is
2963 * 		already attached to eBPF programs, this results in an error.
2964 * 	Return
2965 * 		0 on success, or a negative error in case of failure.
2966 *
2967 * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
2968 * 	Description
2969 * 		Adjust the address pointed by *xdp_md*\ **->data_meta** by
2970 * 		*delta* (which can be positive or negative). Note that this
2971 * 		operation modifies the address stored in *xdp_md*\ **->data**,
2972 * 		so the latter must be loaded only after the helper has been
2973 * 		called.
2974 *
2975 * 		The use of *xdp_md*\ **->data_meta** is optional and programs
2976 * 		are not required to use it. The rationale is that when the
2977 * 		packet is processed with XDP (e.g. as DoS filter), it is
2978 * 		possible to push further meta data along with it before passing
2979 * 		to the stack, and to give the guarantee that an ingress eBPF
2980 * 		program attached as a TC classifier on the same device can pick
2981 * 		this up for further post-processing. Since TC works with socket
2982 * 		buffers, it remains possible to set from XDP the **mark** or
2983 * 		**priority** pointers, or other pointers for the socket buffer.
2984 * 		Having this scratch space generic and programmable allows for
2985 * 		more flexibility as the user is free to store whatever meta
2986 * 		data they need.
2987 *
2988 * 		A call to this helper is susceptible to change the underlying
2989 * 		packet buffer. Therefore, at load time, all checks on pointers
2990 * 		previously done by the verifier are invalidated and must be
2991 * 		performed again, if the helper is used in combination with
2992 * 		direct packet access.
2993 * 	Return
2994 * 		0 on success, or a negative error in case of failure.
2995 *
2996 * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
2997 * 	Description
2998 * 		Read the value of a perf event counter, and store it into *buf*
2999 * 		of size *buf_size*. This helper relies on a *map* of type
3000 * 		**BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
3001 * 		counter is selected when *map* is updated with perf event file
3002 * 		descriptors. The *map* is an array whose size is the number of
3003 * 		available CPUs, and each cell contains a value relative to one
3004 * 		CPU. The value to retrieve is indicated by *flags*, that
3005 * 		contains the index of the CPU to look up, masked with
3006 * 		**BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
3007 * 		**BPF_F_CURRENT_CPU** to indicate that the value for the
3008 * 		current CPU should be retrieved.
3009 *
3010 * 		This helper behaves in a way close to
3011 * 		**bpf_perf_event_read**\ () helper, save that instead of
3012 * 		just returning the value observed, it fills the *buf*
3013 * 		structure. This allows for additional data to be retrieved: in
3014 * 		particular, the enabled and running times (in *buf*\
3015 * 		**->enabled** and *buf*\ **->running**, respectively) are
3016 * 		copied. In general, **bpf_perf_event_read_value**\ () is
3017 * 		recommended over **bpf_perf_event_read**\ (), which has some
3018 * 		ABI issues and provides fewer functionalities.
3019 *
3020 * 		These values are interesting, because hardware PMU (Performance
3021 * 		Monitoring Unit) counters are limited resources. When there are
3022 * 		more PMU based perf events opened than available counters,
3023 * 		kernel will multiplex these events so each event gets certain
3024 * 		percentage (but not all) of the PMU time. In case that
3025 * 		multiplexing happens, the number of samples or counter value
3026 * 		will not reflect the case compared to when no multiplexing
3027 * 		occurs. This makes comparison between different runs difficult.
3028 * 		Typically, the counter value should be normalized before
3029 * 		comparing to other experiments. The usual normalization is done
3030 * 		as follows.
3031 *
3032 * 		::
3033 *
3034 * 			normalized_counter = counter * t_enabled / t_running
3035 *
3036 * 		Where t_enabled is the time enabled for event and t_running is
3037 * 		the time running for event since last normalization. The
3038 * 		enabled and running times are accumulated since the perf event
3039 * 		open. To achieve scaling factor between two invocations of an
3040 * 		eBPF program, users can use CPU id as the key (which is
3041 * 		typical for perf array usage model) to remember the previous
3042 * 		value and do the calculation inside the eBPF program.
3043 * 	Return
3044 * 		0 on success, or a negative error in case of failure.
3045 *
3046 * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
3047 * 	Description
3048 * 		For an eBPF program attached to a perf event, retrieve the
3049 * 		value of the event counter associated to *ctx* and store it in
3050 * 		the structure pointed by *buf* and of size *buf_size*. Enabled
3051 * 		and running times are also stored in the structure (see
3052 * 		description of helper **bpf_perf_event_read_value**\ () for
3053 * 		more details).
3054 * 	Return
3055 * 		0 on success, or a negative error in case of failure.
3056 *
3057 * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
3058 * 	Description
3059 * 		Emulate a call to **getsockopt()** on the socket associated to
3060 * 		*bpf_socket*, which must be a full socket. The *level* at
3061 * 		which the option resides and the name *optname* of the option
3062 * 		must be specified, see **getsockopt(2)** for more information.
3063 * 		The retrieved value is stored in the structure pointed by
3064 * 		*opval* and of length *optlen*.
3065 *
3066 * 		*bpf_socket* should be one of the following:
3067 *
3068 * 		* **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
3069 *		* **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
3070 *		  **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
3071 *
3072 * 		This helper actually implements a subset of **getsockopt()**.
3073 * 		It supports the same set of *optname*\ s that is supported by
3074 * 		the **bpf_setsockopt**\ () helper.  The exceptions are
3075 * 		**TCP_BPF_*** is **bpf_setsockopt**\ () only and
3076 * 		**TCP_SAVED_SYN** is **bpf_getsockopt**\ () only.
3077 * 	Return
3078 * 		0 on success, or a negative error in case of failure.
3079 *
3080 * long bpf_override_return(struct pt_regs *regs, u64 rc)
3081 * 	Description
3082 * 		Used for error injection, this helper uses kprobes to override
3083 * 		the return value of the probed function, and to set it to *rc*.
3084 * 		The first argument is the context *regs* on which the kprobe
3085 * 		works.
3086 *
3087 * 		This helper works by setting the PC (program counter)
3088 * 		to an override function which is run in place of the original
3089 * 		probed function. This means the probed function is not run at
3090 * 		all. The replacement function just returns with the required
3091 * 		value.
3092 *
3093 * 		This helper has security implications, and thus is subject to
3094 * 		restrictions. It is only available if the kernel was compiled
3095 * 		with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
3096 * 		option, and in this case it only works on functions tagged with
3097 * 		**ALLOW_ERROR_INJECTION** in the kernel code.
3098 *
3099 * 		Also, the helper is only available for the architectures having
3100 * 		the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
3101 * 		x86 architecture is the only one to support this feature.
3102 * 	Return
3103 * 		0
3104 *
3105 * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
3106 * 	Description
3107 * 		Attempt to set the value of the **bpf_sock_ops_cb_flags** field
3108 * 		for the full TCP socket associated to *bpf_sock_ops* to
3109 * 		*argval*.
3110 *
3111 * 		The primary use of this field is to determine if there should
3112 * 		be calls to eBPF programs of type
3113 * 		**BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
3114 * 		code. A program of the same type can change its value, per
3115 * 		connection and as necessary, when the connection is
3116 * 		established. This field is directly accessible for reading, but
3117 * 		this helper must be used for updates in order to return an
3118 * 		error if an eBPF program tries to set a callback that is not
3119 * 		supported in the current kernel.
3120 *
3121 * 		*argval* is a flag array which can combine these flags:
3122 *
3123 * 		* **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
3124 * 		* **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
3125 * 		* **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
3126 * 		* **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
3127 *
3128 * 		Therefore, this function can be used to clear a callback flag by
3129 * 		setting the appropriate bit to zero. e.g. to disable the RTO
3130 * 		callback:
3131 *
3132 * 		**bpf_sock_ops_cb_flags_set(bpf_sock,**
3133 * 			**bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
3134 *
3135 * 		Here are some examples of where one could call such eBPF
3136 * 		program:
3137 *
3138 * 		* When RTO fires.
3139 * 		* When a packet is retransmitted.
3140 * 		* When the connection terminates.
3141 * 		* When a packet is sent.
3142 * 		* When a packet is received.
3143 * 	Return
3144 * 		Code **-EINVAL** if the socket is not a full TCP socket;
3145 * 		otherwise, a positive number containing the bits that could not
3146 * 		be set is returned (which comes down to 0 if all bits were set
3147 * 		as required).
3148 *
3149 * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
3150 * 	Description
3151 * 		This helper is used in programs implementing policies at the
3152 * 		socket level. If the message *msg* is allowed to pass (i.e. if
3153 * 		the verdict eBPF program returns **SK_PASS**), redirect it to
3154 * 		the socket referenced by *map* (of type
3155 * 		**BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
3156 * 		egress interfaces can be used for redirection. The
3157 * 		**BPF_F_INGRESS** value in *flags* is used to make the
3158 * 		distinction (ingress path is selected if the flag is present,
3159 * 		egress path otherwise). This is the only flag supported for now.
3160 * 	Return
3161 * 		**SK_PASS** on success, or **SK_DROP** on error.
3162 *
3163 * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
3164 * 	Description
3165 * 		For socket policies, apply the verdict of the eBPF program to
3166 * 		the next *bytes* (number of bytes) of message *msg*.
3167 *
3168 * 		For example, this helper can be used in the following cases:
3169 *
3170 * 		* A single **sendmsg**\ () or **sendfile**\ () system call
3171 * 		  contains multiple logical messages that the eBPF program is
3172 * 		  supposed to read and for which it should apply a verdict.
3173 * 		* An eBPF program only cares to read the first *bytes* of a
3174 * 		  *msg*. If the message has a large payload, then setting up
3175 * 		  and calling the eBPF program repeatedly for all bytes, even
3176 * 		  though the verdict is already known, would create unnecessary
3177 * 		  overhead.
3178 *
3179 * 		When called from within an eBPF program, the helper sets a
3180 * 		counter internal to the BPF infrastructure, that is used to
3181 * 		apply the last verdict to the next *bytes*. If *bytes* is
3182 * 		smaller than the current data being processed from a
3183 * 		**sendmsg**\ () or **sendfile**\ () system call, the first
3184 * 		*bytes* will be sent and the eBPF program will be re-run with
3185 * 		the pointer for start of data pointing to byte number *bytes*
3186 * 		**+ 1**. If *bytes* is larger than the current data being
3187 * 		processed, then the eBPF verdict will be applied to multiple
3188 * 		**sendmsg**\ () or **sendfile**\ () calls until *bytes* are
3189 * 		consumed.
3190 *
3191 * 		Note that if a socket closes with the internal counter holding
3192 * 		a non-zero value, this is not a problem because data is not
3193 * 		being buffered for *bytes* and is sent as it is received.
3194 * 	Return
3195 * 		0
3196 *
3197 * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
3198 * 	Description
3199 * 		For socket policies, prevent the execution of the verdict eBPF
3200 * 		program for message *msg* until *bytes* (byte number) have been
3201 * 		accumulated.
3202 *
3203 * 		This can be used when one needs a specific number of bytes
3204 * 		before a verdict can be assigned, even if the data spans
3205 * 		multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
3206 * 		case would be a user calling **sendmsg**\ () repeatedly with
3207 * 		1-byte long message segments. Obviously, this is bad for
3208 * 		performance, but it is still valid. If the eBPF program needs
3209 * 		*bytes* bytes to validate a header, this helper can be used to
3210 * 		prevent the eBPF program to be called again until *bytes* have
3211 * 		been accumulated.
3212 * 	Return
3213 * 		0
3214 *
3215 * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
3216 * 	Description
3217 * 		For socket policies, pull in non-linear data from user space
3218 * 		for *msg* and set pointers *msg*\ **->data** and *msg*\
3219 * 		**->data_end** to *start* and *end* bytes offsets into *msg*,
3220 * 		respectively.
3221 *
3222 * 		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3223 * 		*msg* it can only parse data that the (**data**, **data_end**)
3224 * 		pointers have already consumed. For **sendmsg**\ () hooks this
3225 * 		is likely the first scatterlist element. But for calls relying
3226 * 		on the **sendpage** handler (e.g. **sendfile**\ ()) this will
3227 * 		be the range (**0**, **0**) because the data is shared with
3228 * 		user space and by default the objective is to avoid allowing
3229 * 		user space to modify data while (or after) eBPF verdict is
3230 * 		being decided. This helper can be used to pull in data and to
3231 * 		set the start and end pointer to given values. Data will be
3232 * 		copied if necessary (i.e. if data was not linear and if start
3233 * 		and end pointers do not point to the same chunk).
3234 *
3235 * 		A call to this helper is susceptible to change the underlying
3236 * 		packet buffer. Therefore, at load time, all checks on pointers
3237 * 		previously done by the verifier are invalidated and must be
3238 * 		performed again, if the helper is used in combination with
3239 * 		direct packet access.
3240 *
3241 * 		All values for *flags* are reserved for future usage, and must
3242 * 		be left at zero.
3243 * 	Return
3244 * 		0 on success, or a negative error in case of failure.
3245 *
3246 * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
3247 * 	Description
3248 * 		Bind the socket associated to *ctx* to the address pointed by
3249 * 		*addr*, of length *addr_len*. This allows for making outgoing
3250 * 		connection from the desired IP address, which can be useful for
3251 * 		example when all processes inside a cgroup should use one
3252 * 		single IP address on a host that has multiple IP configured.
3253 *
3254 * 		This helper works for IPv4 and IPv6, TCP and UDP sockets. The
3255 * 		domain (*addr*\ **->sa_family**) must be **AF_INET** (or
3256 * 		**AF_INET6**). It's advised to pass zero port (**sin_port**
3257 * 		or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
3258 * 		behavior and lets the kernel efficiently pick up an unused
3259 * 		port as long as 4-tuple is unique. Passing non-zero port might
3260 * 		lead to degraded performance.
3261 * 	Return
3262 * 		0 on success, or a negative error in case of failure.
3263 *
3264 * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
3265 * 	Description
3266 * 		Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
3267 * 		possible to both shrink and grow the packet tail.
3268 * 		Shrink done via *delta* being a negative integer.
3269 *
3270 * 		A call to this helper is susceptible to change the underlying
3271 * 		packet buffer. Therefore, at load time, all checks on pointers
3272 * 		previously done by the verifier are invalidated and must be
3273 * 		performed again, if the helper is used in combination with
3274 * 		direct packet access.
3275 * 	Return
3276 * 		0 on success, or a negative error in case of failure.
3277 *
3278 * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
3279 * 	Description
3280 * 		Retrieve the XFRM state (IP transform framework, see also
3281 * 		**ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
3282 *
3283 * 		The retrieved value is stored in the **struct bpf_xfrm_state**
3284 * 		pointed by *xfrm_state* and of length *size*.
3285 *
3286 * 		All values for *flags* are reserved for future usage, and must
3287 * 		be left at zero.
3288 *
3289 * 		This helper is available only if the kernel was compiled with
3290 * 		**CONFIG_XFRM** configuration option.
3291 * 	Return
3292 * 		0 on success, or a negative error in case of failure.
3293 *
3294 * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
3295 * 	Description
3296 * 		Return a user or a kernel stack in bpf program provided buffer.
3297 * 		To achieve this, the helper needs *ctx*, which is a pointer
3298 * 		to the context on which the tracing program is executed.
3299 * 		To store the stacktrace, the bpf program provides *buf* with
3300 * 		a nonnegative *size*.
3301 *
3302 * 		The last argument, *flags*, holds the number of stack frames to
3303 * 		skip (from 0 to 255), masked with
3304 * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
3305 * 		the following flags:
3306 *
3307 * 		**BPF_F_USER_STACK**
3308 * 			Collect a user space stack instead of a kernel stack.
3309 * 		**BPF_F_USER_BUILD_ID**
3310 * 			Collect (build_id, file_offset) instead of ips for user
3311 * 			stack, only valid if **BPF_F_USER_STACK** is also
3312 * 			specified.
3313 *
3314 * 			*file_offset* is an offset relative to the beginning
3315 * 			of the executable or shared object file backing the vma
3316 * 			which the *ip* falls in. It is *not* an offset relative
3317 * 			to that object's base address. Accordingly, it must be
3318 * 			adjusted by adding (sh_addr - sh_offset), where
3319 * 			sh_{addr,offset} correspond to the executable section
3320 * 			containing *file_offset* in the object, for comparisons
3321 * 			to symbols' st_value to be valid.
3322 *
3323 * 		**bpf_get_stack**\ () can collect up to
3324 * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
3325 * 		to sufficient large buffer size. Note that
3326 * 		this limit can be controlled with the **sysctl** program, and
3327 * 		that it should be manually increased in order to profile long
3328 * 		user stacks (such as stacks for Java programs). To do so, use:
3329 *
3330 * 		::
3331 *
3332 * 			# sysctl kernel.perf_event_max_stack=<new value>
3333 * 	Return
3334 * 		The non-negative copied *buf* length equal to or less than
3335 * 		*size* on success, or a negative error in case of failure.
3336 *
3337 * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
3338 * 	Description
3339 * 		This helper is similar to **bpf_skb_load_bytes**\ () in that
3340 * 		it provides an easy way to load *len* bytes from *offset*
3341 * 		from the packet associated to *skb*, into the buffer pointed
3342 * 		by *to*. The difference to **bpf_skb_load_bytes**\ () is that
3343 * 		a fifth argument *start_header* exists in order to select a
3344 * 		base offset to start from. *start_header* can be one of:
3345 *
3346 * 		**BPF_HDR_START_MAC**
3347 * 			Base offset to load data from is *skb*'s mac header.
3348 * 		**BPF_HDR_START_NET**
3349 * 			Base offset to load data from is *skb*'s network header.
3350 *
3351 * 		In general, "direct packet access" is the preferred method to
3352 * 		access packet data, however, this helper is in particular useful
3353 * 		in socket filters where *skb*\ **->data** does not always point
3354 * 		to the start of the mac header and where "direct packet access"
3355 * 		is not available.
3356 * 	Return
3357 * 		0 on success, or a negative error in case of failure.
3358 *
3359 * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
3360 *	Description
3361 *		Do FIB lookup in kernel tables using parameters in *params*.
3362 *		If lookup is successful and result shows packet is to be
3363 *		forwarded, the neighbor tables are searched for the nexthop.
3364 *		If successful (ie., FIB lookup shows forwarding and nexthop
3365 *		is resolved), the nexthop address is returned in ipv4_dst
3366 *		or ipv6_dst based on family, smac is set to mac address of
3367 *		egress device, dmac is set to nexthop mac address, rt_metric
3368 *		is set to metric from route (IPv4/IPv6 only), and ifindex
3369 *		is set to the device index of the nexthop from the FIB lookup.
3370 *
3371 *		*plen* argument is the size of the passed in struct.
3372 *		*flags* argument can be a combination of one or more of the
3373 *		following values:
3374 *
3375 *		**BPF_FIB_LOOKUP_DIRECT**
3376 *			Do a direct table lookup vs full lookup using FIB
3377 *			rules.
3378 *		**BPF_FIB_LOOKUP_TBID**
3379 *			Used with BPF_FIB_LOOKUP_DIRECT.
3380 *			Use the routing table ID present in *params*->tbid
3381 *			for the fib lookup.
3382 *		**BPF_FIB_LOOKUP_OUTPUT**
3383 *			Perform lookup from an egress perspective (default is
3384 *			ingress).
3385 *		**BPF_FIB_LOOKUP_SKIP_NEIGH**
3386 *			Skip the neighbour table lookup. *params*->dmac
3387 *			and *params*->smac will not be set as output. A common
3388 *			use case is to call **bpf_redirect_neigh**\ () after
3389 *			doing **bpf_fib_lookup**\ ().
3390 *		**BPF_FIB_LOOKUP_SRC**
3391 *			Derive and set source IP addr in *params*->ipv{4,6}_src
3392 *			for the nexthop. If the src addr cannot be derived,
3393 *			**BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
3394 *			case, *params*->dmac and *params*->smac are not set either.
3395 *
3396 *		*ctx* is either **struct xdp_md** for XDP programs or
3397 *		**struct sk_buff** tc cls_act programs.
3398 *	Return
3399 *		* < 0 if any input argument is invalid
3400 *		*   0 on success (packet is forwarded, nexthop neighbor exists)
3401 *		* > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
3402 *		  packet is not forwarded or needs assist from full stack
3403 *
3404 *		If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
3405 *		was exceeded and output params->mtu_result contains the MTU.
3406 *
3407 * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
3408 *	Description
3409 *		Add an entry to, or update a sockhash *map* referencing sockets.
3410 *		The *skops* is used as a new value for the entry associated to
3411 *		*key*. *flags* is one of:
3412 *
3413 *		**BPF_NOEXIST**
3414 *			The entry for *key* must not exist in the map.
3415 *		**BPF_EXIST**
3416 *			The entry for *key* must already exist in the map.
3417 *		**BPF_ANY**
3418 *			No condition on the existence of the entry for *key*.
3419 *
3420 *		If the *map* has eBPF programs (parser and verdict), those will
3421 *		be inherited by the socket being added. If the socket is
3422 *		already attached to eBPF programs, this results in an error.
3423 *	Return
3424 *		0 on success, or a negative error in case of failure.
3425 *
3426 * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
3427 *	Description
3428 *		This helper is used in programs implementing policies at the
3429 *		socket level. If the message *msg* is allowed to pass (i.e. if
3430 *		the verdict eBPF program returns **SK_PASS**), redirect it to
3431 *		the socket referenced by *map* (of type
3432 *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3433 *		egress interfaces can be used for redirection. The
3434 *		**BPF_F_INGRESS** value in *flags* is used to make the
3435 *		distinction (ingress path is selected if the flag is present,
3436 *		egress path otherwise). This is the only flag supported for now.
3437 *	Return
3438 *		**SK_PASS** on success, or **SK_DROP** on error.
3439 *
3440 * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
3441 *	Description
3442 *		This helper is used in programs implementing policies at the
3443 *		skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
3444 *		if the verdict eBPF program returns **SK_PASS**), redirect it
3445 *		to the socket referenced by *map* (of type
3446 *		**BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
3447 *		egress interfaces can be used for redirection. The
3448 *		**BPF_F_INGRESS** value in *flags* is used to make the
3449 *		distinction (ingress path is selected if the flag is present,
3450 *		egress otherwise). This is the only flag supported for now.
3451 *	Return
3452 *		**SK_PASS** on success, or **SK_DROP** on error.
3453 *
3454 * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
3455 *	Description
3456 *		Encapsulate the packet associated to *skb* within a Layer 3
3457 *		protocol header. This header is provided in the buffer at
3458 *		address *hdr*, with *len* its size in bytes. *type* indicates
3459 *		the protocol of the header and can be one of:
3460 *
3461 *		**BPF_LWT_ENCAP_SEG6**
3462 *			IPv6 encapsulation with Segment Routing Header
3463 *			(**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
3464 *			the IPv6 header is computed by the kernel.
3465 *		**BPF_LWT_ENCAP_SEG6_INLINE**
3466 *			Only works if *skb* contains an IPv6 packet. Insert a
3467 *			Segment Routing Header (**struct ipv6_sr_hdr**) inside
3468 *			the IPv6 header.
3469 *		**BPF_LWT_ENCAP_IP**
3470 *			IP encapsulation (GRE/GUE/IPIP/etc). The outer header
3471 *			must be IPv4 or IPv6, followed by zero or more
3472 *			additional headers, up to **LWT_BPF_MAX_HEADROOM**
3473 *			total bytes in all prepended headers. Please note that
3474 *			if **skb_is_gso**\ (*skb*) is true, no more than two
3475 *			headers can be prepended, and the inner header, if
3476 *			present, should be either GRE or UDP/GUE.
3477 *
3478 *		**BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
3479 *		of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
3480 *		be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
3481 *		**BPF_PROG_TYPE_LWT_XMIT**.
3482 *
3483 * 		A call to this helper is susceptible to change the underlying
3484 * 		packet buffer. Therefore, at load time, all checks on pointers
3485 * 		previously done by the verifier are invalidated and must be
3486 * 		performed again, if the helper is used in combination with
3487 * 		direct packet access.
3488 *	Return
3489 * 		0 on success, or a negative error in case of failure.
3490 *
3491 * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
3492 *	Description
3493 *		Store *len* bytes from address *from* into the packet
3494 *		associated to *skb*, at *offset*. Only the flags, tag and TLVs
3495 *		inside the outermost IPv6 Segment Routing Header can be
3496 *		modified through this helper.
3497 *
3498 * 		A call to this helper is susceptible to change the underlying
3499 * 		packet buffer. Therefore, at load time, all checks on pointers
3500 * 		previously done by the verifier are invalidated and must be
3501 * 		performed again, if the helper is used in combination with
3502 * 		direct packet access.
3503 *	Return
3504 * 		0 on success, or a negative error in case of failure.
3505 *
3506 * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
3507 *	Description
3508 *		Adjust the size allocated to TLVs in the outermost IPv6
3509 *		Segment Routing Header contained in the packet associated to
3510 *		*skb*, at position *offset* by *delta* bytes. Only offsets
3511 *		after the segments are accepted. *delta* can be as well
3512 *		positive (growing) as negative (shrinking).
3513 *
3514 * 		A call to this helper is susceptible to change the underlying
3515 * 		packet buffer. Therefore, at load time, all checks on pointers
3516 * 		previously done by the verifier are invalidated and must be
3517 * 		performed again, if the helper is used in combination with
3518 * 		direct packet access.
3519 *	Return
3520 * 		0 on success, or a negative error in case of failure.
3521 *
3522 * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
3523 *	Description
3524 *		Apply an IPv6 Segment Routing action of type *action* to the
3525 *		packet associated to *skb*. Each action takes a parameter
3526 *		contained at address *param*, and of length *param_len* bytes.
3527 *		*action* can be one of:
3528 *
3529 *		**SEG6_LOCAL_ACTION_END_X**
3530 *			End.X action: Endpoint with Layer-3 cross-connect.
3531 *			Type of *param*: **struct in6_addr**.
3532 *		**SEG6_LOCAL_ACTION_END_T**
3533 *			End.T action: Endpoint with specific IPv6 table lookup.
3534 *			Type of *param*: **int**.
3535 *		**SEG6_LOCAL_ACTION_END_B6**
3536 *			End.B6 action: Endpoint bound to an SRv6 policy.
3537 *			Type of *param*: **struct ipv6_sr_hdr**.
3538 *		**SEG6_LOCAL_ACTION_END_B6_ENCAP**
3539 *			End.B6.Encap action: Endpoint bound to an SRv6
3540 *			encapsulation policy.
3541 *			Type of *param*: **struct ipv6_sr_hdr**.
3542 *
3543 * 		A call to this helper is susceptible to change the underlying
3544 * 		packet buffer. Therefore, at load time, all checks on pointers
3545 * 		previously done by the verifier are invalidated and must be
3546 * 		performed again, if the helper is used in combination with
3547 * 		direct packet access.
3548 *	Return
3549 * 		0 on success, or a negative error in case of failure.
3550 *
3551 * long bpf_rc_repeat(void *ctx)
3552 *	Description
3553 *		This helper is used in programs implementing IR decoding, to
3554 *		report a successfully decoded repeat key message. This delays
3555 *		the generation of a key up event for previously generated
3556 *		key down event.
3557 *
3558 *		Some IR protocols like NEC have a special IR message for
3559 *		repeating last button, for when a button is held down.
3560 *
3561 *		The *ctx* should point to the lirc sample as passed into
3562 *		the program.
3563 *
3564 *		This helper is only available is the kernel was compiled with
3565 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3566 *		"**y**".
3567 *	Return
3568 *		0
3569 *
3570 * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
3571 *	Description
3572 *		This helper is used in programs implementing IR decoding, to
3573 *		report a successfully decoded key press with *scancode*,
3574 *		*toggle* value in the given *protocol*. The scancode will be
3575 *		translated to a keycode using the rc keymap, and reported as
3576 *		an input key down event. After a period a key up event is
3577 *		generated. This period can be extended by calling either
3578 *		**bpf_rc_keydown**\ () again with the same values, or calling
3579 *		**bpf_rc_repeat**\ ().
3580 *
3581 *		Some protocols include a toggle bit, in case the button was
3582 *		released and pressed again between consecutive scancodes.
3583 *
3584 *		The *ctx* should point to the lirc sample as passed into
3585 *		the program.
3586 *
3587 *		The *protocol* is the decoded protocol number (see
3588 *		**enum rc_proto** for some predefined values).
3589 *
3590 *		This helper is only available is the kernel was compiled with
3591 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3592 *		"**y**".
3593 *	Return
3594 *		0
3595 *
3596 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
3597 * 	Description
3598 * 		Return the cgroup v2 id of the socket associated with the *skb*.
3599 * 		This is roughly similar to the **bpf_get_cgroup_classid**\ ()
3600 * 		helper for cgroup v1 by providing a tag resp. identifier that
3601 * 		can be matched on or used for map lookups e.g. to implement
3602 * 		policy. The cgroup v2 id of a given path in the hierarchy is
3603 * 		exposed in user space through the f_handle API in order to get
3604 * 		to the same 64-bit id.
3605 *
3606 * 		This helper can be used on TC egress path, but not on ingress,
3607 * 		and is available only if the kernel was compiled with the
3608 * 		**CONFIG_SOCK_CGROUP_DATA** configuration option.
3609 * 	Return
3610 * 		The id is returned or 0 in case the id could not be retrieved.
3611 *
3612 * u64 bpf_get_current_cgroup_id(void)
3613 * 	Description
3614 * 		Get the current cgroup id based on the cgroup within which
3615 * 		the current task is running.
3616 * 	Return
3617 * 		A 64-bit integer containing the current cgroup id based
3618 * 		on the cgroup within which the current task is running.
3619 *
3620 * void *bpf_get_local_storage(void *map, u64 flags)
3621 *	Description
3622 *		Get the pointer to the local storage area.
3623 *		The type and the size of the local storage is defined
3624 *		by the *map* argument.
3625 *		The *flags* meaning is specific for each map type,
3626 *		and has to be 0 for cgroup local storage.
3627 *
3628 *		Depending on the BPF program type, a local storage area
3629 *		can be shared between multiple instances of the BPF program,
3630 *		running simultaneously.
3631 *
3632 *		A user should care about the synchronization by himself.
3633 *		For example, by using the **BPF_ATOMIC** instructions to alter
3634 *		the shared data.
3635 *	Return
3636 *		A pointer to the local storage area.
3637 *
3638 * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
3639 *	Description
3640 *		Select a **SO_REUSEPORT** socket from a
3641 *		**BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
3642 *		It checks the selected socket is matching the incoming
3643 *		request in the socket buffer.
3644 *	Return
3645 *		0 on success, or a negative error in case of failure.
3646 *
3647 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
3648 *	Description
3649 *		Return id of cgroup v2 that is ancestor of cgroup associated
3650 *		with the *skb* at the *ancestor_level*.  The root cgroup is at
3651 *		*ancestor_level* zero and each step down the hierarchy
3652 *		increments the level. If *ancestor_level* == level of cgroup
3653 *		associated with *skb*, then return value will be same as that
3654 *		of **bpf_skb_cgroup_id**\ ().
3655 *
3656 *		The helper is useful to implement policies based on cgroups
3657 *		that are upper in hierarchy than immediate cgroup associated
3658 *		with *skb*.
3659 *
3660 *		The format of returned id and helper limitations are same as in
3661 *		**bpf_skb_cgroup_id**\ ().
3662 *	Return
3663 *		The id is returned or 0 in case the id could not be retrieved.
3664 *
3665 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3666 *	Description
3667 *		Look for TCP socket matching *tuple*, optionally in a child
3668 *		network namespace *netns*. The return value must be checked,
3669 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
3670 *
3671 *		The *ctx* should point to the context of the program, such as
3672 *		the skb or socket (depending on the hook in use). This is used
3673 *		to determine the base network namespace for the lookup.
3674 *
3675 *		*tuple_size* must be one of:
3676 *
3677 *		**sizeof**\ (*tuple*\ **->ipv4**)
3678 *			Look for an IPv4 socket.
3679 *		**sizeof**\ (*tuple*\ **->ipv6**)
3680 *			Look for an IPv6 socket.
3681 *
3682 *		If the *netns* is a negative signed 32-bit integer, then the
3683 *		socket lookup table in the netns associated with the *ctx*
3684 *		will be used. For the TC hooks, this is the netns of the device
3685 *		in the skb. For socket hooks, this is the netns of the socket.
3686 *		If *netns* is any other signed 32-bit value greater than or
3687 *		equal to zero then it specifies the ID of the netns relative to
3688 *		the netns associated with the *ctx*. *netns* values beyond the
3689 *		range of 32-bit integers are reserved for future use.
3690 *
3691 *		All values for *flags* are reserved for future usage, and must
3692 *		be left at zero.
3693 *
3694 *		This helper is available only if the kernel was compiled with
3695 *		**CONFIG_NET** configuration option.
3696 *	Return
3697 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3698 *		For sockets with reuseport option, the **struct bpf_sock**
3699 *		result is from *reuse*\ **->socks**\ [] using the hash of the
3700 *		tuple.
3701 *
3702 * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3703 *	Description
3704 *		Look for UDP socket matching *tuple*, optionally in a child
3705 *		network namespace *netns*. The return value must be checked,
3706 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
3707 *
3708 *		The *ctx* should point to the context of the program, such as
3709 *		the skb or socket (depending on the hook in use). This is used
3710 *		to determine the base network namespace for the lookup.
3711 *
3712 *		*tuple_size* must be one of:
3713 *
3714 *		**sizeof**\ (*tuple*\ **->ipv4**)
3715 *			Look for an IPv4 socket.
3716 *		**sizeof**\ (*tuple*\ **->ipv6**)
3717 *			Look for an IPv6 socket.
3718 *
3719 *		If the *netns* is a negative signed 32-bit integer, then the
3720 *		socket lookup table in the netns associated with the *ctx*
3721 *		will be used. For the TC hooks, this is the netns of the device
3722 *		in the skb. For socket hooks, this is the netns of the socket.
3723 *		If *netns* is any other signed 32-bit value greater than or
3724 *		equal to zero then it specifies the ID of the netns relative to
3725 *		the netns associated with the *ctx*. *netns* values beyond the
3726 *		range of 32-bit integers are reserved for future use.
3727 *
3728 *		All values for *flags* are reserved for future usage, and must
3729 *		be left at zero.
3730 *
3731 *		This helper is available only if the kernel was compiled with
3732 *		**CONFIG_NET** configuration option.
3733 *	Return
3734 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3735 *		For sockets with reuseport option, the **struct bpf_sock**
3736 *		result is from *reuse*\ **->socks**\ [] using the hash of the
3737 *		tuple.
3738 *
3739 * long bpf_sk_release(void *sock)
3740 *	Description
3741 *		Release the reference held by *sock*. *sock* must be a
3742 *		non-**NULL** pointer that was returned from
3743 *		**bpf_sk_lookup_xxx**\ ().
3744 *	Return
3745 *		0 on success, or a negative error in case of failure.
3746 *
3747 * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
3748 * 	Description
3749 * 		Push an element *value* in *map*. *flags* is one of:
3750 *
3751 * 		**BPF_EXIST**
3752 * 			If the queue/stack is full, the oldest element is
3753 * 			removed to make room for this.
3754 * 	Return
3755 * 		0 on success, or a negative error in case of failure.
3756 *
3757 * long bpf_map_pop_elem(struct bpf_map *map, void *value)
3758 * 	Description
3759 * 		Pop an element from *map*.
3760 * 	Return
3761 * 		0 on success, or a negative error in case of failure.
3762 *
3763 * long bpf_map_peek_elem(struct bpf_map *map, void *value)
3764 * 	Description
3765 * 		Get an element from *map* without removing it.
3766 * 	Return
3767 * 		0 on success, or a negative error in case of failure.
3768 *
3769 * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3770 *	Description
3771 *		For socket policies, insert *len* bytes into *msg* at offset
3772 *		*start*.
3773 *
3774 *		If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
3775 *		*msg* it may want to insert metadata or options into the *msg*.
3776 *		This can later be read and used by any of the lower layer BPF
3777 *		hooks.
3778 *
3779 *		This helper may fail if under memory pressure (a malloc
3780 *		fails) in these cases BPF programs will get an appropriate
3781 *		error and BPF programs will need to handle them.
3782 *	Return
3783 *		0 on success, or a negative error in case of failure.
3784 *
3785 * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
3786 *	Description
3787 *		Will remove *len* bytes from a *msg* starting at byte *start*.
3788 *		This may result in **ENOMEM** errors under certain situations if
3789 *		an allocation and copy are required due to a full ring buffer.
3790 *		However, the helper will try to avoid doing the allocation
3791 *		if possible. Other errors can occur if input parameters are
3792 *		invalid either due to *start* byte not being valid part of *msg*
3793 *		payload and/or *pop* value being to large.
3794 *	Return
3795 *		0 on success, or a negative error in case of failure.
3796 *
3797 * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
3798 *	Description
3799 *		This helper is used in programs implementing IR decoding, to
3800 *		report a successfully decoded pointer movement.
3801 *
3802 *		The *ctx* should point to the lirc sample as passed into
3803 *		the program.
3804 *
3805 *		This helper is only available is the kernel was compiled with
3806 *		the **CONFIG_BPF_LIRC_MODE2** configuration option set to
3807 *		"**y**".
3808 *	Return
3809 *		0
3810 *
3811 * long bpf_spin_lock(struct bpf_spin_lock *lock)
3812 *	Description
3813 *		Acquire a spinlock represented by the pointer *lock*, which is
3814 *		stored as part of a value of a map. Taking the lock allows to
3815 *		safely update the rest of the fields in that value. The
3816 *		spinlock can (and must) later be released with a call to
3817 *		**bpf_spin_unlock**\ (\ *lock*\ ).
3818 *
3819 *		Spinlocks in BPF programs come with a number of restrictions
3820 *		and constraints:
3821 *
3822 *		* **bpf_spin_lock** objects are only allowed inside maps of
3823 *		  types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
3824 *		  list could be extended in the future).
3825 *		* BTF description of the map is mandatory.
3826 *		* The BPF program can take ONE lock at a time, since taking two
3827 *		  or more could cause dead locks.
3828 *		* Only one **struct bpf_spin_lock** is allowed per map element.
3829 *		* When the lock is taken, calls (either BPF to BPF or helpers)
3830 *		  are not allowed.
3831 *		* The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
3832 *		  allowed inside a spinlock-ed region.
3833 *		* The BPF program MUST call **bpf_spin_unlock**\ () to release
3834 *		  the lock, on all execution paths, before it returns.
3835 *		* The BPF program can access **struct bpf_spin_lock** only via
3836 *		  the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
3837 *		  helpers. Loading or storing data into the **struct
3838 *		  bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
3839 *		* To use the **bpf_spin_lock**\ () helper, the BTF description
3840 *		  of the map value must be a struct and have **struct
3841 *		  bpf_spin_lock** *anyname*\ **;** field at the top level.
3842 *		  Nested lock inside another struct is not allowed.
3843 *		* The **struct bpf_spin_lock** *lock* field in a map value must
3844 *		  be aligned on a multiple of 4 bytes in that value.
3845 *		* Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
3846 *		  the **bpf_spin_lock** field to user space.
3847 *		* Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
3848 *		  a BPF program, do not update the **bpf_spin_lock** field.
3849 *		* **bpf_spin_lock** cannot be on the stack or inside a
3850 *		  networking packet (it can only be inside of a map values).
3851 *		* **bpf_spin_lock** is available to root only.
3852 *		* Tracing programs and socket filter programs cannot use
3853 *		  **bpf_spin_lock**\ () due to insufficient preemption checks
3854 *		  (but this may change in the future).
3855 *		* **bpf_spin_lock** is not allowed in inner maps of map-in-map.
3856 *	Return
3857 *		0
3858 *
3859 * long bpf_spin_unlock(struct bpf_spin_lock *lock)
3860 *	Description
3861 *		Release the *lock* previously locked by a call to
3862 *		**bpf_spin_lock**\ (\ *lock*\ ).
3863 *	Return
3864 *		0
3865 *
3866 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
3867 *	Description
3868 *		This helper gets a **struct bpf_sock** pointer such
3869 *		that all the fields in this **bpf_sock** can be accessed.
3870 *	Return
3871 *		A **struct bpf_sock** pointer on success, or **NULL** in
3872 *		case of failure.
3873 *
3874 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
3875 *	Description
3876 *		This helper gets a **struct bpf_tcp_sock** pointer from a
3877 *		**struct bpf_sock** pointer.
3878 *	Return
3879 *		A **struct bpf_tcp_sock** pointer on success, or **NULL** in
3880 *		case of failure.
3881 *
3882 * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
3883 *	Description
3884 *		Set ECN (Explicit Congestion Notification) field of IP header
3885 *		to **CE** (Congestion Encountered) if current value is **ECT**
3886 *		(ECN Capable Transport). Otherwise, do nothing. Works with IPv6
3887 *		and IPv4.
3888 *	Return
3889 *		1 if the **CE** flag is set (either by the current helper call
3890 *		or because it was already present), 0 if it is not set.
3891 *
3892 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
3893 *	Description
3894 *		Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
3895 *		**bpf_sk_release**\ () is unnecessary and not allowed.
3896 *	Return
3897 *		A **struct bpf_sock** pointer on success, or **NULL** in
3898 *		case of failure.
3899 *
3900 * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
3901 *	Description
3902 *		Look for TCP socket matching *tuple*, optionally in a child
3903 *		network namespace *netns*. The return value must be checked,
3904 *		and if non-**NULL**, released via **bpf_sk_release**\ ().
3905 *
3906 *		This function is identical to **bpf_sk_lookup_tcp**\ (), except
3907 *		that it also returns timewait or request sockets. Use
3908 *		**bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
3909 *		full structure.
3910 *
3911 *		This helper is available only if the kernel was compiled with
3912 *		**CONFIG_NET** configuration option.
3913 *	Return
3914 *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
3915 *		For sockets with reuseport option, the **struct bpf_sock**
3916 *		result is from *reuse*\ **->socks**\ [] using the hash of the
3917 *		tuple.
3918 *
3919 * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
3920 * 	Description
3921 * 		Check whether *iph* and *th* contain a valid SYN cookie ACK for
3922 * 		the listening socket in *sk*.
3923 *
3924 * 		*iph* points to the start of the IPv4 or IPv6 header, while
3925 * 		*iph_len* contains **sizeof**\ (**struct iphdr**) or
3926 * 		**sizeof**\ (**struct ipv6hdr**).
3927 *
3928 * 		*th* points to the start of the TCP header, while *th_len*
3929 *		contains the length of the TCP header (at least
3930 *		**sizeof**\ (**struct tcphdr**)).
3931 * 	Return
3932 * 		0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
3933 * 		error otherwise.
3934 *
3935 * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
3936 *	Description
3937 *		Get name of sysctl in /proc/sys/ and copy it into provided by
3938 *		program buffer *buf* of size *buf_len*.
3939 *
3940 *		The buffer is always NUL terminated, unless it's zero-sized.
3941 *
3942 *		If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
3943 *		copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
3944 *		only (e.g. "tcp_mem").
3945 *	Return
3946 *		Number of character copied (not including the trailing NUL).
3947 *
3948 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
3949 *		truncated name in this case).
3950 *
3951 * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3952 *	Description
3953 *		Get current value of sysctl as it is presented in /proc/sys
3954 *		(incl. newline, etc), and copy it as a string into provided
3955 *		by program buffer *buf* of size *buf_len*.
3956 *
3957 *		The whole value is copied, no matter what file position user
3958 *		space issued e.g. sys_read at.
3959 *
3960 *		The buffer is always NUL terminated, unless it's zero-sized.
3961 *	Return
3962 *		Number of character copied (not including the trailing NUL).
3963 *
3964 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
3965 *		truncated name in this case).
3966 *
3967 *		**-EINVAL** if current value was unavailable, e.g. because
3968 *		sysctl is uninitialized and read returns -EIO for it.
3969 *
3970 * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
3971 *	Description
3972 *		Get new value being written by user space to sysctl (before
3973 *		the actual write happens) and copy it as a string into
3974 *		provided by program buffer *buf* of size *buf_len*.
3975 *
3976 *		User space may write new value at file position > 0.
3977 *
3978 *		The buffer is always NUL terminated, unless it's zero-sized.
3979 *	Return
3980 *		Number of character copied (not including the trailing NUL).
3981 *
3982 *		**-E2BIG** if the buffer wasn't big enough (*buf* will contain
3983 *		truncated name in this case).
3984 *
3985 *		**-EINVAL** if sysctl is being read.
3986 *
3987 * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
3988 *	Description
3989 *		Override new value being written by user space to sysctl with
3990 *		value provided by program in buffer *buf* of size *buf_len*.
3991 *
3992 *		*buf* should contain a string in same form as provided by user
3993 *		space on sysctl write.
3994 *
3995 *		User space may write new value at file position > 0. To override
3996 *		the whole sysctl value file position should be set to zero.
3997 *	Return
3998 *		0 on success.
3999 *
4000 *		**-E2BIG** if the *buf_len* is too big.
4001 *
4002 *		**-EINVAL** if sysctl is being read.
4003 *
4004 * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
4005 *	Description
4006 *		Convert the initial part of the string from buffer *buf* of
4007 *		size *buf_len* to a long integer according to the given base
4008 *		and save the result in *res*.
4009 *
4010 *		The string may begin with an arbitrary amount of white space
4011 *		(as determined by **isspace**\ (3)) followed by a single
4012 *		optional '**-**' sign.
4013 *
4014 *		Five least significant bits of *flags* encode base, other bits
4015 *		are currently unused.
4016 *
4017 *		Base must be either 8, 10, 16 or 0 to detect it automatically
4018 *		similar to user space **strtol**\ (3).
4019 *	Return
4020 *		Number of characters consumed on success. Must be positive but
4021 *		no more than *buf_len*.
4022 *
4023 *		**-EINVAL** if no valid digits were found or unsupported base
4024 *		was provided.
4025 *
4026 *		**-ERANGE** if resulting value was out of range.
4027 *
4028 * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
4029 *	Description
4030 *		Convert the initial part of the string from buffer *buf* of
4031 *		size *buf_len* to an unsigned long integer according to the
4032 *		given base and save the result in *res*.
4033 *
4034 *		The string may begin with an arbitrary amount of white space
4035 *		(as determined by **isspace**\ (3)).
4036 *
4037 *		Five least significant bits of *flags* encode base, other bits
4038 *		are currently unused.
4039 *
4040 *		Base must be either 8, 10, 16 or 0 to detect it automatically
4041 *		similar to user space **strtoul**\ (3).
4042 *	Return
4043 *		Number of characters consumed on success. Must be positive but
4044 *		no more than *buf_len*.
4045 *
4046 *		**-EINVAL** if no valid digits were found or unsupported base
4047 *		was provided.
4048 *
4049 *		**-ERANGE** if resulting value was out of range.
4050 *
4051 * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
4052 *	Description
4053 *		Get a bpf-local-storage from a *sk*.
4054 *
4055 *		Logically, it could be thought of getting the value from
4056 *		a *map* with *sk* as the **key**.  From this
4057 *		perspective,  the usage is not much different from
4058 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
4059 *		helper enforces the key must be a full socket and the map must
4060 *		be a **BPF_MAP_TYPE_SK_STORAGE** also.
4061 *
4062 *		Underneath, the value is stored locally at *sk* instead of
4063 *		the *map*.  The *map* is used as the bpf-local-storage
4064 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
4065 *		searched against all bpf-local-storages residing at *sk*.
4066 *
4067 *		*sk* is a kernel **struct sock** pointer for LSM program.
4068 *		*sk* is a **struct bpf_sock** pointer for other program types.
4069 *
4070 *		An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
4071 *		used such that a new bpf-local-storage will be
4072 *		created if one does not exist.  *value* can be used
4073 *		together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
4074 *		the initial value of a bpf-local-storage.  If *value* is
4075 *		**NULL**, the new bpf-local-storage will be zero initialized.
4076 *	Return
4077 *		A bpf-local-storage pointer is returned on success.
4078 *
4079 *		**NULL** if not found or there was an error in adding
4080 *		a new bpf-local-storage.
4081 *
4082 * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
4083 *	Description
4084 *		Delete a bpf-local-storage from a *sk*.
4085 *	Return
4086 *		0 on success.
4087 *
4088 *		**-ENOENT** if the bpf-local-storage cannot be found.
4089 *		**-EINVAL** if sk is not a fullsock (e.g. a request_sock).
4090 *
4091 * long bpf_send_signal(u32 sig)
4092 *	Description
4093 *		Send signal *sig* to the process of the current task.
4094 *		The signal may be delivered to any of this process's threads.
4095 *	Return
4096 *		0 on success or successfully queued.
4097 *
4098 *		**-EBUSY** if work queue under nmi is full.
4099 *
4100 *		**-EINVAL** if *sig* is invalid.
4101 *
4102 *		**-EPERM** if no permission to send the *sig*.
4103 *
4104 *		**-EAGAIN** if bpf program can try again.
4105 *
4106 * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
4107 *	Description
4108 *		Try to issue a SYN cookie for the packet with corresponding
4109 *		IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
4110 *
4111 *		*iph* points to the start of the IPv4 or IPv6 header, while
4112 *		*iph_len* contains **sizeof**\ (**struct iphdr**) or
4113 *		**sizeof**\ (**struct ipv6hdr**).
4114 *
4115 *		*th* points to the start of the TCP header, while *th_len*
4116 *		contains the length of the TCP header with options (at least
4117 *		**sizeof**\ (**struct tcphdr**)).
4118 *	Return
4119 *		On success, lower 32 bits hold the generated SYN cookie in
4120 *		followed by 16 bits which hold the MSS value for that cookie,
4121 *		and the top 16 bits are unused.
4122 *
4123 *		On failure, the returned value is one of the following:
4124 *
4125 *		**-EINVAL** SYN cookie cannot be issued due to error
4126 *
4127 *		**-ENOENT** SYN cookie should not be issued (no SYN flood)
4128 *
4129 *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
4130 *
4131 *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
4132 *
4133 * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
4134 * 	Description
4135 * 		Write raw *data* blob into a special BPF perf event held by
4136 * 		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
4137 * 		event must have the following attributes: **PERF_SAMPLE_RAW**
4138 * 		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
4139 * 		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
4140 *
4141 * 		The *flags* are used to indicate the index in *map* for which
4142 * 		the value must be put, masked with **BPF_F_INDEX_MASK**.
4143 * 		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
4144 * 		to indicate that the index of the current CPU core should be
4145 * 		used.
4146 *
4147 * 		The value to write, of *size*, is passed through eBPF stack and
4148 * 		pointed by *data*.
4149 *
4150 * 		*ctx* is a pointer to in-kernel struct sk_buff.
4151 *
4152 * 		This helper is similar to **bpf_perf_event_output**\ () but
4153 * 		restricted to raw_tracepoint bpf programs.
4154 * 	Return
4155 * 		0 on success, or a negative error in case of failure.
4156 *
4157 * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
4158 * 	Description
4159 * 		Safely attempt to read *size* bytes from user space address
4160 * 		*unsafe_ptr* and store the data in *dst*.
4161 * 	Return
4162 * 		0 on success, or a negative error in case of failure.
4163 *
4164 * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
4165 * 	Description
4166 * 		Safely attempt to read *size* bytes from kernel space address
4167 * 		*unsafe_ptr* and store the data in *dst*.
4168 * 	Return
4169 * 		0 on success, or a negative error in case of failure.
4170 *
4171 * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
4172 * 	Description
4173 * 		Copy a NUL terminated string from an unsafe user address
4174 * 		*unsafe_ptr* to *dst*. The *size* should include the
4175 * 		terminating NUL byte. In case the string length is smaller than
4176 * 		*size*, the target is not padded with further NUL bytes. If the
4177 * 		string length is larger than *size*, just *size*-1 bytes are
4178 * 		copied and the last byte is set to NUL.
4179 *
4180 * 		On success, returns the number of bytes that were written,
4181 * 		including the terminal NUL. This makes this helper useful in
4182 * 		tracing programs for reading strings, and more importantly to
4183 * 		get its length at runtime. See the following snippet:
4184 *
4185 * 		::
4186 *
4187 * 			SEC("kprobe/sys_open")
4188 * 			void bpf_sys_open(struct pt_regs *ctx)
4189 * 			{
4190 * 			        char buf[PATHLEN]; // PATHLEN is defined to 256
4191 * 			        int res = bpf_probe_read_user_str(buf, sizeof(buf),
4192 * 				                                  ctx->di);
4193 *
4194 * 				// Consume buf, for example push it to
4195 * 				// userspace via bpf_perf_event_output(); we
4196 * 				// can use res (the string length) as event
4197 * 				// size, after checking its boundaries.
4198 * 			}
4199 *
4200 * 		In comparison, using **bpf_probe_read_user**\ () helper here
4201 * 		instead to read the string would require to estimate the length
4202 * 		at compile time, and would often result in copying more memory
4203 * 		than necessary.
4204 *
4205 * 		Another useful use case is when parsing individual process
4206 * 		arguments or individual environment variables navigating
4207 * 		*current*\ **->mm->arg_start** and *current*\
4208 * 		**->mm->env_start**: using this helper and the return value,
4209 * 		one can quickly iterate at the right offset of the memory area.
4210 * 	Return
4211 * 		On success, the strictly positive length of the output string,
4212 * 		including the trailing NUL character. On error, a negative
4213 * 		value.
4214 *
4215 * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
4216 * 	Description
4217 * 		Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
4218 * 		to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
4219 * 	Return
4220 * 		On success, the strictly positive length of the string, including
4221 * 		the trailing NUL character. On error, a negative value.
4222 *
4223 * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
4224 *	Description
4225 *		Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
4226 *		*rcv_nxt* is the ack_seq to be sent out.
4227 *	Return
4228 *		0 on success, or a negative error in case of failure.
4229 *
4230 * long bpf_send_signal_thread(u32 sig)
4231 *	Description
4232 *		Send signal *sig* to the thread corresponding to the current task.
4233 *	Return
4234 *		0 on success or successfully queued.
4235 *
4236 *		**-EBUSY** if work queue under nmi is full.
4237 *
4238 *		**-EINVAL** if *sig* is invalid.
4239 *
4240 *		**-EPERM** if no permission to send the *sig*.
4241 *
4242 *		**-EAGAIN** if bpf program can try again.
4243 *
4244 * u64 bpf_jiffies64(void)
4245 *	Description
4246 *		Obtain the 64bit jiffies
4247 *	Return
4248 *		The 64 bit jiffies
4249 *
4250 * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
4251 *	Description
4252 *		For an eBPF program attached to a perf event, retrieve the
4253 *		branch records (**struct perf_branch_entry**) associated to *ctx*
4254 *		and store it in the buffer pointed by *buf* up to size
4255 *		*size* bytes.
4256 *	Return
4257 *		On success, number of bytes written to *buf*. On error, a
4258 *		negative value.
4259 *
4260 *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
4261 *		instead return the number of bytes required to store all the
4262 *		branch entries. If this flag is set, *buf* may be NULL.
4263 *
4264 *		**-EINVAL** if arguments invalid or **size** not a multiple
4265 *		of **sizeof**\ (**struct perf_branch_entry**\ ).
4266 *
4267 *		**-ENOENT** if architecture does not support branch records.
4268 *
4269 * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
4270 *	Description
4271 *		Returns 0 on success, values for *pid* and *tgid* as seen from the current
4272 *		*namespace* will be returned in *nsdata*.
4273 *	Return
4274 *		0 on success, or one of the following in case of failure:
4275 *
4276 *		**-EINVAL** if dev and inum supplied don't match dev_t and inode number
4277 *              with nsfs of current task, or if dev conversion to dev_t lost high bits.
4278 *
4279 *		**-ENOENT** if pidns does not exists for the current task.
4280 *
4281 * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
4282 *	Description
4283 *		Write raw *data* blob into a special BPF perf event held by
4284 *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
4285 *		event must have the following attributes: **PERF_SAMPLE_RAW**
4286 *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
4287 *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
4288 *
4289 *		The *flags* are used to indicate the index in *map* for which
4290 *		the value must be put, masked with **BPF_F_INDEX_MASK**.
4291 *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
4292 *		to indicate that the index of the current CPU core should be
4293 *		used.
4294 *
4295 *		The value to write, of *size*, is passed through eBPF stack and
4296 *		pointed by *data*.
4297 *
4298 *		*ctx* is a pointer to in-kernel struct xdp_buff.
4299 *
4300 *		This helper is similar to **bpf_perf_eventoutput**\ () but
4301 *		restricted to raw_tracepoint bpf programs.
4302 *	Return
4303 *		0 on success, or a negative error in case of failure.
4304 *
4305 * u64 bpf_get_netns_cookie(void *ctx)
4306 * 	Description
4307 * 		Retrieve the cookie (generated by the kernel) of the network
4308 * 		namespace the input *ctx* is associated with. The network
4309 * 		namespace cookie remains stable for its lifetime and provides
4310 * 		a global identifier that can be assumed unique. If *ctx* is
4311 * 		NULL, then the helper returns the cookie for the initial
4312 * 		network namespace. The cookie itself is very similar to that
4313 * 		of **bpf_get_socket_cookie**\ () helper, but for network
4314 * 		namespaces instead of sockets.
4315 * 	Return
4316 * 		A 8-byte long opaque number.
4317 *
4318 * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
4319 * 	Description
4320 * 		Return id of cgroup v2 that is ancestor of the cgroup associated
4321 * 		with the current task at the *ancestor_level*. The root cgroup
4322 * 		is at *ancestor_level* zero and each step down the hierarchy
4323 * 		increments the level. If *ancestor_level* == level of cgroup
4324 * 		associated with the current task, then return value will be the
4325 * 		same as that of **bpf_get_current_cgroup_id**\ ().
4326 *
4327 * 		The helper is useful to implement policies based on cgroups
4328 * 		that are upper in hierarchy than immediate cgroup associated
4329 * 		with the current task.
4330 *
4331 * 		The format of returned id and helper limitations are same as in
4332 * 		**bpf_get_current_cgroup_id**\ ().
4333 * 	Return
4334 * 		The id is returned or 0 in case the id could not be retrieved.
4335 *
4336 * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
4337 *	Description
4338 *		Helper is overloaded depending on BPF program type. This
4339 *		description applies to **BPF_PROG_TYPE_SCHED_CLS** and
4340 *		**BPF_PROG_TYPE_SCHED_ACT** programs.
4341 *
4342 *		Assign the *sk* to the *skb*. When combined with appropriate
4343 *		routing configuration to receive the packet towards the socket,
4344 *		will cause *skb* to be delivered to the specified socket.
4345 *		Subsequent redirection of *skb* via  **bpf_redirect**\ (),
4346 *		**bpf_clone_redirect**\ () or other methods outside of BPF may
4347 *		interfere with successful delivery to the socket.
4348 *
4349 *		This operation is only valid from TC ingress path.
4350 *
4351 *		The *flags* argument must be zero.
4352 *	Return
4353 *		0 on success, or a negative error in case of failure:
4354 *
4355 *		**-EINVAL** if specified *flags* are not supported.
4356 *
4357 *		**-ENOENT** if the socket is unavailable for assignment.
4358 *
4359 *		**-ENETUNREACH** if the socket is unreachable (wrong netns).
4360 *
4361 *		**-EOPNOTSUPP** if the operation is not supported, for example
4362 *		a call from outside of TC ingress.
4363 *
4364 * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
4365 *	Description
4366 *		Helper is overloaded depending on BPF program type. This
4367 *		description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
4368 *
4369 *		Select the *sk* as a result of a socket lookup.
4370 *
4371 *		For the operation to succeed passed socket must be compatible
4372 *		with the packet description provided by the *ctx* object.
4373 *
4374 *		L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
4375 *		be an exact match. While IP family (**AF_INET** or
4376 *		**AF_INET6**) must be compatible, that is IPv6 sockets
4377 *		that are not v6-only can be selected for IPv4 packets.
4378 *
4379 *		Only TCP listeners and UDP unconnected sockets can be
4380 *		selected. *sk* can also be NULL to reset any previous
4381 *		selection.
4382 *
4383 *		*flags* argument can combination of following values:
4384 *
4385 *		* **BPF_SK_LOOKUP_F_REPLACE** to override the previous
4386 *		  socket selection, potentially done by a BPF program
4387 *		  that ran before us.
4388 *
4389 *		* **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
4390 *		  load-balancing within reuseport group for the socket
4391 *		  being selected.
4392 *
4393 *		On success *ctx->sk* will point to the selected socket.
4394 *
4395 *	Return
4396 *		0 on success, or a negative errno in case of failure.
4397 *
4398 *		* **-EAFNOSUPPORT** if socket family (*sk->family*) is
4399 *		  not compatible with packet family (*ctx->family*).
4400 *
4401 *		* **-EEXIST** if socket has been already selected,
4402 *		  potentially by another program, and
4403 *		  **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
4404 *
4405 *		* **-EINVAL** if unsupported flags were specified.
4406 *
4407 *		* **-EPROTOTYPE** if socket L4 protocol
4408 *		  (*sk->protocol*) doesn't match packet protocol
4409 *		  (*ctx->protocol*).
4410 *
4411 *		* **-ESOCKTNOSUPPORT** if socket is not in allowed
4412 *		  state (TCP listening or UDP unconnected).
4413 *
4414 * u64 bpf_ktime_get_boot_ns(void)
4415 * 	Description
4416 * 		Return the time elapsed since system boot, in nanoseconds.
4417 * 		Does include the time the system was suspended.
4418 * 		See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
4419 * 	Return
4420 * 		Current *ktime*.
4421 *
4422 * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
4423 * 	Description
4424 * 		**bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
4425 * 		out the format string.
4426 * 		The *m* represents the seq_file. The *fmt* and *fmt_size* are for
4427 * 		the format string itself. The *data* and *data_len* are format string
4428 * 		arguments. The *data* are a **u64** array and corresponding format string
4429 * 		values are stored in the array. For strings and pointers where pointees
4430 * 		are accessed, only the pointer values are stored in the *data* array.
4431 * 		The *data_len* is the size of *data* in bytes - must be a multiple of 8.
4432 *
4433 *		Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory.
4434 *		Reading kernel memory may fail due to either invalid address or
4435 *		valid address but requiring a major memory fault. If reading kernel memory
4436 *		fails, the string for **%s** will be an empty string, and the ip
4437 *		address for **%p{i,I}{4,6}** will be 0. Not returning error to
4438 *		bpf program is consistent with what **bpf_trace_printk**\ () does for now.
4439 * 	Return
4440 * 		0 on success, or a negative error in case of failure:
4441 *
4442 *		**-EBUSY** if per-CPU memory copy buffer is busy, can try again
4443 *		by returning 1 from bpf program.
4444 *
4445 *		**-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
4446 *
4447 *		**-E2BIG** if *fmt* contains too many format specifiers.
4448 *
4449 *		**-EOVERFLOW** if an overflow happened: The same object will be tried again.
4450 *
4451 * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
4452 * 	Description
4453 * 		**bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
4454 * 		The *m* represents the seq_file. The *data* and *len* represent the
4455 * 		data to write in bytes.
4456 * 	Return
4457 * 		0 on success, or a negative error in case of failure:
4458 *
4459 *		**-EOVERFLOW** if an overflow happened: The same object will be tried again.
4460 *
4461 * u64 bpf_sk_cgroup_id(void *sk)
4462 *	Description
4463 *		Return the cgroup v2 id of the socket *sk*.
4464 *
4465 *		*sk* must be a non-**NULL** pointer to a socket, e.g. one
4466 *		returned from **bpf_sk_lookup_xxx**\ (),
4467 *		**bpf_sk_fullsock**\ (), etc. The format of returned id is
4468 *		same as in **bpf_skb_cgroup_id**\ ().
4469 *
4470 *		This helper is available only if the kernel was compiled with
4471 *		the **CONFIG_SOCK_CGROUP_DATA** configuration option.
4472 *	Return
4473 *		The id is returned or 0 in case the id could not be retrieved.
4474 *
4475 * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
4476 *	Description
4477 *		Return id of cgroup v2 that is ancestor of cgroup associated
4478 *		with the *sk* at the *ancestor_level*.  The root cgroup is at
4479 *		*ancestor_level* zero and each step down the hierarchy
4480 *		increments the level. If *ancestor_level* == level of cgroup
4481 *		associated with *sk*, then return value will be same as that
4482 *		of **bpf_sk_cgroup_id**\ ().
4483 *
4484 *		The helper is useful to implement policies based on cgroups
4485 *		that are upper in hierarchy than immediate cgroup associated
4486 *		with *sk*.
4487 *
4488 *		The format of returned id and helper limitations are same as in
4489 *		**bpf_sk_cgroup_id**\ ().
4490 *	Return
4491 *		The id is returned or 0 in case the id could not be retrieved.
4492 *
4493 * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
4494 * 	Description
4495 * 		Copy *size* bytes from *data* into a ring buffer *ringbuf*.
4496 * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4497 * 		of new data availability is sent.
4498 * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4499 * 		of new data availability is sent unconditionally.
4500 * 		If **0** is specified in *flags*, an adaptive notification
4501 * 		of new data availability is sent.
4502 *
4503 * 		An adaptive notification is a notification sent whenever the user-space
4504 * 		process has caught up and consumed all available payloads. In case the user-space
4505 * 		process is still processing a previous payload, then no notification is needed
4506 * 		as it will process the newly added payload automatically.
4507 * 	Return
4508 * 		0 on success, or a negative error in case of failure.
4509 *
4510 * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
4511 * 	Description
4512 * 		Reserve *size* bytes of payload in a ring buffer *ringbuf*.
4513 * 		*flags* must be 0.
4514 * 	Return
4515 * 		Valid pointer with *size* bytes of memory available; NULL,
4516 * 		otherwise.
4517 *
4518 * void bpf_ringbuf_submit(void *data, u64 flags)
4519 * 	Description
4520 * 		Submit reserved ring buffer sample, pointed to by *data*.
4521 * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4522 * 		of new data availability is sent.
4523 * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4524 * 		of new data availability is sent unconditionally.
4525 * 		If **0** is specified in *flags*, an adaptive notification
4526 * 		of new data availability is sent.
4527 *
4528 * 		See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4529 * 	Return
4530 * 		Nothing. Always succeeds.
4531 *
4532 * void bpf_ringbuf_discard(void *data, u64 flags)
4533 * 	Description
4534 * 		Discard reserved ring buffer sample, pointed to by *data*.
4535 * 		If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
4536 * 		of new data availability is sent.
4537 * 		If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
4538 * 		of new data availability is sent unconditionally.
4539 * 		If **0** is specified in *flags*, an adaptive notification
4540 * 		of new data availability is sent.
4541 *
4542 * 		See 'bpf_ringbuf_output()' for the definition of adaptive notification.
4543 * 	Return
4544 * 		Nothing. Always succeeds.
4545 *
4546 * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
4547 *	Description
4548 *		Query various characteristics of provided ring buffer. What
4549 *		exactly is queries is determined by *flags*:
4550 *
4551 *		* **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
4552 *		* **BPF_RB_RING_SIZE**: The size of ring buffer.
4553 *		* **BPF_RB_CONS_POS**: Consumer position (can wrap around).
4554 *		* **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
4555 *
4556 *		Data returned is just a momentary snapshot of actual values
4557 *		and could be inaccurate, so this facility should be used to
4558 *		power heuristics and for reporting, not to make 100% correct
4559 *		calculation.
4560 *	Return
4561 *		Requested value, or 0, if *flags* are not recognized.
4562 *
4563 * long bpf_csum_level(struct sk_buff *skb, u64 level)
4564 * 	Description
4565 * 		Change the skbs checksum level by one layer up or down, or
4566 * 		reset it entirely to none in order to have the stack perform
4567 * 		checksum validation. The level is applicable to the following
4568 * 		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
4569 * 		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
4570 * 		through **bpf_skb_adjust_room**\ () helper with passing in
4571 * 		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one	call
4572 * 		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
4573 * 		the UDP header is removed. Similarly, an encap of the latter
4574 * 		into the former could be accompanied by a helper call to
4575 * 		**bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
4576 * 		skb is still intended to be processed in higher layers of the
4577 * 		stack instead of just egressing at tc.
4578 *
4579 * 		There are three supported level settings at this time:
4580 *
4581 * 		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
4582 * 		  with CHECKSUM_UNNECESSARY.
4583 * 		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
4584 * 		  with CHECKSUM_UNNECESSARY.
4585 * 		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
4586 * 		  sets CHECKSUM_NONE to force checksum validation by the stack.
4587 * 		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
4588 * 		  skb->csum_level.
4589 * 	Return
4590 * 		0 on success, or a negative error in case of failure. In the
4591 * 		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
4592 * 		is returned or the error code -EACCES in case the skb is not
4593 * 		subject to CHECKSUM_UNNECESSARY.
4594 *
4595 * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
4596 *	Description
4597 *		Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
4598 *	Return
4599 *		*sk* if casting is valid, or **NULL** otherwise.
4600 *
4601 * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
4602 *	Description
4603 *		Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
4604 *	Return
4605 *		*sk* if casting is valid, or **NULL** otherwise.
4606 *
4607 * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
4608 * 	Description
4609 *		Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
4610 *	Return
4611 *		*sk* if casting is valid, or **NULL** otherwise.
4612 *
4613 * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
4614 * 	Description
4615 *		Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
4616 *	Return
4617 *		*sk* if casting is valid, or **NULL** otherwise.
4618 *
4619 * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
4620 * 	Description
4621 *		Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
4622 *	Return
4623 *		*sk* if casting is valid, or **NULL** otherwise.
4624 *
4625 * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
4626 *	Description
4627 *		Return a user or a kernel stack in bpf program provided buffer.
4628 *		Note: the user stack will only be populated if the *task* is
4629 *		the current task; all other tasks will return -EOPNOTSUPP.
4630 *		To achieve this, the helper needs *task*, which is a valid
4631 *		pointer to **struct task_struct**. To store the stacktrace, the
4632 *		bpf program provides *buf* with a nonnegative *size*.
4633 *
4634 *		The last argument, *flags*, holds the number of stack frames to
4635 *		skip (from 0 to 255), masked with
4636 *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
4637 *		the following flags:
4638 *
4639 *		**BPF_F_USER_STACK**
4640 *			Collect a user space stack instead of a kernel stack.
4641 *			The *task* must be the current task.
4642 *		**BPF_F_USER_BUILD_ID**
4643 *			Collect buildid+offset instead of ips for user stack,
4644 *			only valid if **BPF_F_USER_STACK** is also specified.
4645 *
4646 *		**bpf_get_task_stack**\ () can collect up to
4647 *		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
4648 *		to sufficient large buffer size. Note that
4649 *		this limit can be controlled with the **sysctl** program, and
4650 *		that it should be manually increased in order to profile long
4651 *		user stacks (such as stacks for Java programs). To do so, use:
4652 *
4653 *		::
4654 *
4655 *			# sysctl kernel.perf_event_max_stack=<new value>
4656 *	Return
4657 * 		The non-negative copied *buf* length equal to or less than
4658 * 		*size* on success, or a negative error in case of failure.
4659 *
4660 * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
4661 *	Description
4662 *		Load header option.  Support reading a particular TCP header
4663 *		option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
4664 *
4665 *		If *flags* is 0, it will search the option from the
4666 *		*skops*\ **->skb_data**.  The comment in **struct bpf_sock_ops**
4667 *		has details on what skb_data contains under different
4668 *		*skops*\ **->op**.
4669 *
4670 *		The first byte of the *searchby_res* specifies the
4671 *		kind that it wants to search.
4672 *
4673 *		If the searching kind is an experimental kind
4674 *		(i.e. 253 or 254 according to RFC6994).  It also
4675 *		needs to specify the "magic" which is either
4676 *		2 bytes or 4 bytes.  It then also needs to
4677 *		specify the size of the magic by using
4678 *		the 2nd byte which is "kind-length" of a TCP
4679 *		header option and the "kind-length" also
4680 *		includes the first 2 bytes "kind" and "kind-length"
4681 *		itself as a normal TCP header option also does.
4682 *
4683 *		For example, to search experimental kind 254 with
4684 *		2 byte magic 0xeB9F, the searchby_res should be
4685 *		[ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
4686 *
4687 *		To search for the standard window scale option (3),
4688 *		the *searchby_res* should be [ 3, 0, 0, .... 0 ].
4689 *		Note, kind-length must be 0 for regular option.
4690 *
4691 *		Searching for No-Op (0) and End-of-Option-List (1) are
4692 *		not supported.
4693 *
4694 *		*len* must be at least 2 bytes which is the minimal size
4695 *		of a header option.
4696 *
4697 *		Supported flags:
4698 *
4699 *		* **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
4700 *		  saved_syn packet or the just-received syn packet.
4701 *
4702 *	Return
4703 *		> 0 when found, the header option is copied to *searchby_res*.
4704 *		The return value is the total length copied. On failure, a
4705 *		negative error code is returned:
4706 *
4707 *		**-EINVAL** if a parameter is invalid.
4708 *
4709 *		**-ENOMSG** if the option is not found.
4710 *
4711 *		**-ENOENT** if no syn packet is available when
4712 *		**BPF_LOAD_HDR_OPT_TCP_SYN** is used.
4713 *
4714 *		**-ENOSPC** if there is not enough space.  Only *len* number of
4715 *		bytes are copied.
4716 *
4717 *		**-EFAULT** on failure to parse the header options in the
4718 *		packet.
4719 *
4720 *		**-EPERM** if the helper cannot be used under the current
4721 *		*skops*\ **->op**.
4722 *
4723 * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
4724 *	Description
4725 *		Store header option.  The data will be copied
4726 *		from buffer *from* with length *len* to the TCP header.
4727 *
4728 *		The buffer *from* should have the whole option that
4729 *		includes the kind, kind-length, and the actual
4730 *		option data.  The *len* must be at least kind-length
4731 *		long.  The kind-length does not have to be 4 byte
4732 *		aligned.  The kernel will take care of the padding
4733 *		and setting the 4 bytes aligned value to th->doff.
4734 *
4735 *		This helper will check for duplicated option
4736 *		by searching the same option in the outgoing skb.
4737 *
4738 *		This helper can only be called during
4739 *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4740 *
4741 *	Return
4742 *		0 on success, or negative error in case of failure:
4743 *
4744 *		**-EINVAL** If param is invalid.
4745 *
4746 *		**-ENOSPC** if there is not enough space in the header.
4747 *		Nothing has been written
4748 *
4749 *		**-EEXIST** if the option already exists.
4750 *
4751 *		**-EFAULT** on failure to parse the existing header options.
4752 *
4753 *		**-EPERM** if the helper cannot be used under the current
4754 *		*skops*\ **->op**.
4755 *
4756 * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
4757 *	Description
4758 *		Reserve *len* bytes for the bpf header option.  The
4759 *		space will be used by **bpf_store_hdr_opt**\ () later in
4760 *		**BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
4761 *
4762 *		If **bpf_reserve_hdr_opt**\ () is called multiple times,
4763 *		the total number of bytes will be reserved.
4764 *
4765 *		This helper can only be called during
4766 *		**BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
4767 *
4768 *	Return
4769 *		0 on success, or negative error in case of failure:
4770 *
4771 *		**-EINVAL** if a parameter is invalid.
4772 *
4773 *		**-ENOSPC** if there is not enough space in the header.
4774 *
4775 *		**-EPERM** if the helper cannot be used under the current
4776 *		*skops*\ **->op**.
4777 *
4778 * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
4779 *	Description
4780 *		Get a bpf_local_storage from an *inode*.
4781 *
4782 *		Logically, it could be thought of as getting the value from
4783 *		a *map* with *inode* as the **key**.  From this
4784 *		perspective,  the usage is not much different from
4785 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
4786 *		helper enforces the key must be an inode and the map must also
4787 *		be a **BPF_MAP_TYPE_INODE_STORAGE**.
4788 *
4789 *		Underneath, the value is stored locally at *inode* instead of
4790 *		the *map*.  The *map* is used as the bpf-local-storage
4791 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
4792 *		searched against all bpf_local_storage residing at *inode*.
4793 *
4794 *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4795 *		used such that a new bpf_local_storage will be
4796 *		created if one does not exist.  *value* can be used
4797 *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4798 *		the initial value of a bpf_local_storage.  If *value* is
4799 *		**NULL**, the new bpf_local_storage will be zero initialized.
4800 *	Return
4801 *		A bpf_local_storage pointer is returned on success.
4802 *
4803 *		**NULL** if not found or there was an error in adding
4804 *		a new bpf_local_storage.
4805 *
4806 * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
4807 *	Description
4808 *		Delete a bpf_local_storage from an *inode*.
4809 *	Return
4810 *		0 on success.
4811 *
4812 *		**-ENOENT** if the bpf_local_storage cannot be found.
4813 *
4814 * long bpf_d_path(struct path *path, char *buf, u32 sz)
4815 *	Description
4816 *		Return full path for given **struct path** object, which
4817 *		needs to be the kernel BTF *path* object. The path is
4818 *		returned in the provided buffer *buf* of size *sz* and
4819 *		is zero terminated.
4820 *
4821 *	Return
4822 *		On success, the strictly positive length of the string,
4823 *		including the trailing NUL character. On error, a negative
4824 *		value.
4825 *
4826 * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
4827 * 	Description
4828 * 		Read *size* bytes from user space address *user_ptr* and store
4829 * 		the data in *dst*. This is a wrapper of **copy_from_user**\ ().
4830 * 	Return
4831 * 		0 on success, or a negative error in case of failure.
4832 *
4833 * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
4834 *	Description
4835 *		Use BTF to store a string representation of *ptr*->ptr in *str*,
4836 *		using *ptr*->type_id.  This value should specify the type
4837 *		that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
4838 *		can be used to look up vmlinux BTF type ids. Traversing the
4839 *		data structure using BTF, the type information and values are
4840 *		stored in the first *str_size* - 1 bytes of *str*.  Safe copy of
4841 *		the pointer data is carried out to avoid kernel crashes during
4842 *		operation.  Smaller types can use string space on the stack;
4843 *		larger programs can use map data to store the string
4844 *		representation.
4845 *
4846 *		The string can be subsequently shared with userspace via
4847 *		bpf_perf_event_output() or ring buffer interfaces.
4848 *		bpf_trace_printk() is to be avoided as it places too small
4849 *		a limit on string size to be useful.
4850 *
4851 *		*flags* is a combination of
4852 *
4853 *		**BTF_F_COMPACT**
4854 *			no formatting around type information
4855 *		**BTF_F_NONAME**
4856 *			no struct/union member names/types
4857 *		**BTF_F_PTR_RAW**
4858 *			show raw (unobfuscated) pointer values;
4859 *			equivalent to printk specifier %px.
4860 *		**BTF_F_ZERO**
4861 *			show zero-valued struct/union members; they
4862 *			are not displayed by default
4863 *
4864 *	Return
4865 *		The number of bytes that were written (or would have been
4866 *		written if output had to be truncated due to string size),
4867 *		or a negative error in cases of failure.
4868 *
4869 * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
4870 *	Description
4871 *		Use BTF to write to seq_write a string representation of
4872 *		*ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
4873 *		*flags* are identical to those used for bpf_snprintf_btf.
4874 *	Return
4875 *		0 on success or a negative error in case of failure.
4876 *
4877 * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
4878 * 	Description
4879 * 		See **bpf_get_cgroup_classid**\ () for the main description.
4880 * 		This helper differs from **bpf_get_cgroup_classid**\ () in that
4881 * 		the cgroup v1 net_cls class is retrieved only from the *skb*'s
4882 * 		associated socket instead of the current process.
4883 * 	Return
4884 * 		The id is returned or 0 in case the id could not be retrieved.
4885 *
4886 * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
4887 * 	Description
4888 * 		Redirect the packet to another net device of index *ifindex*
4889 * 		and fill in L2 addresses from neighboring subsystem. This helper
4890 * 		is somewhat similar to **bpf_redirect**\ (), except that it
4891 * 		populates L2 addresses as well, meaning, internally, the helper
4892 * 		relies on the neighbor lookup for the L2 address of the nexthop.
4893 *
4894 * 		The helper will perform a FIB lookup based on the skb's
4895 * 		networking header to get the address of the next hop, unless
4896 * 		this is supplied by the caller in the *params* argument. The
4897 * 		*plen* argument indicates the len of *params* and should be set
4898 * 		to 0 if *params* is NULL.
4899 *
4900 * 		The *flags* argument is reserved and must be 0. The helper is
4901 * 		currently only supported for tc BPF program types, and enabled
4902 * 		for IPv4 and IPv6 protocols.
4903 * 	Return
4904 * 		The helper returns **TC_ACT_REDIRECT** on success or
4905 * 		**TC_ACT_SHOT** on error.
4906 *
4907 * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
4908 *     Description
4909 *             Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4910 *             pointer to the percpu kernel variable on *cpu*. A ksym is an
4911 *             extern variable decorated with '__ksym'. For ksym, there is a
4912 *             global var (either static or global) defined of the same name
4913 *             in the kernel. The ksym is percpu if the global var is percpu.
4914 *             The returned pointer points to the global percpu var on *cpu*.
4915 *
4916 *             bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
4917 *             kernel, except that bpf_per_cpu_ptr() may return NULL. This
4918 *             happens if *cpu* is larger than nr_cpu_ids. The caller of
4919 *             bpf_per_cpu_ptr() must check the returned value.
4920 *     Return
4921 *             A pointer pointing to the kernel percpu variable on *cpu*, or
4922 *             NULL, if *cpu* is invalid.
4923 *
4924 * void *bpf_this_cpu_ptr(const void *percpu_ptr)
4925 *	Description
4926 *		Take a pointer to a percpu ksym, *percpu_ptr*, and return a
4927 *		pointer to the percpu kernel variable on this cpu. See the
4928 *		description of 'ksym' in **bpf_per_cpu_ptr**\ ().
4929 *
4930 *		bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
4931 *		the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
4932 *		never return NULL.
4933 *	Return
4934 *		A pointer pointing to the kernel percpu variable on this cpu.
4935 *
4936 * long bpf_redirect_peer(u32 ifindex, u64 flags)
4937 * 	Description
4938 * 		Redirect the packet to another net device of index *ifindex*.
4939 * 		This helper is somewhat similar to **bpf_redirect**\ (), except
4940 * 		that the redirection happens to the *ifindex*' peer device and
4941 * 		the netns switch takes place from ingress to ingress without
4942 * 		going through the CPU's backlog queue.
4943 *
4944 * 		The *flags* argument is reserved and must be 0. The helper is
4945 * 		currently only supported for tc BPF program types at the
4946 * 		ingress hook and for veth and netkit target device types. The
4947 * 		peer device must reside in a different network namespace.
4948 * 	Return
4949 * 		The helper returns **TC_ACT_REDIRECT** on success or
4950 * 		**TC_ACT_SHOT** on error.
4951 *
4952 * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
4953 *	Description
4954 *		Get a bpf_local_storage from the *task*.
4955 *
4956 *		Logically, it could be thought of as getting the value from
4957 *		a *map* with *task* as the **key**.  From this
4958 *		perspective,  the usage is not much different from
4959 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
4960 *		helper enforces the key must be a task_struct and the map must also
4961 *		be a **BPF_MAP_TYPE_TASK_STORAGE**.
4962 *
4963 *		Underneath, the value is stored locally at *task* instead of
4964 *		the *map*.  The *map* is used as the bpf-local-storage
4965 *		"type". The bpf-local-storage "type" (i.e. the *map*) is
4966 *		searched against all bpf_local_storage residing at *task*.
4967 *
4968 *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
4969 *		used such that a new bpf_local_storage will be
4970 *		created if one does not exist.  *value* can be used
4971 *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
4972 *		the initial value of a bpf_local_storage.  If *value* is
4973 *		**NULL**, the new bpf_local_storage will be zero initialized.
4974 *	Return
4975 *		A bpf_local_storage pointer is returned on success.
4976 *
4977 *		**NULL** if not found or there was an error in adding
4978 *		a new bpf_local_storage.
4979 *
4980 * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
4981 *	Description
4982 *		Delete a bpf_local_storage from a *task*.
4983 *	Return
4984 *		0 on success.
4985 *
4986 *		**-ENOENT** if the bpf_local_storage cannot be found.
4987 *
4988 * struct task_struct *bpf_get_current_task_btf(void)
4989 *	Description
4990 *		Return a BTF pointer to the "current" task.
4991 *		This pointer can also be used in helpers that accept an
4992 *		*ARG_PTR_TO_BTF_ID* of type *task_struct*.
4993 *	Return
4994 *		Pointer to the current task.
4995 *
4996 * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
4997 *	Description
4998 *		Set or clear certain options on *bprm*:
4999 *
5000 *		**BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
5001 *		which sets the **AT_SECURE** auxv for glibc. The bit
5002 *		is cleared if the flag is not specified.
5003 *	Return
5004 *		**-EINVAL** if invalid *flags* are passed, zero otherwise.
5005 *
5006 * u64 bpf_ktime_get_coarse_ns(void)
5007 * 	Description
5008 * 		Return a coarse-grained version of the time elapsed since
5009 * 		system boot, in nanoseconds. Does not include time the system
5010 * 		was suspended.
5011 *
5012 * 		See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
5013 * 	Return
5014 * 		Current *ktime*.
5015 *
5016 * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
5017 *	Description
5018 *		Returns the stored IMA hash of the *inode* (if it's available).
5019 *		If the hash is larger than *size*, then only *size*
5020 *		bytes will be copied to *dst*
5021 *	Return
5022 *		The **hash_algo** is returned on success,
5023 *		**-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
5024 *		invalid arguments are passed.
5025 *
5026 * struct socket *bpf_sock_from_file(struct file *file)
5027 *	Description
5028 *		If the given file represents a socket, returns the associated
5029 *		socket.
5030 *	Return
5031 *		A pointer to a struct socket on success or NULL if the file is
5032 *		not a socket.
5033 *
5034 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
5035 *	Description
5036 *		Check packet size against exceeding MTU of net device (based
5037 *		on *ifindex*).  This helper will likely be used in combination
5038 *		with helpers that adjust/change the packet size.
5039 *
5040 *		The argument *len_diff* can be used for querying with a planned
5041 *		size change. This allows to check MTU prior to changing packet
5042 *		ctx. Providing a *len_diff* adjustment that is larger than the
5043 *		actual packet size (resulting in negative packet size) will in
5044 *		principle not exceed the MTU, which is why it is not considered
5045 *		a failure.  Other BPF helpers are needed for performing the
5046 *		planned size change; therefore the responsibility for catching
5047 *		a negative packet size belongs in those helpers.
5048 *
5049 *		Specifying *ifindex* zero means the MTU check is performed
5050 *		against the current net device.  This is practical if this isn't
5051 *		used prior to redirect.
5052 *
5053 *		On input *mtu_len* must be a valid pointer, else verifier will
5054 *		reject BPF program.  If the value *mtu_len* is initialized to
5055 *		zero then the ctx packet size is use.  When value *mtu_len* is
5056 *		provided as input this specify the L3 length that the MTU check
5057 *		is done against. Remember XDP and TC length operate at L2, but
5058 *		this value is L3 as this correlate to MTU and IP-header tot_len
5059 *		values which are L3 (similar behavior as bpf_fib_lookup).
5060 *
5061 *		The Linux kernel route table can configure MTUs on a more
5062 *		specific per route level, which is not provided by this helper.
5063 *		For route level MTU checks use the **bpf_fib_lookup**\ ()
5064 *		helper.
5065 *
5066 *		*ctx* is either **struct xdp_md** for XDP programs or
5067 *		**struct sk_buff** for tc cls_act programs.
5068 *
5069 *		The *flags* argument can be a combination of one or more of the
5070 *		following values:
5071 *
5072 *		**BPF_MTU_CHK_SEGS**
5073 *			This flag will only works for *ctx* **struct sk_buff**.
5074 *			If packet context contains extra packet segment buffers
5075 *			(often knows as GSO skb), then MTU check is harder to
5076 *			check at this point, because in transmit path it is
5077 *			possible for the skb packet to get re-segmented
5078 *			(depending on net device features).  This could still be
5079 *			a MTU violation, so this flag enables performing MTU
5080 *			check against segments, with a different violation
5081 *			return code to tell it apart. Check cannot use len_diff.
5082 *
5083 *		On return *mtu_len* pointer contains the MTU value of the net
5084 *		device.  Remember the net device configured MTU is the L3 size,
5085 *		which is returned here and XDP and TC length operate at L2.
5086 *		Helper take this into account for you, but remember when using
5087 *		MTU value in your BPF-code.
5088 *
5089 *	Return
5090 *		* 0 on success, and populate MTU value in *mtu_len* pointer.
5091 *
5092 *		* < 0 if any input argument is invalid (*mtu_len* not updated)
5093 *
5094 *		MTU violations return positive values, but also populate MTU
5095 *		value in *mtu_len* pointer, as this can be needed for
5096 *		implementing PMTU handing:
5097 *
5098 *		* **BPF_MTU_CHK_RET_FRAG_NEEDED**
5099 *		* **BPF_MTU_CHK_RET_SEGS_TOOBIG**
5100 *
5101 * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
5102 *	Description
5103 *		For each element in **map**, call **callback_fn** function with
5104 *		**map**, **callback_ctx** and other map-specific parameters.
5105 *		The **callback_fn** should be a static function and
5106 *		the **callback_ctx** should be a pointer to the stack.
5107 *		The **flags** is used to control certain aspects of the helper.
5108 *		Currently, the **flags** must be 0.
5109 *
5110 *		The following are a list of supported map types and their
5111 *		respective expected callback signatures:
5112 *
5113 *		BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
5114 *		BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
5115 *		BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
5116 *
5117 *		long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
5118 *
5119 *		For per_cpu maps, the map_value is the value on the cpu where the
5120 *		bpf_prog is running.
5121 *
5122 *		If **callback_fn** return 0, the helper will continue to the next
5123 *		element. If return value is 1, the helper will skip the rest of
5124 *		elements and return. Other return values are not used now.
5125 *
5126 *	Return
5127 *		The number of traversed map elements for success, **-EINVAL** for
5128 *		invalid **flags**.
5129 *
5130 * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
5131 *	Description
5132 *		Outputs a string into the **str** buffer of size **str_size**
5133 *		based on a format string stored in a read-only map pointed by
5134 *		**fmt**.
5135 *
5136 *		Each format specifier in **fmt** corresponds to one u64 element
5137 *		in the **data** array. For strings and pointers where pointees
5138 *		are accessed, only the pointer values are stored in the *data*
5139 *		array. The *data_len* is the size of *data* in bytes - must be
5140 *		a multiple of 8.
5141 *
5142 *		Formats **%s** and **%p{i,I}{4,6}** require to read kernel
5143 *		memory. Reading kernel memory may fail due to either invalid
5144 *		address or valid address but requiring a major memory fault. If
5145 *		reading kernel memory fails, the string for **%s** will be an
5146 *		empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
5147 *		Not returning error to bpf program is consistent with what
5148 *		**bpf_trace_printk**\ () does for now.
5149 *
5150 *	Return
5151 *		The strictly positive length of the formatted string, including
5152 *		the trailing zero character. If the return value is greater than
5153 *		**str_size**, **str** contains a truncated string, guaranteed to
5154 *		be zero-terminated except when **str_size** is 0.
5155 *
5156 *		Or **-EBUSY** if the per-CPU memory copy buffer is busy.
5157 *
5158 * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
5159 * 	Description
5160 * 		Execute bpf syscall with given arguments.
5161 * 	Return
5162 * 		A syscall result.
5163 *
5164 * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
5165 * 	Description
5166 * 		Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
5167 * 	Return
5168 * 		Returns btf_id and btf_obj_fd in lower and upper 32 bits.
5169 *
5170 * long bpf_sys_close(u32 fd)
5171 * 	Description
5172 * 		Execute close syscall for given FD.
5173 * 	Return
5174 * 		A syscall result.
5175 *
5176 * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
5177 *	Description
5178 *		Initialize the timer.
5179 *		First 4 bits of *flags* specify clockid.
5180 *		Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
5181 *		All other bits of *flags* are reserved.
5182 *		The verifier will reject the program if *timer* is not from
5183 *		the same *map*.
5184 *	Return
5185 *		0 on success.
5186 *		**-EBUSY** if *timer* is already initialized.
5187 *		**-EINVAL** if invalid *flags* are passed.
5188 *		**-EPERM** if *timer* is in a map that doesn't have any user references.
5189 *		The user space should either hold a file descriptor to a map with timers
5190 *		or pin such map in bpffs. When map is unpinned or file descriptor is
5191 *		closed all timers in the map will be cancelled and freed.
5192 *
5193 * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
5194 *	Description
5195 *		Configure the timer to call *callback_fn* static function.
5196 *	Return
5197 *		0 on success.
5198 *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
5199 *		**-EPERM** if *timer* is in a map that doesn't have any user references.
5200 *		The user space should either hold a file descriptor to a map with timers
5201 *		or pin such map in bpffs. When map is unpinned or file descriptor is
5202 *		closed all timers in the map will be cancelled and freed.
5203 *
5204 * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
5205 *	Description
5206 *		Set timer expiration N nanoseconds from the current time. The
5207 *		configured callback will be invoked in soft irq context on some cpu
5208 *		and will not repeat unless another bpf_timer_start() is made.
5209 *		In such case the next invocation can migrate to a different cpu.
5210 *		Since struct bpf_timer is a field inside map element the map
5211 *		owns the timer. The bpf_timer_set_callback() will increment refcnt
5212 *		of BPF program to make sure that callback_fn code stays valid.
5213 *		When user space reference to a map reaches zero all timers
5214 *		in a map are cancelled and corresponding program's refcnts are
5215 *		decremented. This is done to make sure that Ctrl-C of a user
5216 *		process doesn't leave any timers running. If map is pinned in
5217 *		bpffs the callback_fn can re-arm itself indefinitely.
5218 *		bpf_map_update/delete_elem() helpers and user space sys_bpf commands
5219 *		cancel and free the timer in the given map element.
5220 *		The map can contain timers that invoke callback_fn-s from different
5221 *		programs. The same callback_fn can serve different timers from
5222 *		different maps if key/value layout matches across maps.
5223 *		Every bpf_timer_set_callback() can have different callback_fn.
5224 *
5225 *		*flags* can be one of:
5226 *
5227 *		**BPF_F_TIMER_ABS**
5228 *			Start the timer in absolute expire value instead of the
5229 *			default relative one.
5230 *		**BPF_F_TIMER_CPU_PIN**
5231 *			Timer will be pinned to the CPU of the caller.
5232 *
5233 *	Return
5234 *		0 on success.
5235 *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
5236 *		or invalid *flags* are passed.
5237 *
5238 * long bpf_timer_cancel(struct bpf_timer *timer)
5239 *	Description
5240 *		Cancel the timer and wait for callback_fn to finish if it was running.
5241 *	Return
5242 *		0 if the timer was not active.
5243 *		1 if the timer was active.
5244 *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
5245 *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
5246 *		own timer which would have led to a deadlock otherwise.
5247 *
5248 * u64 bpf_get_func_ip(void *ctx)
5249 * 	Description
5250 * 		Get address of the traced function (for tracing and kprobe programs).
5251 *
5252 * 		When called for kprobe program attached as uprobe it returns
5253 * 		probe address for both entry and return uprobe.
5254 *
5255 * 	Return
5256 * 		Address of the traced function for kprobe.
5257 * 		0 for kprobes placed within the function (not at the entry).
5258 * 		Address of the probe for uprobe and return uprobe.
5259 *
5260 * u64 bpf_get_attach_cookie(void *ctx)
5261 * 	Description
5262 * 		Get bpf_cookie value provided (optionally) during the program
5263 * 		attachment. It might be different for each individual
5264 * 		attachment, even if BPF program itself is the same.
5265 * 		Expects BPF program context *ctx* as a first argument.
5266 *
5267 * 		Supported for the following program types:
5268 *			- kprobe/uprobe;
5269 *			- tracepoint;
5270 *			- perf_event.
5271 * 	Return
5272 *		Value specified by user at BPF link creation/attachment time
5273 *		or 0, if it was not specified.
5274 *
5275 * long bpf_task_pt_regs(struct task_struct *task)
5276 *	Description
5277 *		Get the struct pt_regs associated with **task**.
5278 *	Return
5279 *		A pointer to struct pt_regs.
5280 *
5281 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
5282 *	Description
5283 *		Get branch trace from hardware engines like Intel LBR. The
5284 *		hardware engine is stopped shortly after the helper is
5285 *		called. Therefore, the user need to filter branch entries
5286 *		based on the actual use case. To capture branch trace
5287 *		before the trigger point of the BPF program, the helper
5288 *		should be called at the beginning of the BPF program.
5289 *
5290 *		The data is stored as struct perf_branch_entry into output
5291 *		buffer *entries*. *size* is the size of *entries* in bytes.
5292 *		*flags* is reserved for now and must be zero.
5293 *
5294 *	Return
5295 *		On success, number of bytes written to *buf*. On error, a
5296 *		negative value.
5297 *
5298 *		**-EINVAL** if *flags* is not zero.
5299 *
5300 *		**-ENOENT** if architecture does not support branch records.
5301 *
5302 * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
5303 *	Description
5304 *		Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
5305 *		to format and can handle more format args as a result.
5306 *
5307 *		Arguments are to be used as in **bpf_seq_printf**\ () helper.
5308 *	Return
5309 *		The number of bytes written to the buffer, or a negative error
5310 *		in case of failure.
5311 *
5312 * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
5313 * 	Description
5314 *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
5315 *	Return
5316 *		*sk* if casting is valid, or **NULL** otherwise.
5317 *
5318 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
5319 *	Description
5320 *		Get the address of a kernel symbol, returned in *res*. *res* is
5321 *		set to 0 if the symbol is not found.
5322 *	Return
5323 *		On success, zero. On error, a negative value.
5324 *
5325 *		**-EINVAL** if *flags* is not zero.
5326 *
5327 *		**-EINVAL** if string *name* is not the same size as *name_sz*.
5328 *
5329 *		**-ENOENT** if symbol is not found.
5330 *
5331 *		**-EPERM** if caller does not have permission to obtain kernel address.
5332 *
5333 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
5334 *	Description
5335 *		Find vma of *task* that contains *addr*, call *callback_fn*
5336 *		function with *task*, *vma*, and *callback_ctx*.
5337 *		The *callback_fn* should be a static function and
5338 *		the *callback_ctx* should be a pointer to the stack.
5339 *		The *flags* is used to control certain aspects of the helper.
5340 *		Currently, the *flags* must be 0.
5341 *
5342 *		The expected callback signature is
5343 *
5344 *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
5345 *
5346 *	Return
5347 *		0 on success.
5348 *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
5349 *		**-EBUSY** if failed to try lock mmap_lock.
5350 *		**-EINVAL** for invalid **flags**.
5351 *
5352 * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
5353 *	Description
5354 *		For **nr_loops**, call **callback_fn** function
5355 *		with **callback_ctx** as the context parameter.
5356 *		The **callback_fn** should be a static function and
5357 *		the **callback_ctx** should be a pointer to the stack.
5358 *		The **flags** is used to control certain aspects of the helper.
5359 *		Currently, the **flags** must be 0. Currently, nr_loops is
5360 *		limited to 1 << 23 (~8 million) loops.
5361 *
5362 *		long (\*callback_fn)(u32 index, void \*ctx);
5363 *
5364 *		where **index** is the current index in the loop. The index
5365 *		is zero-indexed.
5366 *
5367 *		If **callback_fn** returns 0, the helper will continue to the next
5368 *		loop. If return value is 1, the helper will skip the rest of
5369 *		the loops and return. Other return values are not used now,
5370 *		and will be rejected by the verifier.
5371 *
5372 *	Return
5373 *		The number of loops performed, **-EINVAL** for invalid **flags**,
5374 *		**-E2BIG** if **nr_loops** exceeds the maximum number of loops.
5375 *
5376 * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
5377 *	Description
5378 *		Do strncmp() between **s1** and **s2**. **s1** doesn't need
5379 *		to be null-terminated and **s1_sz** is the maximum storage
5380 *		size of **s1**. **s2** must be a read-only string.
5381 *	Return
5382 *		An integer less than, equal to, or greater than zero
5383 *		if the first **s1_sz** bytes of **s1** is found to be
5384 *		less than, to match, or be greater than **s2**.
5385 *
5386 * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
5387 *	Description
5388 *		Get **n**-th argument register (zero based) of the traced function (for tracing programs)
5389 *		returned in **value**.
5390 *
5391 *	Return
5392 *		0 on success.
5393 *		**-EINVAL** if n >= argument register count of traced function.
5394 *
5395 * long bpf_get_func_ret(void *ctx, u64 *value)
5396 *	Description
5397 *		Get return value of the traced function (for tracing programs)
5398 *		in **value**.
5399 *
5400 *	Return
5401 *		0 on success.
5402 *		**-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
5403 *
5404 * long bpf_get_func_arg_cnt(void *ctx)
5405 *	Description
5406 *		Get number of registers of the traced function (for tracing programs) where
5407 *		function arguments are stored in these registers.
5408 *
5409 *	Return
5410 *		The number of argument registers of the traced function.
5411 *
5412 * int bpf_get_retval(void)
5413 *	Description
5414 *		Get the BPF program's return value that will be returned to the upper layers.
5415 *
5416 *		This helper is currently supported by cgroup programs and only by the hooks
5417 *		where BPF program's return value is returned to the userspace via errno.
5418 *	Return
5419 *		The BPF program's return value.
5420 *
5421 * int bpf_set_retval(int retval)
5422 *	Description
5423 *		Set the BPF program's return value that will be returned to the upper layers.
5424 *
5425 *		This helper is currently supported by cgroup programs and only by the hooks
5426 *		where BPF program's return value is returned to the userspace via errno.
5427 *
5428 *		Note that there is the following corner case where the program exports an error
5429 *		via bpf_set_retval but signals success via 'return 1':
5430 *
5431 *			bpf_set_retval(-EPERM);
5432 *			return 1;
5433 *
5434 *		In this case, the BPF program's return value will use helper's -EPERM. This
5435 *		still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case.
5436 *
5437 *	Return
5438 *		0 on success, or a negative error in case of failure.
5439 *
5440 * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
5441 *	Description
5442 *		Get the total size of a given xdp buff (linear and paged area)
5443 *	Return
5444 *		The total size of a given xdp buffer.
5445 *
5446 * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5447 *	Description
5448 *		This helper is provided as an easy way to load data from a
5449 *		xdp buffer. It can be used to load *len* bytes from *offset* from
5450 *		the frame associated to *xdp_md*, into the buffer pointed by
5451 *		*buf*.
5452 *	Return
5453 *		0 on success, or a negative error in case of failure.
5454 *
5455 * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
5456 *	Description
5457 *		Store *len* bytes from buffer *buf* into the frame
5458 *		associated to *xdp_md*, at *offset*.
5459 *	Return
5460 *		0 on success, or a negative error in case of failure.
5461 *
5462 * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
5463 *	Description
5464 *		Read *size* bytes from user space address *user_ptr* in *tsk*'s
5465 *		address space, and stores the data in *dst*. *flags* is not
5466 *		used yet and is provided for future extensibility. This helper
5467 *		can only be used by sleepable programs.
5468 *	Return
5469 *		0 on success, or a negative error in case of failure. On error
5470 *		*dst* buffer is zeroed out.
5471 *
5472 * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
5473 *	Description
5474 *		Change the __sk_buff->tstamp_type to *tstamp_type*
5475 *		and set *tstamp* to the __sk_buff->tstamp together.
5476 *
5477 *		If there is no need to change the __sk_buff->tstamp_type,
5478 *		the tstamp value can be directly written to __sk_buff->tstamp
5479 *		instead.
5480 *
5481 *		BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
5482 *		will be kept during bpf_redirect_*().  A non zero
5483 *		*tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
5484 *		*tstamp_type*.
5485 *
5486 *		A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
5487 *		with a zero *tstamp*.
5488 *
5489 *		Only IPv4 and IPv6 skb->protocol are supported.
5490 *
5491 *		This function is most useful when it needs to set a
5492 *		mono delivery time to __sk_buff->tstamp and then
5493 *		bpf_redirect_*() to the egress of an iface.  For example,
5494 *		changing the (rcv) timestamp in __sk_buff->tstamp at
5495 *		ingress to a mono delivery time and then bpf_redirect_*()
5496 *		to sch_fq@phy-dev.
5497 *	Return
5498 *		0 on success.
5499 *		**-EINVAL** for invalid input
5500 *		**-EOPNOTSUPP** for unsupported protocol
5501 *
5502 * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
5503 *	Description
5504 *		Returns a calculated IMA hash of the *file*.
5505 *		If the hash is larger than *size*, then only *size*
5506 *		bytes will be copied to *dst*
5507 *	Return
5508 *		The **hash_algo** is returned on success,
5509 *		**-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
5510 *		invalid arguments are passed.
5511 *
5512 * void *bpf_kptr_xchg(void *map_value, void *ptr)
5513 *	Description
5514 *		Exchange kptr at pointer *map_value* with *ptr*, and return the
5515 *		old value. *ptr* can be NULL, otherwise it must be a referenced
5516 *		pointer which will be released when this helper is called.
5517 *	Return
5518 *		The old value of kptr (which can be NULL). The returned pointer
5519 *		if not NULL, is a reference which must be released using its
5520 *		corresponding release function, or moved into a BPF map before
5521 *		program exit.
5522 *
5523 * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
5524 * 	Description
5525 * 		Perform a lookup in *percpu map* for an entry associated to
5526 * 		*key* on *cpu*.
5527 * 	Return
5528 * 		Map value associated to *key* on *cpu*, or **NULL** if no entry
5529 * 		was found or *cpu* is invalid.
5530 *
5531 * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
5532 *	Description
5533 *		Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
5534 *	Return
5535 *		*sk* if casting is valid, or **NULL** otherwise.
5536 *
5537 * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
5538 *	Description
5539 *		Get a dynptr to local memory *data*.
5540 *
5541 *		*data* must be a ptr to a map value.
5542 *		The maximum *size* supported is DYNPTR_MAX_SIZE.
5543 *		*flags* is currently unused.
5544 *	Return
5545 *		0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
5546 *		-EINVAL if flags is not 0.
5547 *
5548 * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
5549 *	Description
5550 *		Reserve *size* bytes of payload in a ring buffer *ringbuf*
5551 *		through the dynptr interface. *flags* must be 0.
5552 *
5553 *		Please note that a corresponding bpf_ringbuf_submit_dynptr or
5554 *		bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
5555 *		reservation fails. This is enforced by the verifier.
5556 *	Return
5557 *		0 on success, or a negative error in case of failure.
5558 *
5559 * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
5560 *	Description
5561 *		Submit reserved ring buffer sample, pointed to by *data*,
5562 *		through the dynptr interface. This is a no-op if the dynptr is
5563 *		invalid/null.
5564 *
5565 *		For more information on *flags*, please see
5566 *		'bpf_ringbuf_submit'.
5567 *	Return
5568 *		Nothing. Always succeeds.
5569 *
5570 * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
5571 *	Description
5572 *		Discard reserved ring buffer sample through the dynptr
5573 *		interface. This is a no-op if the dynptr is invalid/null.
5574 *
5575 *		For more information on *flags*, please see
5576 *		'bpf_ringbuf_discard'.
5577 *	Return
5578 *		Nothing. Always succeeds.
5579 *
5580 * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
5581 *	Description
5582 *		Read *len* bytes from *src* into *dst*, starting from *offset*
5583 *		into *src*.
5584 *		*flags* is currently unused.
5585 *	Return
5586 *		0 on success, -E2BIG if *offset* + *len* exceeds the length
5587 *		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
5588 *		*flags* is not 0.
5589 *
5590 * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
5591 *	Description
5592 *		Write *len* bytes from *src* into *dst*, starting from *offset*
5593 *		into *dst*.
5594 *
5595 *		*flags* must be 0 except for skb-type dynptrs.
5596 *
5597 *		For skb-type dynptrs:
5598 *		    *  All data slices of the dynptr are automatically
5599 *		       invalidated after **bpf_dynptr_write**\ (). This is
5600 *		       because writing may pull the skb and change the
5601 *		       underlying packet buffer.
5602 *
5603 *		    *  For *flags*, please see the flags accepted by
5604 *		       **bpf_skb_store_bytes**\ ().
5605 *	Return
5606 *		0 on success, -E2BIG if *offset* + *len* exceeds the length
5607 *		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
5608 *		is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
5609 *		other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
5610 *
5611 * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
5612 *	Description
5613 *		Get a pointer to the underlying dynptr data.
5614 *
5615 *		*len* must be a statically known value. The returned data slice
5616 *		is invalidated whenever the dynptr is invalidated.
5617 *
5618 *		skb and xdp type dynptrs may not use bpf_dynptr_data. They should
5619 *		instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
5620 *	Return
5621 *		Pointer to the underlying dynptr data, NULL if the dynptr is
5622 *		read-only, if the dynptr is invalid, or if the offset and length
5623 *		is out of bounds.
5624 *
5625 * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
5626 *	Description
5627 *		Try to issue a SYN cookie for the packet with corresponding
5628 *		IPv4/TCP headers, *iph* and *th*, without depending on a
5629 *		listening socket.
5630 *
5631 *		*iph* points to the IPv4 header.
5632 *
5633 *		*th* points to the start of the TCP header, while *th_len*
5634 *		contains the length of the TCP header (at least
5635 *		**sizeof**\ (**struct tcphdr**)).
5636 *	Return
5637 *		On success, lower 32 bits hold the generated SYN cookie in
5638 *		followed by 16 bits which hold the MSS value for that cookie,
5639 *		and the top 16 bits are unused.
5640 *
5641 *		On failure, the returned value is one of the following:
5642 *
5643 *		**-EINVAL** if *th_len* is invalid.
5644 *
5645 * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
5646 *	Description
5647 *		Try to issue a SYN cookie for the packet with corresponding
5648 *		IPv6/TCP headers, *iph* and *th*, without depending on a
5649 *		listening socket.
5650 *
5651 *		*iph* points to the IPv6 header.
5652 *
5653 *		*th* points to the start of the TCP header, while *th_len*
5654 *		contains the length of the TCP header (at least
5655 *		**sizeof**\ (**struct tcphdr**)).
5656 *	Return
5657 *		On success, lower 32 bits hold the generated SYN cookie in
5658 *		followed by 16 bits which hold the MSS value for that cookie,
5659 *		and the top 16 bits are unused.
5660 *
5661 *		On failure, the returned value is one of the following:
5662 *
5663 *		**-EINVAL** if *th_len* is invalid.
5664 *
5665 *		**-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5666 *
5667 * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
5668 *	Description
5669 *		Check whether *iph* and *th* contain a valid SYN cookie ACK
5670 *		without depending on a listening socket.
5671 *
5672 *		*iph* points to the IPv4 header.
5673 *
5674 *		*th* points to the TCP header.
5675 *	Return
5676 *		0 if *iph* and *th* are a valid SYN cookie ACK.
5677 *
5678 *		On failure, the returned value is one of the following:
5679 *
5680 *		**-EACCES** if the SYN cookie is not valid.
5681 *
5682 * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
5683 *	Description
5684 *		Check whether *iph* and *th* contain a valid SYN cookie ACK
5685 *		without depending on a listening socket.
5686 *
5687 *		*iph* points to the IPv6 header.
5688 *
5689 *		*th* points to the TCP header.
5690 *	Return
5691 *		0 if *iph* and *th* are a valid SYN cookie ACK.
5692 *
5693 *		On failure, the returned value is one of the following:
5694 *
5695 *		**-EACCES** if the SYN cookie is not valid.
5696 *
5697 *		**-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
5698 *
5699 * u64 bpf_ktime_get_tai_ns(void)
5700 *	Description
5701 *		A nonsettable system-wide clock derived from wall-clock time but
5702 *		ignoring leap seconds.  This clock does not experience
5703 *		discontinuities and backwards jumps caused by NTP inserting leap
5704 *		seconds as CLOCK_REALTIME does.
5705 *
5706 *		See: **clock_gettime**\ (**CLOCK_TAI**)
5707 *	Return
5708 *		Current *ktime*.
5709 *
5710 * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
5711 *	Description
5712 *		Drain samples from the specified user ring buffer, and invoke
5713 *		the provided callback for each such sample:
5714 *
5715 *		long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
5716 *
5717 *		If **callback_fn** returns 0, the helper will continue to try
5718 *		and drain the next sample, up to a maximum of
5719 *		BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
5720 *		the helper will skip the rest of the samples and return. Other
5721 *		return values are not used now, and will be rejected by the
5722 *		verifier.
5723 *	Return
5724 *		The number of drained samples if no error was encountered while
5725 *		draining samples, or 0 if no samples were present in the ring
5726 *		buffer. If a user-space producer was epoll-waiting on this map,
5727 *		and at least one sample was drained, they will receive an event
5728 *		notification notifying them of available space in the ring
5729 *		buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
5730 *		function, no wakeup notification will be sent. If the
5731 *		BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
5732 *		be sent even if no sample was drained.
5733 *
5734 *		On failure, the returned value is one of the following:
5735 *
5736 *		**-EBUSY** if the ring buffer is contended, and another calling
5737 *		context was concurrently draining the ring buffer.
5738 *
5739 *		**-EINVAL** if user-space is not properly tracking the ring
5740 *		buffer due to the producer position not being aligned to 8
5741 *		bytes, a sample not being aligned to 8 bytes, or the producer
5742 *		position not matching the advertised length of a sample.
5743 *
5744 *		**-E2BIG** if user-space has tried to publish a sample which is
5745 *		larger than the size of the ring buffer, or which cannot fit
5746 *		within a struct bpf_dynptr.
5747 *
5748 * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
5749 *	Description
5750 *		Get a bpf_local_storage from the *cgroup*.
5751 *
5752 *		Logically, it could be thought of as getting the value from
5753 *		a *map* with *cgroup* as the **key**.  From this
5754 *		perspective,  the usage is not much different from
5755 *		**bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this
5756 *		helper enforces the key must be a cgroup struct and the map must also
5757 *		be a **BPF_MAP_TYPE_CGRP_STORAGE**.
5758 *
5759 *		In reality, the local-storage value is embedded directly inside of the
5760 *		*cgroup* object itself, rather than being located in the
5761 *		**BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
5762 *		queried for some *map* on a *cgroup* object, the kernel will perform an
5763 *		O(n) iteration over all of the live local-storage values for that
5764 *		*cgroup* object until the local-storage value for the *map* is found.
5765 *
5766 *		An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
5767 *		used such that a new bpf_local_storage will be
5768 *		created if one does not exist.  *value* can be used
5769 *		together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
5770 *		the initial value of a bpf_local_storage.  If *value* is
5771 *		**NULL**, the new bpf_local_storage will be zero initialized.
5772 *	Return
5773 *		A bpf_local_storage pointer is returned on success.
5774 *
5775 *		**NULL** if not found or there was an error in adding
5776 *		a new bpf_local_storage.
5777 *
5778 * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
5779 *	Description
5780 *		Delete a bpf_local_storage from a *cgroup*.
5781 *	Return
5782 *		0 on success.
5783 *
5784 *		**-ENOENT** if the bpf_local_storage cannot be found.
5785 */
5786#define ___BPF_FUNC_MAPPER(FN, ctx...)			\
5787	FN(unspec, 0, ##ctx)				\
5788	FN(map_lookup_elem, 1, ##ctx)			\
5789	FN(map_update_elem, 2, ##ctx)			\
5790	FN(map_delete_elem, 3, ##ctx)			\
5791	FN(probe_read, 4, ##ctx)			\
5792	FN(ktime_get_ns, 5, ##ctx)			\
5793	FN(trace_printk, 6, ##ctx)			\
5794	FN(get_prandom_u32, 7, ##ctx)			\
5795	FN(get_smp_processor_id, 8, ##ctx)		\
5796	FN(skb_store_bytes, 9, ##ctx)			\
5797	FN(l3_csum_replace, 10, ##ctx)			\
5798	FN(l4_csum_replace, 11, ##ctx)			\
5799	FN(tail_call, 12, ##ctx)			\
5800	FN(clone_redirect, 13, ##ctx)			\
5801	FN(get_current_pid_tgid, 14, ##ctx)		\
5802	FN(get_current_uid_gid, 15, ##ctx)		\
5803	FN(get_current_comm, 16, ##ctx)			\
5804	FN(get_cgroup_classid, 17, ##ctx)		\
5805	FN(skb_vlan_push, 18, ##ctx)			\
5806	FN(skb_vlan_pop, 19, ##ctx)			\
5807	FN(skb_get_tunnel_key, 20, ##ctx)		\
5808	FN(skb_set_tunnel_key, 21, ##ctx)		\
5809	FN(perf_event_read, 22, ##ctx)			\
5810	FN(redirect, 23, ##ctx)				\
5811	FN(get_route_realm, 24, ##ctx)			\
5812	FN(perf_event_output, 25, ##ctx)		\
5813	FN(skb_load_bytes, 26, ##ctx)			\
5814	FN(get_stackid, 27, ##ctx)			\
5815	FN(csum_diff, 28, ##ctx)			\
5816	FN(skb_get_tunnel_opt, 29, ##ctx)		\
5817	FN(skb_set_tunnel_opt, 30, ##ctx)		\
5818	FN(skb_change_proto, 31, ##ctx)			\
5819	FN(skb_change_type, 32, ##ctx)			\
5820	FN(skb_under_cgroup, 33, ##ctx)			\
5821	FN(get_hash_recalc, 34, ##ctx)			\
5822	FN(get_current_task, 35, ##ctx)			\
5823	FN(probe_write_user, 36, ##ctx)			\
5824	FN(current_task_under_cgroup, 37, ##ctx)	\
5825	FN(skb_change_tail, 38, ##ctx)			\
5826	FN(skb_pull_data, 39, ##ctx)			\
5827	FN(csum_update, 40, ##ctx)			\
5828	FN(set_hash_invalid, 41, ##ctx)			\
5829	FN(get_numa_node_id, 42, ##ctx)			\
5830	FN(skb_change_head, 43, ##ctx)			\
5831	FN(xdp_adjust_head, 44, ##ctx)			\
5832	FN(probe_read_str, 45, ##ctx)			\
5833	FN(get_socket_cookie, 46, ##ctx)		\
5834	FN(get_socket_uid, 47, ##ctx)			\
5835	FN(set_hash, 48, ##ctx)				\
5836	FN(setsockopt, 49, ##ctx)			\
5837	FN(skb_adjust_room, 50, ##ctx)			\
5838	FN(redirect_map, 51, ##ctx)			\
5839	FN(sk_redirect_map, 52, ##ctx)			\
5840	FN(sock_map_update, 53, ##ctx)			\
5841	FN(xdp_adjust_meta, 54, ##ctx)			\
5842	FN(perf_event_read_value, 55, ##ctx)		\
5843	FN(perf_prog_read_value, 56, ##ctx)		\
5844	FN(getsockopt, 57, ##ctx)			\
5845	FN(override_return, 58, ##ctx)			\
5846	FN(sock_ops_cb_flags_set, 59, ##ctx)		\
5847	FN(msg_redirect_map, 60, ##ctx)			\
5848	FN(msg_apply_bytes, 61, ##ctx)			\
5849	FN(msg_cork_bytes, 62, ##ctx)			\
5850	FN(msg_pull_data, 63, ##ctx)			\
5851	FN(bind, 64, ##ctx)				\
5852	FN(xdp_adjust_tail, 65, ##ctx)			\
5853	FN(skb_get_xfrm_state, 66, ##ctx)		\
5854	FN(get_stack, 67, ##ctx)			\
5855	FN(skb_load_bytes_relative, 68, ##ctx)		\
5856	FN(fib_lookup, 69, ##ctx)			\
5857	FN(sock_hash_update, 70, ##ctx)			\
5858	FN(msg_redirect_hash, 71, ##ctx)		\
5859	FN(sk_redirect_hash, 72, ##ctx)			\
5860	FN(lwt_push_encap, 73, ##ctx)			\
5861	FN(lwt_seg6_store_bytes, 74, ##ctx)		\
5862	FN(lwt_seg6_adjust_srh, 75, ##ctx)		\
5863	FN(lwt_seg6_action, 76, ##ctx)			\
5864	FN(rc_repeat, 77, ##ctx)			\
5865	FN(rc_keydown, 78, ##ctx)			\
5866	FN(skb_cgroup_id, 79, ##ctx)			\
5867	FN(get_current_cgroup_id, 80, ##ctx)		\
5868	FN(get_local_storage, 81, ##ctx)		\
5869	FN(sk_select_reuseport, 82, ##ctx)		\
5870	FN(skb_ancestor_cgroup_id, 83, ##ctx)		\
5871	FN(sk_lookup_tcp, 84, ##ctx)			\
5872	FN(sk_lookup_udp, 85, ##ctx)			\
5873	FN(sk_release, 86, ##ctx)			\
5874	FN(map_push_elem, 87, ##ctx)			\
5875	FN(map_pop_elem, 88, ##ctx)			\
5876	FN(map_peek_elem, 89, ##ctx)			\
5877	FN(msg_push_data, 90, ##ctx)			\
5878	FN(msg_pop_data, 91, ##ctx)			\
5879	FN(rc_pointer_rel, 92, ##ctx)			\
5880	FN(spin_lock, 93, ##ctx)			\
5881	FN(spin_unlock, 94, ##ctx)			\
5882	FN(sk_fullsock, 95, ##ctx)			\
5883	FN(tcp_sock, 96, ##ctx)				\
5884	FN(skb_ecn_set_ce, 97, ##ctx)			\
5885	FN(get_listener_sock, 98, ##ctx)		\
5886	FN(skc_lookup_tcp, 99, ##ctx)			\
5887	FN(tcp_check_syncookie, 100, ##ctx)		\
5888	FN(sysctl_get_name, 101, ##ctx)			\
5889	FN(sysctl_get_current_value, 102, ##ctx)	\
5890	FN(sysctl_get_new_value, 103, ##ctx)		\
5891	FN(sysctl_set_new_value, 104, ##ctx)		\
5892	FN(strtol, 105, ##ctx)				\
5893	FN(strtoul, 106, ##ctx)				\
5894	FN(sk_storage_get, 107, ##ctx)			\
5895	FN(sk_storage_delete, 108, ##ctx)		\
5896	FN(send_signal, 109, ##ctx)			\
5897	FN(tcp_gen_syncookie, 110, ##ctx)		\
5898	FN(skb_output, 111, ##ctx)			\
5899	FN(probe_read_user, 112, ##ctx)			\
5900	FN(probe_read_kernel, 113, ##ctx)		\
5901	FN(probe_read_user_str, 114, ##ctx)		\
5902	FN(probe_read_kernel_str, 115, ##ctx)		\
5903	FN(tcp_send_ack, 116, ##ctx)			\
5904	FN(send_signal_thread, 117, ##ctx)		\
5905	FN(jiffies64, 118, ##ctx)			\
5906	FN(read_branch_records, 119, ##ctx)		\
5907	FN(get_ns_current_pid_tgid, 120, ##ctx)		\
5908	FN(xdp_output, 121, ##ctx)			\
5909	FN(get_netns_cookie, 122, ##ctx)		\
5910	FN(get_current_ancestor_cgroup_id, 123, ##ctx)	\
5911	FN(sk_assign, 124, ##ctx)			\
5912	FN(ktime_get_boot_ns, 125, ##ctx)		\
5913	FN(seq_printf, 126, ##ctx)			\
5914	FN(seq_write, 127, ##ctx)			\
5915	FN(sk_cgroup_id, 128, ##ctx)			\
5916	FN(sk_ancestor_cgroup_id, 129, ##ctx)		\
5917	FN(ringbuf_output, 130, ##ctx)			\
5918	FN(ringbuf_reserve, 131, ##ctx)			\
5919	FN(ringbuf_submit, 132, ##ctx)			\
5920	FN(ringbuf_discard, 133, ##ctx)			\
5921	FN(ringbuf_query, 134, ##ctx)			\
5922	FN(csum_level, 135, ##ctx)			\
5923	FN(skc_to_tcp6_sock, 136, ##ctx)		\
5924	FN(skc_to_tcp_sock, 137, ##ctx)			\
5925	FN(skc_to_tcp_timewait_sock, 138, ##ctx)	\
5926	FN(skc_to_tcp_request_sock, 139, ##ctx)		\
5927	FN(skc_to_udp6_sock, 140, ##ctx)		\
5928	FN(get_task_stack, 141, ##ctx)			\
5929	FN(load_hdr_opt, 142, ##ctx)			\
5930	FN(store_hdr_opt, 143, ##ctx)			\
5931	FN(reserve_hdr_opt, 144, ##ctx)			\
5932	FN(inode_storage_get, 145, ##ctx)		\
5933	FN(inode_storage_delete, 146, ##ctx)		\
5934	FN(d_path, 147, ##ctx)				\
5935	FN(copy_from_user, 148, ##ctx)			\
5936	FN(snprintf_btf, 149, ##ctx)			\
5937	FN(seq_printf_btf, 150, ##ctx)			\
5938	FN(skb_cgroup_classid, 151, ##ctx)		\
5939	FN(redirect_neigh, 152, ##ctx)			\
5940	FN(per_cpu_ptr, 153, ##ctx)			\
5941	FN(this_cpu_ptr, 154, ##ctx)			\
5942	FN(redirect_peer, 155, ##ctx)			\
5943	FN(task_storage_get, 156, ##ctx)		\
5944	FN(task_storage_delete, 157, ##ctx)		\
5945	FN(get_current_task_btf, 158, ##ctx)		\
5946	FN(bprm_opts_set, 159, ##ctx)			\
5947	FN(ktime_get_coarse_ns, 160, ##ctx)		\
5948	FN(ima_inode_hash, 161, ##ctx)			\
5949	FN(sock_from_file, 162, ##ctx)			\
5950	FN(check_mtu, 163, ##ctx)			\
5951	FN(for_each_map_elem, 164, ##ctx)		\
5952	FN(snprintf, 165, ##ctx)			\
5953	FN(sys_bpf, 166, ##ctx)				\
5954	FN(btf_find_by_name_kind, 167, ##ctx)		\
5955	FN(sys_close, 168, ##ctx)			\
5956	FN(timer_init, 169, ##ctx)			\
5957	FN(timer_set_callback, 170, ##ctx)		\
5958	FN(timer_start, 171, ##ctx)			\
5959	FN(timer_cancel, 172, ##ctx)			\
5960	FN(get_func_ip, 173, ##ctx)			\
5961	FN(get_attach_cookie, 174, ##ctx)		\
5962	FN(task_pt_regs, 175, ##ctx)			\
5963	FN(get_branch_snapshot, 176, ##ctx)		\
5964	FN(trace_vprintk, 177, ##ctx)			\
5965	FN(skc_to_unix_sock, 178, ##ctx)		\
5966	FN(kallsyms_lookup_name, 179, ##ctx)		\
5967	FN(find_vma, 180, ##ctx)			\
5968	FN(loop, 181, ##ctx)				\
5969	FN(strncmp, 182, ##ctx)				\
5970	FN(get_func_arg, 183, ##ctx)			\
5971	FN(get_func_ret, 184, ##ctx)			\
5972	FN(get_func_arg_cnt, 185, ##ctx)		\
5973	FN(get_retval, 186, ##ctx)			\
5974	FN(set_retval, 187, ##ctx)			\
5975	FN(xdp_get_buff_len, 188, ##ctx)		\
5976	FN(xdp_load_bytes, 189, ##ctx)			\
5977	FN(xdp_store_bytes, 190, ##ctx)			\
5978	FN(copy_from_user_task, 191, ##ctx)		\
5979	FN(skb_set_tstamp, 192, ##ctx)			\
5980	FN(ima_file_hash, 193, ##ctx)			\
5981	FN(kptr_xchg, 194, ##ctx)			\
5982	FN(map_lookup_percpu_elem, 195, ##ctx)		\
5983	FN(skc_to_mptcp_sock, 196, ##ctx)		\
5984	FN(dynptr_from_mem, 197, ##ctx)			\
5985	FN(ringbuf_reserve_dynptr, 198, ##ctx)		\
5986	FN(ringbuf_submit_dynptr, 199, ##ctx)		\
5987	FN(ringbuf_discard_dynptr, 200, ##ctx)		\
5988	FN(dynptr_read, 201, ##ctx)			\
5989	FN(dynptr_write, 202, ##ctx)			\
5990	FN(dynptr_data, 203, ##ctx)			\
5991	FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx)	\
5992	FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx)	\
5993	FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx)	\
5994	FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx)	\
5995	FN(ktime_get_tai_ns, 208, ##ctx)		\
5996	FN(user_ringbuf_drain, 209, ##ctx)		\
5997	FN(cgrp_storage_get, 210, ##ctx)		\
5998	FN(cgrp_storage_delete, 211, ##ctx)		\
5999	/* */
6000
6001/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
6002 * know or care about integer value that is now passed as second argument
6003 */
6004#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name),
6005#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN)
6006
6007/* integer value in 'imm' field of BPF_CALL instruction selects which helper
6008 * function eBPF program intends to call
6009 */
6010#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y,
6011enum bpf_func_id {
6012	___BPF_FUNC_MAPPER(__BPF_ENUM_FN)
6013	__BPF_FUNC_MAX_ID,
6014};
6015#undef __BPF_ENUM_FN
6016
6017/* All flags used by eBPF helper functions, placed here. */
6018
6019/* BPF_FUNC_skb_store_bytes flags. */
6020enum {
6021	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
6022	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
6023};
6024
6025/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
6026 * First 4 bits are for passing the header field size.
6027 */
6028enum {
6029	BPF_F_HDR_FIELD_MASK		= 0xfULL,
6030};
6031
6032/* BPF_FUNC_l4_csum_replace flags. */
6033enum {
6034	BPF_F_PSEUDO_HDR		= (1ULL << 4),
6035	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
6036	BPF_F_MARK_ENFORCE		= (1ULL << 6),
6037};
6038
6039/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
6040enum {
6041	BPF_F_INGRESS			= (1ULL << 0),
6042};
6043
6044/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
6045enum {
6046	BPF_F_TUNINFO_IPV6		= (1ULL << 0),
6047};
6048
6049/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
6050enum {
6051	BPF_F_SKIP_FIELD_MASK		= 0xffULL,
6052	BPF_F_USER_STACK		= (1ULL << 8),
6053/* flags used by BPF_FUNC_get_stackid only. */
6054	BPF_F_FAST_STACK_CMP		= (1ULL << 9),
6055	BPF_F_REUSE_STACKID		= (1ULL << 10),
6056/* flags used by BPF_FUNC_get_stack only. */
6057	BPF_F_USER_BUILD_ID		= (1ULL << 11),
6058};
6059
6060/* BPF_FUNC_skb_set_tunnel_key flags. */
6061enum {
6062	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
6063	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
6064	BPF_F_SEQ_NUMBER		= (1ULL << 3),
6065	BPF_F_NO_TUNNEL_KEY		= (1ULL << 4),
6066};
6067
6068/* BPF_FUNC_skb_get_tunnel_key flags. */
6069enum {
6070	BPF_F_TUNINFO_FLAGS		= (1ULL << 4),
6071};
6072
6073/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
6074 * BPF_FUNC_perf_event_read_value flags.
6075 */
6076enum {
6077	BPF_F_INDEX_MASK		= 0xffffffffULL,
6078	BPF_F_CURRENT_CPU		= BPF_F_INDEX_MASK,
6079/* BPF_FUNC_perf_event_output for sk_buff input context. */
6080	BPF_F_CTXLEN_MASK		= (0xfffffULL << 32),
6081};
6082
6083/* Current network namespace */
6084enum {
6085	BPF_F_CURRENT_NETNS		= (-1L),
6086};
6087
6088/* BPF_FUNC_csum_level level values. */
6089enum {
6090	BPF_CSUM_LEVEL_QUERY,
6091	BPF_CSUM_LEVEL_INC,
6092	BPF_CSUM_LEVEL_DEC,
6093	BPF_CSUM_LEVEL_RESET,
6094};
6095
6096/* BPF_FUNC_skb_adjust_room flags. */
6097enum {
6098	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
6099	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
6100	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
6101	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
6102	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
6103	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
6104	BPF_F_ADJ_ROOM_ENCAP_L2_ETH	= (1ULL << 6),
6105	BPF_F_ADJ_ROOM_DECAP_L3_IPV4	= (1ULL << 7),
6106	BPF_F_ADJ_ROOM_DECAP_L3_IPV6	= (1ULL << 8),
6107};
6108
6109enum {
6110	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
6111	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
6112};
6113
6114#define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
6115					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
6116					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
6117
6118/* BPF_FUNC_sysctl_get_name flags. */
6119enum {
6120	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
6121};
6122
6123/* BPF_FUNC_<kernel_obj>_storage_get flags */
6124enum {
6125	BPF_LOCAL_STORAGE_GET_F_CREATE	= (1ULL << 0),
6126	/* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
6127	 * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
6128	 */
6129	BPF_SK_STORAGE_GET_F_CREATE  = BPF_LOCAL_STORAGE_GET_F_CREATE,
6130};
6131
6132/* BPF_FUNC_read_branch_records flags. */
6133enum {
6134	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
6135};
6136
6137/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and
6138 * BPF_FUNC_bpf_ringbuf_output flags.
6139 */
6140enum {
6141	BPF_RB_NO_WAKEUP		= (1ULL << 0),
6142	BPF_RB_FORCE_WAKEUP		= (1ULL << 1),
6143};
6144
6145/* BPF_FUNC_bpf_ringbuf_query flags */
6146enum {
6147	BPF_RB_AVAIL_DATA = 0,
6148	BPF_RB_RING_SIZE = 1,
6149	BPF_RB_CONS_POS = 2,
6150	BPF_RB_PROD_POS = 3,
6151};
6152
6153/* BPF ring buffer constants */
6154enum {
6155	BPF_RINGBUF_BUSY_BIT		= (1U << 31),
6156	BPF_RINGBUF_DISCARD_BIT		= (1U << 30),
6157	BPF_RINGBUF_HDR_SZ		= 8,
6158};
6159
6160/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
6161enum {
6162	BPF_SK_LOOKUP_F_REPLACE		= (1ULL << 0),
6163	BPF_SK_LOOKUP_F_NO_REUSEPORT	= (1ULL << 1),
6164};
6165
6166/* Mode for BPF_FUNC_skb_adjust_room helper. */
6167enum bpf_adj_room_mode {
6168	BPF_ADJ_ROOM_NET,
6169	BPF_ADJ_ROOM_MAC,
6170};
6171
6172/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
6173enum bpf_hdr_start_off {
6174	BPF_HDR_START_MAC,
6175	BPF_HDR_START_NET,
6176};
6177
6178/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
6179enum bpf_lwt_encap_mode {
6180	BPF_LWT_ENCAP_SEG6,
6181	BPF_LWT_ENCAP_SEG6_INLINE,
6182	BPF_LWT_ENCAP_IP,
6183};
6184
6185/* Flags for bpf_bprm_opts_set helper */
6186enum {
6187	BPF_F_BPRM_SECUREEXEC	= (1ULL << 0),
6188};
6189
6190/* Flags for bpf_redirect_map helper */
6191enum {
6192	BPF_F_BROADCAST		= (1ULL << 3),
6193	BPF_F_EXCLUDE_INGRESS	= (1ULL << 4),
6194};
6195
6196#define __bpf_md_ptr(type, name)	\
6197union {					\
6198	type name;			\
6199	__u64 :64;			\
6200} __attribute__((aligned(8)))
6201
6202enum {
6203	BPF_SKB_TSTAMP_UNSPEC,
6204	BPF_SKB_TSTAMP_DELIVERY_MONO,	/* tstamp has mono delivery time */
6205	/* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
6206	 * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
6207	 * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
6208	 */
6209};
6210
6211/* user accessible mirror of in-kernel sk_buff.
6212 * new fields can only be added to the end of this structure
6213 */
6214struct __sk_buff {
6215	__u32 len;
6216	__u32 pkt_type;
6217	__u32 mark;
6218	__u32 queue_mapping;
6219	__u32 protocol;
6220	__u32 vlan_present;
6221	__u32 vlan_tci;
6222	__u32 vlan_proto;
6223	__u32 priority;
6224	__u32 ingress_ifindex;
6225	__u32 ifindex;
6226	__u32 tc_index;
6227	__u32 cb[5];
6228	__u32 hash;
6229	__u32 tc_classid;
6230	__u32 data;
6231	__u32 data_end;
6232	__u32 napi_id;
6233
6234	/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
6235	__u32 family;
6236	__u32 remote_ip4;	/* Stored in network byte order */
6237	__u32 local_ip4;	/* Stored in network byte order */
6238	__u32 remote_ip6[4];	/* Stored in network byte order */
6239	__u32 local_ip6[4];	/* Stored in network byte order */
6240	__u32 remote_port;	/* Stored in network byte order */
6241	__u32 local_port;	/* stored in host byte order */
6242	/* ... here. */
6243
6244	__u32 data_meta;
6245	__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
6246	__u64 tstamp;
6247	__u32 wire_len;
6248	__u32 gso_segs;
6249	__bpf_md_ptr(struct bpf_sock *, sk);
6250	__u32 gso_size;
6251	__u8  tstamp_type;
6252	__u32 :24;		/* Padding, future use. */
6253	__u64 hwtstamp;
6254};
6255
6256struct bpf_tunnel_key {
6257	__u32 tunnel_id;
6258	union {
6259		__u32 remote_ipv4;
6260		__u32 remote_ipv6[4];
6261	};
6262	__u8 tunnel_tos;
6263	__u8 tunnel_ttl;
6264	union {
6265		__u16 tunnel_ext;	/* compat */
6266		__be16 tunnel_flags;
6267	};
6268	__u32 tunnel_label;
6269	union {
6270		__u32 local_ipv4;
6271		__u32 local_ipv6[4];
6272	};
6273};
6274
6275/* user accessible mirror of in-kernel xfrm_state.
6276 * new fields can only be added to the end of this structure
6277 */
6278struct bpf_xfrm_state {
6279	__u32 reqid;
6280	__u32 spi;	/* Stored in network byte order */
6281	__u16 family;
6282	__u16 ext;	/* Padding, future use. */
6283	union {
6284		__u32 remote_ipv4;	/* Stored in network byte order */
6285		__u32 remote_ipv6[4];	/* Stored in network byte order */
6286	};
6287};
6288
6289/* Generic BPF return codes which all BPF program types may support.
6290 * The values are binary compatible with their TC_ACT_* counter-part to
6291 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
6292 * programs.
6293 *
6294 * XDP is handled seprately, see XDP_*.
6295 */
6296enum bpf_ret_code {
6297	BPF_OK = 0,
6298	/* 1 reserved */
6299	BPF_DROP = 2,
6300	/* 3-6 reserved */
6301	BPF_REDIRECT = 7,
6302	/* >127 are reserved for prog type specific return codes.
6303	 *
6304	 * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
6305	 *    BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
6306	 *    changed and should be routed based on its new L3 header.
6307	 *    (This is an L3 redirect, as opposed to L2 redirect
6308	 *    represented by BPF_REDIRECT above).
6309	 */
6310	BPF_LWT_REROUTE = 128,
6311	/* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
6312	 *   to indicate that no custom dissection was performed, and
6313	 *   fallback to standard dissector is requested.
6314	 */
6315	BPF_FLOW_DISSECTOR_CONTINUE = 129,
6316};
6317
6318struct bpf_sock {
6319	__u32 bound_dev_if;
6320	__u32 family;
6321	__u32 type;
6322	__u32 protocol;
6323	__u32 mark;
6324	__u32 priority;
6325	/* IP address also allows 1 and 2 bytes access */
6326	__u32 src_ip4;
6327	__u32 src_ip6[4];
6328	__u32 src_port;		/* host byte order */
6329	__be16 dst_port;	/* network byte order */
6330	__u16 :16;		/* zero padding */
6331	__u32 dst_ip4;
6332	__u32 dst_ip6[4];
6333	__u32 state;
6334	__s32 rx_queue_mapping;
6335};
6336
6337struct bpf_tcp_sock {
6338	__u32 snd_cwnd;		/* Sending congestion window		*/
6339	__u32 srtt_us;		/* smoothed round trip time << 3 in usecs */
6340	__u32 rtt_min;
6341	__u32 snd_ssthresh;	/* Slow start size threshold		*/
6342	__u32 rcv_nxt;		/* What we want to receive next		*/
6343	__u32 snd_nxt;		/* Next sequence we send		*/
6344	__u32 snd_una;		/* First byte we want an ack for	*/
6345	__u32 mss_cache;	/* Cached effective mss, not including SACKS */
6346	__u32 ecn_flags;	/* ECN status bits.			*/
6347	__u32 rate_delivered;	/* saved rate sample: packets delivered */
6348	__u32 rate_interval_us;	/* saved rate sample: time elapsed */
6349	__u32 packets_out;	/* Packets which are "in flight"	*/
6350	__u32 retrans_out;	/* Retransmitted packets out		*/
6351	__u32 total_retrans;	/* Total retransmits for entire connection */
6352	__u32 segs_in;		/* RFC4898 tcpEStatsPerfSegsIn
6353				 * total number of segments in.
6354				 */
6355	__u32 data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
6356				 * total number of data segments in.
6357				 */
6358	__u32 segs_out;		/* RFC4898 tcpEStatsPerfSegsOut
6359				 * The total number of segments sent.
6360				 */
6361	__u32 data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
6362				 * total number of data segments sent.
6363				 */
6364	__u32 lost_out;		/* Lost packets			*/
6365	__u32 sacked_out;	/* SACK'd packets			*/
6366	__u64 bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
6367				 * sum(delta(rcv_nxt)), or how many bytes
6368				 * were acked.
6369				 */
6370	__u64 bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
6371				 * sum(delta(snd_una)), or how many bytes
6372				 * were acked.
6373				 */
6374	__u32 dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
6375				 * total number of DSACK blocks received
6376				 */
6377	__u32 delivered;	/* Total data packets delivered incl. rexmits */
6378	__u32 delivered_ce;	/* Like the above but only ECE marked packets */
6379	__u32 icsk_retransmits;	/* Number of unrecovered [RTO] timeouts */
6380};
6381
6382struct bpf_sock_tuple {
6383	union {
6384		struct {
6385			__be32 saddr;
6386			__be32 daddr;
6387			__be16 sport;
6388			__be16 dport;
6389		} ipv4;
6390		struct {
6391			__be32 saddr[4];
6392			__be32 daddr[4];
6393			__be16 sport;
6394			__be16 dport;
6395		} ipv6;
6396	};
6397};
6398
6399/* (Simplified) user return codes for tcx prog type.
6400 * A valid tcx program must return one of these defined values. All other
6401 * return codes are reserved for future use. Must remain compatible with
6402 * their TC_ACT_* counter-parts. For compatibility in behavior, unknown
6403 * return codes are mapped to TCX_NEXT.
6404 */
6405enum tcx_action_base {
6406	TCX_NEXT	= -1,
6407	TCX_PASS	= 0,
6408	TCX_DROP	= 2,
6409	TCX_REDIRECT	= 7,
6410};
6411
6412struct bpf_xdp_sock {
6413	__u32 queue_id;
6414};
6415
6416#define XDP_PACKET_HEADROOM 256
6417
6418/* User return codes for XDP prog type.
6419 * A valid XDP program must return one of these defined values. All other
6420 * return codes are reserved for future use. Unknown return codes will
6421 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
6422 */
6423enum xdp_action {
6424	XDP_ABORTED = 0,
6425	XDP_DROP,
6426	XDP_PASS,
6427	XDP_TX,
6428	XDP_REDIRECT,
6429};
6430
6431/* user accessible metadata for XDP packet hook
6432 * new fields must be added to the end of this structure
6433 */
6434struct xdp_md {
6435	__u32 data;
6436	__u32 data_end;
6437	__u32 data_meta;
6438	/* Below access go through struct xdp_rxq_info */
6439	__u32 ingress_ifindex; /* rxq->dev->ifindex */
6440	__u32 rx_queue_index;  /* rxq->queue_index  */
6441
6442	__u32 egress_ifindex;  /* txq->dev->ifindex */
6443};
6444
6445/* DEVMAP map-value layout
6446 *
6447 * The struct data-layout of map-value is a configuration interface.
6448 * New members can only be added to the end of this structure.
6449 */
6450struct bpf_devmap_val {
6451	__u32 ifindex;   /* device index */
6452	union {
6453		int   fd;  /* prog fd on map write */
6454		__u32 id;  /* prog id on map read */
6455	} bpf_prog;
6456};
6457
6458/* CPUMAP map-value layout
6459 *
6460 * The struct data-layout of map-value is a configuration interface.
6461 * New members can only be added to the end of this structure.
6462 */
6463struct bpf_cpumap_val {
6464	__u32 qsize;	/* queue size to remote target CPU */
6465	union {
6466		int   fd;	/* prog fd on map write */
6467		__u32 id;	/* prog id on map read */
6468	} bpf_prog;
6469};
6470
6471enum sk_action {
6472	SK_DROP = 0,
6473	SK_PASS,
6474};
6475
6476/* user accessible metadata for SK_MSG packet hook, new fields must
6477 * be added to the end of this structure
6478 */
6479struct sk_msg_md {
6480	__bpf_md_ptr(void *, data);
6481	__bpf_md_ptr(void *, data_end);
6482
6483	__u32 family;
6484	__u32 remote_ip4;	/* Stored in network byte order */
6485	__u32 local_ip4;	/* Stored in network byte order */
6486	__u32 remote_ip6[4];	/* Stored in network byte order */
6487	__u32 local_ip6[4];	/* Stored in network byte order */
6488	__u32 remote_port;	/* Stored in network byte order */
6489	__u32 local_port;	/* stored in host byte order */
6490	__u32 size;		/* Total size of sk_msg */
6491
6492	__bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
6493};
6494
6495struct sk_reuseport_md {
6496	/*
6497	 * Start of directly accessible data. It begins from
6498	 * the tcp/udp header.
6499	 */
6500	__bpf_md_ptr(void *, data);
6501	/* End of directly accessible data */
6502	__bpf_md_ptr(void *, data_end);
6503	/*
6504	 * Total length of packet (starting from the tcp/udp header).
6505	 * Note that the directly accessible bytes (data_end - data)
6506	 * could be less than this "len".  Those bytes could be
6507	 * indirectly read by a helper "bpf_skb_load_bytes()".
6508	 */
6509	__u32 len;
6510	/*
6511	 * Eth protocol in the mac header (network byte order). e.g.
6512	 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
6513	 */
6514	__u32 eth_protocol;
6515	__u32 ip_protocol;	/* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
6516	__u32 bind_inany;	/* Is sock bound to an INANY address? */
6517	__u32 hash;		/* A hash of the packet 4 tuples */
6518	/* When reuse->migrating_sk is NULL, it is selecting a sk for the
6519	 * new incoming connection request (e.g. selecting a listen sk for
6520	 * the received SYN in the TCP case).  reuse->sk is one of the sk
6521	 * in the reuseport group. The bpf prog can use reuse->sk to learn
6522	 * the local listening ip/port without looking into the skb.
6523	 *
6524	 * When reuse->migrating_sk is not NULL, reuse->sk is closed and
6525	 * reuse->migrating_sk is the socket that needs to be migrated
6526	 * to another listening socket.  migrating_sk could be a fullsock
6527	 * sk that is fully established or a reqsk that is in-the-middle
6528	 * of 3-way handshake.
6529	 */
6530	__bpf_md_ptr(struct bpf_sock *, sk);
6531	__bpf_md_ptr(struct bpf_sock *, migrating_sk);
6532};
6533
6534#define BPF_TAG_SIZE	8
6535
6536struct bpf_prog_info {
6537	__u32 type;
6538	__u32 id;
6539	__u8  tag[BPF_TAG_SIZE];
6540	__u32 jited_prog_len;
6541	__u32 xlated_prog_len;
6542	__aligned_u64 jited_prog_insns;
6543	__aligned_u64 xlated_prog_insns;
6544	__u64 load_time;	/* ns since boottime */
6545	__u32 created_by_uid;
6546	__u32 nr_map_ids;
6547	__aligned_u64 map_ids;
6548	char name[BPF_OBJ_NAME_LEN];
6549	__u32 ifindex;
6550	__u32 gpl_compatible:1;
6551	__u32 :31; /* alignment pad */
6552	__u64 netns_dev;
6553	__u64 netns_ino;
6554	__u32 nr_jited_ksyms;
6555	__u32 nr_jited_func_lens;
6556	__aligned_u64 jited_ksyms;
6557	__aligned_u64 jited_func_lens;
6558	__u32 btf_id;
6559	__u32 func_info_rec_size;
6560	__aligned_u64 func_info;
6561	__u32 nr_func_info;
6562	__u32 nr_line_info;
6563	__aligned_u64 line_info;
6564	__aligned_u64 jited_line_info;
6565	__u32 nr_jited_line_info;
6566	__u32 line_info_rec_size;
6567	__u32 jited_line_info_rec_size;
6568	__u32 nr_prog_tags;
6569	__aligned_u64 prog_tags;
6570	__u64 run_time_ns;
6571	__u64 run_cnt;
6572	__u64 recursion_misses;
6573	__u32 verified_insns;
6574	__u32 attach_btf_obj_id;
6575	__u32 attach_btf_id;
6576} __attribute__((aligned(8)));
6577
6578struct bpf_map_info {
6579	__u32 type;
6580	__u32 id;
6581	__u32 key_size;
6582	__u32 value_size;
6583	__u32 max_entries;
6584	__u32 map_flags;
6585	char  name[BPF_OBJ_NAME_LEN];
6586	__u32 ifindex;
6587	__u32 btf_vmlinux_value_type_id;
6588	__u64 netns_dev;
6589	__u64 netns_ino;
6590	__u32 btf_id;
6591	__u32 btf_key_type_id;
6592	__u32 btf_value_type_id;
6593	__u32 btf_vmlinux_id;
6594	__u64 map_extra;
6595} __attribute__((aligned(8)));
6596
6597struct bpf_btf_info {
6598	__aligned_u64 btf;
6599	__u32 btf_size;
6600	__u32 id;
6601	__aligned_u64 name;
6602	__u32 name_len;
6603	__u32 kernel_btf;
6604} __attribute__((aligned(8)));
6605
6606struct bpf_link_info {
6607	__u32 type;
6608	__u32 id;
6609	__u32 prog_id;
6610	union {
6611		struct {
6612			__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
6613			__u32 tp_name_len;     /* in/out: tp_name buffer len */
6614		} raw_tracepoint;
6615		struct {
6616			__u32 attach_type;
6617			__u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
6618			__u32 target_btf_id; /* BTF type id inside the object */
6619		} tracing;
6620		struct {
6621			__u64 cgroup_id;
6622			__u32 attach_type;
6623		} cgroup;
6624		struct {
6625			__aligned_u64 target_name; /* in/out: target_name buffer ptr */
6626			__u32 target_name_len;	   /* in/out: target_name buffer len */
6627
6628			/* If the iter specific field is 32 bits, it can be put
6629			 * in the first or second union. Otherwise it should be
6630			 * put in the second union.
6631			 */
6632			union {
6633				struct {
6634					__u32 map_id;
6635				} map;
6636			};
6637			union {
6638				struct {
6639					__u64 cgroup_id;
6640					__u32 order;
6641				} cgroup;
6642				struct {
6643					__u32 tid;
6644					__u32 pid;
6645				} task;
6646			};
6647		} iter;
6648		struct  {
6649			__u32 netns_ino;
6650			__u32 attach_type;
6651		} netns;
6652		struct {
6653			__u32 ifindex;
6654		} xdp;
6655		struct {
6656			__u32 map_id;
6657		} struct_ops;
6658		struct {
6659			__u32 pf;
6660			__u32 hooknum;
6661			__s32 priority;
6662			__u32 flags;
6663		} netfilter;
6664		struct {
6665			__aligned_u64 addrs;
6666			__u32 count; /* in/out: kprobe_multi function count */
6667			__u32 flags;
6668			__u64 missed;
6669			__aligned_u64 cookies;
6670		} kprobe_multi;
6671		struct {
6672			__aligned_u64 path;
6673			__aligned_u64 offsets;
6674			__aligned_u64 ref_ctr_offsets;
6675			__aligned_u64 cookies;
6676			__u32 path_size; /* in/out: real path size on success, including zero byte */
6677			__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
6678			__u32 flags;
6679			__u32 pid;
6680		} uprobe_multi;
6681		struct {
6682			__u32 type; /* enum bpf_perf_event_type */
6683			__u32 :32;
6684			union {
6685				struct {
6686					__aligned_u64 file_name; /* in/out */
6687					__u32 name_len;
6688					__u32 offset; /* offset from file_name */
6689					__u64 cookie;
6690				} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
6691				struct {
6692					__aligned_u64 func_name; /* in/out */
6693					__u32 name_len;
6694					__u32 offset; /* offset from func_name */
6695					__u64 addr;
6696					__u64 missed;
6697					__u64 cookie;
6698				} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
6699				struct {
6700					__aligned_u64 tp_name;   /* in/out */
6701					__u32 name_len;
6702					__u32 :32;
6703					__u64 cookie;
6704				} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
6705				struct {
6706					__u64 config;
6707					__u32 type;
6708					__u32 :32;
6709					__u64 cookie;
6710				} event; /* BPF_PERF_EVENT_EVENT */
6711			};
6712		} perf_event;
6713		struct {
6714			__u32 ifindex;
6715			__u32 attach_type;
6716		} tcx;
6717		struct {
6718			__u32 ifindex;
6719			__u32 attach_type;
6720		} netkit;
6721	};
6722} __attribute__((aligned(8)));
6723
6724/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
6725 * by user and intended to be used by socket (e.g. to bind to, depends on
6726 * attach type).
6727 */
6728struct bpf_sock_addr {
6729	__u32 user_family;	/* Allows 4-byte read, but no write. */
6730	__u32 user_ip4;		/* Allows 1,2,4-byte read and 4-byte write.
6731				 * Stored in network byte order.
6732				 */
6733	__u32 user_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
6734				 * Stored in network byte order.
6735				 */
6736	__u32 user_port;	/* Allows 1,2,4-byte read and 4-byte write.
6737				 * Stored in network byte order
6738				 */
6739	__u32 family;		/* Allows 4-byte read, but no write */
6740	__u32 type;		/* Allows 4-byte read, but no write */
6741	__u32 protocol;		/* Allows 4-byte read, but no write */
6742	__u32 msg_src_ip4;	/* Allows 1,2,4-byte read and 4-byte write.
6743				 * Stored in network byte order.
6744				 */
6745	__u32 msg_src_ip6[4];	/* Allows 1,2,4,8-byte read and 4,8-byte write.
6746				 * Stored in network byte order.
6747				 */
6748	__bpf_md_ptr(struct bpf_sock *, sk);
6749};
6750
6751/* User bpf_sock_ops struct to access socket values and specify request ops
6752 * and their replies.
6753 * Some of this fields are in network (bigendian) byte order and may need
6754 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
6755 * New fields can only be added at the end of this structure
6756 */
6757struct bpf_sock_ops {
6758	__u32 op;
6759	union {
6760		__u32 args[4];		/* Optionally passed to bpf program */
6761		__u32 reply;		/* Returned by bpf program	    */
6762		__u32 replylong[4];	/* Optionally returned by bpf prog  */
6763	};
6764	__u32 family;
6765	__u32 remote_ip4;	/* Stored in network byte order */
6766	__u32 local_ip4;	/* Stored in network byte order */
6767	__u32 remote_ip6[4];	/* Stored in network byte order */
6768	__u32 local_ip6[4];	/* Stored in network byte order */
6769	__u32 remote_port;	/* Stored in network byte order */
6770	__u32 local_port;	/* stored in host byte order */
6771	__u32 is_fullsock;	/* Some TCP fields are only valid if
6772				 * there is a full socket. If not, the
6773				 * fields read as zero.
6774				 */
6775	__u32 snd_cwnd;
6776	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
6777	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
6778	__u32 state;
6779	__u32 rtt_min;
6780	__u32 snd_ssthresh;
6781	__u32 rcv_nxt;
6782	__u32 snd_nxt;
6783	__u32 snd_una;
6784	__u32 mss_cache;
6785	__u32 ecn_flags;
6786	__u32 rate_delivered;
6787	__u32 rate_interval_us;
6788	__u32 packets_out;
6789	__u32 retrans_out;
6790	__u32 total_retrans;
6791	__u32 segs_in;
6792	__u32 data_segs_in;
6793	__u32 segs_out;
6794	__u32 data_segs_out;
6795	__u32 lost_out;
6796	__u32 sacked_out;
6797	__u32 sk_txhash;
6798	__u64 bytes_received;
6799	__u64 bytes_acked;
6800	__bpf_md_ptr(struct bpf_sock *, sk);
6801	/* [skb_data, skb_data_end) covers the whole TCP header.
6802	 *
6803	 * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
6804	 * BPF_SOCK_OPS_HDR_OPT_LEN_CB:   Not useful because the
6805	 *                                header has not been written.
6806	 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
6807	 *				  been written so far.
6808	 * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:  The SYNACK that concludes
6809	 *					the 3WHS.
6810	 * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
6811	 *					the 3WHS.
6812	 *
6813	 * bpf_load_hdr_opt() can also be used to read a particular option.
6814	 */
6815	__bpf_md_ptr(void *, skb_data);
6816	__bpf_md_ptr(void *, skb_data_end);
6817	__u32 skb_len;		/* The total length of a packet.
6818				 * It includes the header, options,
6819				 * and payload.
6820				 */
6821	__u32 skb_tcp_flags;	/* tcp_flags of the header.  It provides
6822				 * an easy way to check for tcp_flags
6823				 * without parsing skb_data.
6824				 *
6825				 * In particular, the skb_tcp_flags
6826				 * will still be available in
6827				 * BPF_SOCK_OPS_HDR_OPT_LEN even though
6828				 * the outgoing header has not
6829				 * been written yet.
6830				 */
6831	__u64 skb_hwtstamp;
6832};
6833
6834/* Definitions for bpf_sock_ops_cb_flags */
6835enum {
6836	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
6837	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
6838	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
6839	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
6840	/* Call bpf for all received TCP headers.  The bpf prog will be
6841	 * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6842	 *
6843	 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6844	 * for the header option related helpers that will be useful
6845	 * to the bpf programs.
6846	 *
6847	 * It could be used at the client/active side (i.e. connect() side)
6848	 * when the server told it that the server was in syncookie
6849	 * mode and required the active side to resend the bpf-written
6850	 * options.  The active side can keep writing the bpf-options until
6851	 * it received a valid packet from the server side to confirm
6852	 * the earlier packet (and options) has been received.  The later
6853	 * example patch is using it like this at the active side when the
6854	 * server is in syncookie mode.
6855	 *
6856	 * The bpf prog will usually turn this off in the common cases.
6857	 */
6858	BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG	= (1<<4),
6859	/* Call bpf when kernel has received a header option that
6860	 * the kernel cannot handle.  The bpf prog will be called under
6861	 * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
6862	 *
6863	 * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
6864	 * for the header option related helpers that will be useful
6865	 * to the bpf programs.
6866	 */
6867	BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
6868	/* Call bpf when the kernel is writing header options for the
6869	 * outgoing packet.  The bpf prog will first be called
6870	 * to reserve space in a skb under
6871	 * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB.  Then
6872	 * the bpf prog will be called to write the header option(s)
6873	 * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6874	 *
6875	 * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
6876	 * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
6877	 * related helpers that will be useful to the bpf programs.
6878	 *
6879	 * The kernel gets its chance to reserve space and write
6880	 * options first before the BPF program does.
6881	 */
6882	BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
6883/* Mask of all currently supported cb flags */
6884	BPF_SOCK_OPS_ALL_CB_FLAGS       = 0x7F,
6885};
6886
6887/* List of known BPF sock_ops operators.
6888 * New entries can only be added at the end
6889 */
6890enum {
6891	BPF_SOCK_OPS_VOID,
6892	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
6893					 * -1 if default value should be used
6894					 */
6895	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertized
6896					 * window (in packets) or -1 if default
6897					 * value should be used
6898					 */
6899	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
6900					 * active connection is initialized
6901					 */
6902	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
6903						 * active connection is
6904						 * established
6905						 */
6906	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
6907						 * passive connection is
6908						 * established
6909						 */
6910	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
6911					 * needs ECN
6912					 */
6913	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
6914					 * based on the path and may be
6915					 * dependent on the congestion control
6916					 * algorithm. In general it indicates
6917					 * a congestion threshold. RTTs above
6918					 * this indicate congestion
6919					 */
6920	BPF_SOCK_OPS_RTO_CB,		/* Called when an RTO has triggered.
6921					 * Arg1: value of icsk_retransmits
6922					 * Arg2: value of icsk_rto
6923					 * Arg3: whether RTO has expired
6924					 */
6925	BPF_SOCK_OPS_RETRANS_CB,	/* Called when skb is retransmitted.
6926					 * Arg1: sequence number of 1st byte
6927					 * Arg2: # segments
6928					 * Arg3: return value of
6929					 *       tcp_transmit_skb (0 => success)
6930					 */
6931	BPF_SOCK_OPS_STATE_CB,		/* Called when TCP changes state.
6932					 * Arg1: old_state
6933					 * Arg2: new_state
6934					 */
6935	BPF_SOCK_OPS_TCP_LISTEN_CB,	/* Called on listen(2), right after
6936					 * socket transition to LISTEN state.
6937					 */
6938	BPF_SOCK_OPS_RTT_CB,		/* Called on every RTT.
6939					 */
6940	BPF_SOCK_OPS_PARSE_HDR_OPT_CB,	/* Parse the header option.
6941					 * It will be called to handle
6942					 * the packets received at
6943					 * an already established
6944					 * connection.
6945					 *
6946					 * sock_ops->skb_data:
6947					 * Referring to the received skb.
6948					 * It covers the TCP header only.
6949					 *
6950					 * bpf_load_hdr_opt() can also
6951					 * be used to search for a
6952					 * particular option.
6953					 */
6954	BPF_SOCK_OPS_HDR_OPT_LEN_CB,	/* Reserve space for writing the
6955					 * header option later in
6956					 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
6957					 * Arg1: bool want_cookie. (in
6958					 *       writing SYNACK only)
6959					 *
6960					 * sock_ops->skb_data:
6961					 * Not available because no header has
6962					 * been	written yet.
6963					 *
6964					 * sock_ops->skb_tcp_flags:
6965					 * The tcp_flags of the
6966					 * outgoing skb. (e.g. SYN, ACK, FIN).
6967					 *
6968					 * bpf_reserve_hdr_opt() should
6969					 * be used to reserve space.
6970					 */
6971	BPF_SOCK_OPS_WRITE_HDR_OPT_CB,	/* Write the header options
6972					 * Arg1: bool want_cookie. (in
6973					 *       writing SYNACK only)
6974					 *
6975					 * sock_ops->skb_data:
6976					 * Referring to the outgoing skb.
6977					 * It covers the TCP header
6978					 * that has already been written
6979					 * by the kernel and the
6980					 * earlier bpf-progs.
6981					 *
6982					 * sock_ops->skb_tcp_flags:
6983					 * The tcp_flags of the outgoing
6984					 * skb. (e.g. SYN, ACK, FIN).
6985					 *
6986					 * bpf_store_hdr_opt() should
6987					 * be used to write the
6988					 * option.
6989					 *
6990					 * bpf_load_hdr_opt() can also
6991					 * be used to search for a
6992					 * particular option that
6993					 * has already been written
6994					 * by the kernel or the
6995					 * earlier bpf-progs.
6996					 */
6997};
6998
6999/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
7000 * changes between the TCP and BPF versions. Ideally this should never happen.
7001 * If it does, we need to add code to convert them before calling
7002 * the BPF sock_ops function.
7003 */
7004enum {
7005	BPF_TCP_ESTABLISHED = 1,
7006	BPF_TCP_SYN_SENT,
7007	BPF_TCP_SYN_RECV,
7008	BPF_TCP_FIN_WAIT1,
7009	BPF_TCP_FIN_WAIT2,
7010	BPF_TCP_TIME_WAIT,
7011	BPF_TCP_CLOSE,
7012	BPF_TCP_CLOSE_WAIT,
7013	BPF_TCP_LAST_ACK,
7014	BPF_TCP_LISTEN,
7015	BPF_TCP_CLOSING,	/* Now a valid state */
7016	BPF_TCP_NEW_SYN_RECV,
7017	BPF_TCP_BOUND_INACTIVE,
7018
7019	BPF_TCP_MAX_STATES	/* Leave at the end! */
7020};
7021
7022enum {
7023	TCP_BPF_IW		= 1001,	/* Set TCP initial congestion window */
7024	TCP_BPF_SNDCWND_CLAMP	= 1002,	/* Set sndcwnd_clamp */
7025	TCP_BPF_DELACK_MAX	= 1003, /* Max delay ack in usecs */
7026	TCP_BPF_RTO_MIN		= 1004, /* Min delay ack in usecs */
7027	/* Copy the SYN pkt to optval
7028	 *
7029	 * BPF_PROG_TYPE_SOCK_OPS only.  It is similar to the
7030	 * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
7031	 * to only getting from the saved_syn.  It can either get the
7032	 * syn packet from:
7033	 *
7034	 * 1. the just-received SYN packet (only available when writing the
7035	 *    SYNACK).  It will be useful when it is not necessary to
7036	 *    save the SYN packet for latter use.  It is also the only way
7037	 *    to get the SYN during syncookie mode because the syn
7038	 *    packet cannot be saved during syncookie.
7039	 *
7040	 * OR
7041	 *
7042	 * 2. the earlier saved syn which was done by
7043	 *    bpf_setsockopt(TCP_SAVE_SYN).
7044	 *
7045	 * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
7046	 * SYN packet is obtained.
7047	 *
7048	 * If the bpf-prog does not need the IP[46] header,  the
7049	 * bpf-prog can avoid parsing the IP header by using
7050	 * TCP_BPF_SYN.  Otherwise, the bpf-prog can get both
7051	 * IP[46] and TCP header by using TCP_BPF_SYN_IP.
7052	 *
7053	 *      >0: Total number of bytes copied
7054	 * -ENOSPC: Not enough space in optval. Only optlen number of
7055	 *          bytes is copied.
7056	 * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
7057	 *	    is not saved by setsockopt(TCP_SAVE_SYN).
7058	 */
7059	TCP_BPF_SYN		= 1005, /* Copy the TCP header */
7060	TCP_BPF_SYN_IP		= 1006, /* Copy the IP[46] and TCP header */
7061	TCP_BPF_SYN_MAC         = 1007, /* Copy the MAC, IP[46], and TCP header */
7062};
7063
7064enum {
7065	BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
7066};
7067
7068/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
7069 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
7070 */
7071enum {
7072	BPF_WRITE_HDR_TCP_CURRENT_MSS = 1,	/* Kernel is finding the
7073						 * total option spaces
7074						 * required for an established
7075						 * sk in order to calculate the
7076						 * MSS.  No skb is actually
7077						 * sent.
7078						 */
7079	BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2,	/* Kernel is in syncookie mode
7080						 * when sending a SYN.
7081						 */
7082};
7083
7084struct bpf_perf_event_value {
7085	__u64 counter;
7086	__u64 enabled;
7087	__u64 running;
7088};
7089
7090enum {
7091	BPF_DEVCG_ACC_MKNOD	= (1ULL << 0),
7092	BPF_DEVCG_ACC_READ	= (1ULL << 1),
7093	BPF_DEVCG_ACC_WRITE	= (1ULL << 2),
7094};
7095
7096enum {
7097	BPF_DEVCG_DEV_BLOCK	= (1ULL << 0),
7098	BPF_DEVCG_DEV_CHAR	= (1ULL << 1),
7099};
7100
7101struct bpf_cgroup_dev_ctx {
7102	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
7103	__u32 access_type;
7104	__u32 major;
7105	__u32 minor;
7106};
7107
7108struct bpf_raw_tracepoint_args {
7109	__u64 args[0];
7110};
7111
7112/* DIRECT:  Skip the FIB rules and go to FIB table associated with device
7113 * OUTPUT:  Do lookup from egress perspective; default is ingress
7114 */
7115enum {
7116	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
7117	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
7118	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
7119	BPF_FIB_LOOKUP_TBID    = (1U << 3),
7120	BPF_FIB_LOOKUP_SRC     = (1U << 4),
7121};
7122
7123enum {
7124	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
7125	BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
7126	BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
7127	BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
7128	BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
7129	BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
7130	BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
7131	BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
7132	BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
7133	BPF_FIB_LKUP_RET_NO_SRC_ADDR,  /* failed to derive IP src addr */
7134};
7135
7136struct bpf_fib_lookup {
7137	/* input:  network family for lookup (AF_INET, AF_INET6)
7138	 * output: network family of egress nexthop
7139	 */
7140	__u8	family;
7141
7142	/* set if lookup is to consider L4 data - e.g., FIB rules */
7143	__u8	l4_protocol;
7144	__be16	sport;
7145	__be16	dport;
7146
7147	union {	/* used for MTU check */
7148		/* input to lookup */
7149		__u16	tot_len; /* L3 length from network hdr (iph->tot_len) */
7150
7151		/* output: MTU value */
7152		__u16	mtu_result;
7153	};
7154	/* input: L3 device index for lookup
7155	 * output: device index from FIB lookup
7156	 */
7157	__u32	ifindex;
7158
7159	union {
7160		/* inputs to lookup */
7161		__u8	tos;		/* AF_INET  */
7162		__be32	flowinfo;	/* AF_INET6, flow_label + priority */
7163
7164		/* output: metric of fib result (IPv4/IPv6 only) */
7165		__u32	rt_metric;
7166	};
7167
7168	/* input: source address to consider for lookup
7169	 * output: source address result from lookup
7170	 */
7171	union {
7172		__be32		ipv4_src;
7173		__u32		ipv6_src[4];  /* in6_addr; network order */
7174	};
7175
7176	/* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
7177	 * network header. output: bpf_fib_lookup sets to gateway address
7178	 * if FIB lookup returns gateway route
7179	 */
7180	union {
7181		__be32		ipv4_dst;
7182		__u32		ipv6_dst[4];  /* in6_addr; network order */
7183	};
7184
7185	union {
7186		struct {
7187			/* output */
7188			__be16	h_vlan_proto;
7189			__be16	h_vlan_TCI;
7190		};
7191		/* input: when accompanied with the
7192		 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
7193		 * specific routing table to use for the fib lookup.
7194		 */
7195		__u32	tbid;
7196	};
7197
7198	__u8	smac[6];     /* ETH_ALEN */
7199	__u8	dmac[6];     /* ETH_ALEN */
7200};
7201
7202struct bpf_redir_neigh {
7203	/* network family for lookup (AF_INET, AF_INET6) */
7204	__u32 nh_family;
7205	/* network address of nexthop; skips fib lookup to find gateway */
7206	union {
7207		__be32		ipv4_nh;
7208		__u32		ipv6_nh[4];  /* in6_addr; network order */
7209	};
7210};
7211
7212/* bpf_check_mtu flags*/
7213enum  bpf_check_mtu_flags {
7214	BPF_MTU_CHK_SEGS  = (1U << 0),
7215};
7216
7217enum bpf_check_mtu_ret {
7218	BPF_MTU_CHK_RET_SUCCESS,      /* check and lookup successful */
7219	BPF_MTU_CHK_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
7220	BPF_MTU_CHK_RET_SEGS_TOOBIG,  /* GSO re-segmentation needed to fwd */
7221};
7222
7223enum bpf_task_fd_type {
7224	BPF_FD_TYPE_RAW_TRACEPOINT,	/* tp name */
7225	BPF_FD_TYPE_TRACEPOINT,		/* tp name */
7226	BPF_FD_TYPE_KPROBE,		/* (symbol + offset) or addr */
7227	BPF_FD_TYPE_KRETPROBE,		/* (symbol + offset) or addr */
7228	BPF_FD_TYPE_UPROBE,		/* filename + offset */
7229	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
7230};
7231
7232enum {
7233	BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		= (1U << 0),
7234	BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		= (1U << 1),
7235	BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		= (1U << 2),
7236};
7237
7238struct bpf_flow_keys {
7239	__u16	nhoff;
7240	__u16	thoff;
7241	__u16	addr_proto;			/* ETH_P_* of valid addrs */
7242	__u8	is_frag;
7243	__u8	is_first_frag;
7244	__u8	is_encap;
7245	__u8	ip_proto;
7246	__be16	n_proto;
7247	__be16	sport;
7248	__be16	dport;
7249	union {
7250		struct {
7251			__be32	ipv4_src;
7252			__be32	ipv4_dst;
7253		};
7254		struct {
7255			__u32	ipv6_src[4];	/* in6_addr; network order */
7256			__u32	ipv6_dst[4];	/* in6_addr; network order */
7257		};
7258	};
7259	__u32	flags;
7260	__be32	flow_label;
7261};
7262
7263struct bpf_func_info {
7264	__u32	insn_off;
7265	__u32	type_id;
7266};
7267
7268#define BPF_LINE_INFO_LINE_NUM(line_col)	((line_col) >> 10)
7269#define BPF_LINE_INFO_LINE_COL(line_col)	((line_col) & 0x3ff)
7270
7271struct bpf_line_info {
7272	__u32	insn_off;
7273	__u32	file_name_off;
7274	__u32	line_off;
7275	__u32	line_col;
7276};
7277
7278struct bpf_spin_lock {
7279	__u32	val;
7280};
7281
7282struct bpf_timer {
7283	__u64 __opaque[2];
7284} __attribute__((aligned(8)));
7285
7286struct bpf_dynptr {
7287	__u64 __opaque[2];
7288} __attribute__((aligned(8)));
7289
7290struct bpf_list_head {
7291	__u64 __opaque[2];
7292} __attribute__((aligned(8)));
7293
7294struct bpf_list_node {
7295	__u64 __opaque[3];
7296} __attribute__((aligned(8)));
7297
7298struct bpf_rb_root {
7299	__u64 __opaque[2];
7300} __attribute__((aligned(8)));
7301
7302struct bpf_rb_node {
7303	__u64 __opaque[4];
7304} __attribute__((aligned(8)));
7305
7306struct bpf_refcount {
7307	__u32 __opaque[1];
7308} __attribute__((aligned(4)));
7309
7310struct bpf_sysctl {
7311	__u32	write;		/* Sysctl is being read (= 0) or written (= 1).
7312				 * Allows 1,2,4-byte read, but no write.
7313				 */
7314	__u32	file_pos;	/* Sysctl file position to read from, write to.
7315				 * Allows 1,2,4-byte read an 4-byte write.
7316				 */
7317};
7318
7319struct bpf_sockopt {
7320	__bpf_md_ptr(struct bpf_sock *, sk);
7321	__bpf_md_ptr(void *, optval);
7322	__bpf_md_ptr(void *, optval_end);
7323
7324	__s32	level;
7325	__s32	optname;
7326	__s32	optlen;
7327	__s32	retval;
7328};
7329
7330struct bpf_pidns_info {
7331	__u32 pid;
7332	__u32 tgid;
7333};
7334
7335/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
7336struct bpf_sk_lookup {
7337	union {
7338		__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
7339		__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
7340	};
7341
7342	__u32 family;		/* Protocol family (AF_INET, AF_INET6) */
7343	__u32 protocol;		/* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
7344	__u32 remote_ip4;	/* Network byte order */
7345	__u32 remote_ip6[4];	/* Network byte order */
7346	__be16 remote_port;	/* Network byte order */
7347	__u16 :16;		/* Zero padding */
7348	__u32 local_ip4;	/* Network byte order */
7349	__u32 local_ip6[4];	/* Network byte order */
7350	__u32 local_port;	/* Host byte order */
7351	__u32 ingress_ifindex;		/* The arriving interface. Determined by inet_iif. */
7352};
7353
7354/*
7355 * struct btf_ptr is used for typed pointer representation; the
7356 * type id is used to render the pointer data as the appropriate type
7357 * via the bpf_snprintf_btf() helper described above.  A flags field -
7358 * potentially to specify additional details about the BTF pointer
7359 * (rather than its mode of display) - is included for future use.
7360 * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
7361 */
7362struct btf_ptr {
7363	void *ptr;
7364	__u32 type_id;
7365	__u32 flags;		/* BTF ptr flags; unused at present. */
7366};
7367
7368/*
7369 * Flags to control bpf_snprintf_btf() behaviour.
7370 *     - BTF_F_COMPACT: no formatting around type information
7371 *     - BTF_F_NONAME: no struct/union member names/types
7372 *     - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
7373 *       equivalent to %px.
7374 *     - BTF_F_ZERO: show zero-valued struct/union members; they
7375 *       are not displayed by default
7376 */
7377enum {
7378	BTF_F_COMPACT	=	(1ULL << 0),
7379	BTF_F_NONAME	=	(1ULL << 1),
7380	BTF_F_PTR_RAW	=	(1ULL << 2),
7381	BTF_F_ZERO	=	(1ULL << 3),
7382};
7383
7384/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
7385 * has to be adjusted by relocations. It is emitted by llvm and passed to
7386 * libbpf and later to the kernel.
7387 */
7388enum bpf_core_relo_kind {
7389	BPF_CORE_FIELD_BYTE_OFFSET = 0,      /* field byte offset */
7390	BPF_CORE_FIELD_BYTE_SIZE = 1,        /* field size in bytes */
7391	BPF_CORE_FIELD_EXISTS = 2,           /* field existence in target kernel */
7392	BPF_CORE_FIELD_SIGNED = 3,           /* field signedness (0 - unsigned, 1 - signed) */
7393	BPF_CORE_FIELD_LSHIFT_U64 = 4,       /* bitfield-specific left bitshift */
7394	BPF_CORE_FIELD_RSHIFT_U64 = 5,       /* bitfield-specific right bitshift */
7395	BPF_CORE_TYPE_ID_LOCAL = 6,          /* type ID in local BPF object */
7396	BPF_CORE_TYPE_ID_TARGET = 7,         /* type ID in target kernel */
7397	BPF_CORE_TYPE_EXISTS = 8,            /* type existence in target kernel */
7398	BPF_CORE_TYPE_SIZE = 9,              /* type size in bytes */
7399	BPF_CORE_ENUMVAL_EXISTS = 10,        /* enum value existence in target kernel */
7400	BPF_CORE_ENUMVAL_VALUE = 11,         /* enum value integer value */
7401	BPF_CORE_TYPE_MATCHES = 12,          /* type match in target kernel */
7402};
7403
7404/*
7405 * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
7406 * and from libbpf to the kernel.
7407 *
7408 * CO-RE relocation captures the following data:
7409 * - insn_off - instruction offset (in bytes) within a BPF program that needs
7410 *   its insn->imm field to be relocated with actual field info;
7411 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
7412 *   type or field;
7413 * - access_str_off - offset into corresponding .BTF string section. String
7414 *   interpretation depends on specific relocation kind:
7415 *     - for field-based relocations, string encodes an accessed field using
7416 *       a sequence of field and array indices, separated by colon (:). It's
7417 *       conceptually very close to LLVM's getelementptr ([0]) instruction's
7418 *       arguments for identifying offset to a field.
7419 *     - for type-based relocations, strings is expected to be just "0";
7420 *     - for enum value-based relocations, string contains an index of enum
7421 *       value within its enum type;
7422 * - kind - one of enum bpf_core_relo_kind;
7423 *
7424 * Example:
7425 *   struct sample {
7426 *       int a;
7427 *       struct {
7428 *           int b[10];
7429 *       };
7430 *   };
7431 *
7432 *   struct sample *s = ...;
7433 *   int *x = &s->a;     // encoded as "0:0" (a is field #0)
7434 *   int *y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
7435 *                       // b is field #0 inside anon struct, accessing elem #5)
7436 *   int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
7437 *
7438 * type_id for all relocs in this example will capture BTF type id of
7439 * `struct sample`.
7440 *
7441 * Such relocation is emitted when using __builtin_preserve_access_index()
7442 * Clang built-in, passing expression that captures field address, e.g.:
7443 *
7444 * bpf_probe_read(&dst, sizeof(dst),
7445 *		  __builtin_preserve_access_index(&src->a.b.c));
7446 *
7447 * In this case Clang will emit field relocation recording necessary data to
7448 * be able to find offset of embedded `a.b.c` field within `src` struct.
7449 *
7450 * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
7451 */
7452struct bpf_core_relo {
7453	__u32 insn_off;
7454	__u32 type_id;
7455	__u32 access_str_off;
7456	enum bpf_core_relo_kind kind;
7457};
7458
7459/*
7460 * Flags to control bpf_timer_start() behaviour.
7461 *     - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
7462 *       relative to current time.
7463 *     - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
7464 */
7465enum {
7466	BPF_F_TIMER_ABS = (1ULL << 0),
7467	BPF_F_TIMER_CPU_PIN = (1ULL << 1),
7468};
7469
7470/* BPF numbers iterator state */
7471struct bpf_iter_num {
7472	/* opaque iterator state; having __u64 here allows to preserve correct
7473	 * alignment requirements in vmlinux.h, generated from BTF
7474	 */
7475	__u64 __opaque[1];
7476} __attribute__((aligned(8)));
7477
7478#endif /* _UAPI__LINUX_BPF_H__ */
7479