/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* eBPF instruction mini library */
#ifndef __BPF_INSN_H
#define __BPF_INSN_H

struct bpf_insn;
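/*
 * Note: only struct bpf_insn is forward-declared here.  The struct
 * layout, the BPF_* opcode/field macros (BPF_ALU64, BPF_OP, BPF_X, ...)
 * and the BPF_REG_* register names used in the examples below are
 * expected to come from <linux/bpf.h>, which callers typically include
 * before this header.
 */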

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })
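/*
 * Illustrative example (assumes BPF_ADD/BPF_SUB and BPF_REG_* from
 * <linux/bpf.h>):
 *
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2)	r1 += r2
 *	BPF_ALU32_REG(BPF_SUB, BPF_REG_1, BPF_REG_2)	w1 -= w2 (lower 32 bits)
 */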

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
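/*
 * Illustrative example: BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1) encodes
 * "r1 += 1"; the 32-bit variant operates on the lower half of the
 * register and zero-extends the result.
 */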

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
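/*
 * Illustrative examples: BPF_MOV64_REG(BPF_REG_6, BPF_REG_1) saves the
 * context pointer in r6 ("r6 = r1"), and BPF_MOV64_IMM(BPF_REG_0, 0)
 * sets the return value to 0 ("r0 = 0").
 */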

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })
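/*
 * Note that BPF_LD_IMM64() expands to *two* struct bpf_insn
 * initializers joined by a comma, so it occupies two slots in an
 * instruction array.  Illustrative example:
 *
 *	BPF_LD_IMM64(BPF_REG_1, 0x1122334455667788ULL)	r1 = 0x1122334455667788
 */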

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
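/*
 * Illustrative example: with map_fd returned by bpf(BPF_MAP_CREATE, ...),
 * BPF_LD_MAP_FD(BPF_REG_1, map_fd) loads the map reference into r1; the
 * kernel resolves the process-local fd to the map at program load time.
 */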


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
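/*
 * BPF_LD_ABS implicitly reads from the skb whose pointer is in R6 and
 * puts the result in R0.  Illustrative example (assumes ETH_HLEN from
 * <linux/if_ether.h> and struct iphdr from <linux/ip.h>):
 *
 *	BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol))
 *
 * loads the IP protocol byte of the packet into R0.
 */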

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })
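/*
 * Illustrative example: BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0)
 * encodes "r1 = *(u32 *)(r0 + 0)", e.g. reading a counter out of a map
 * value returned in r0.
 */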

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })
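/*
 * Illustrative example: BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8)
 * encodes "*(u64 *)(r10 - 8) = r1", i.e. spilling r1 to the stack
 * (r10 is the read-only frame pointer).
 */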

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
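/*
 * Illustrative examples (BPF_FETCH comes from <linux/bpf.h>):
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0)
 *		atomically does *(u64 *)(r0 + 0) += r1
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0)
 *		same, but additionally returns the old value in r1
 */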

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
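/*
 * Illustrative example: BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0) encodes
 * "*(u32 *)(r10 - 8) = 0", e.g. zero-initializing a stack slot that
 * will be used as a map key.
 */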

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })
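/*
 * Illustrative example: BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 2)
 * encodes "if r3 >= r4 (unsigned) goto pc + 2"; the JMP32 variant
 * compares only the lower 32 bits of the registers.
 */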

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })
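/*
 * Illustrative example: BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2) encodes
 * "if r0 == 0 goto pc + 2", the usual NULL check after a map lookup
 * helper call.
 */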

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })
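/*
 * Illustrative example: a helper call is a raw instruction with the
 * helper id in the immediate, e.g.
 *
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem)
 */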

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
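/*
 * Putting it together, a minimal "return 0" program could be built as
 * follows (illustrative sketch; the array is then passed to the
 * BPF_PROG_LOAD command of bpf(2), e.g. via libbpf):
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	r0 = 0
 *		BPF_EXIT_INSN(),		return r0
 *	};
 */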

#endif /* __BPF_INSN_H */