// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

#ifndef ETH_P_IP
#define ETH_P_IP 0x0800
#endif

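/* Prog array used by the tail call tests further below; slot 0 is expected to
 * be populated with exception_tail_call_target by the userspace test runner.
 */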
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

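/* Static subprog that always throws with cookie 32; the return statement
 * after bpf_throw() is never reached.
 */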
static __noinline int static_func(u64 i)
{
	bpf_throw(32);
	return i;
}

__noinline int global2static_simple(u64 i)
{
	static_func(i + 2);
	return i - 1;
}

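/* Global subprog that either throws directly (cookie 16) or ends up throwing
 * through the static subprog it calls.
 */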
__noinline int global2static(u64 i)
{
	if (i == ETH_P_IP)
		bpf_throw(16);
	return static_func(i);
}

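/* Static subprog calling a throwing global subprog, so that unwinding crosses
 * the static -> global call boundary.
 */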
static __noinline int static2global(u64 i)
{
	return global2static(i) + i;
}

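/* Throw directly from the main program; the cookie (64) is surfaced as the
 * program's return value by the default exception callback.
 */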
SEC("tc")
int exception_throw_always_1(struct __sk_buff *ctx)
{
	bpf_throw(64);
	return 0;
}

/* In this case, the static subprog always throws, so the global func will
 * never be seen executing past that call; hence the verifier will DCE the
 * remaining instructions. Ensure we are resilient to that.
 */
SEC("tc")
int exception_throw_always_2(struct __sk_buff *ctx)
{
	return global2static_simple(ctx->protocol);
}

SEC("tc")
int exception_throw_unwind_1(struct __sk_buff *ctx)
{
	return static2global(bpf_ntohs(ctx->protocol));
}

SEC("tc")
int exception_throw_unwind_2(struct __sk_buff *ctx)
{
	return static2global(bpf_ntohs(ctx->protocol) - 1);
}

SEC("tc")
int exception_throw_default(struct __sk_buff *ctx)
{
	bpf_throw(0);
	return 1;
}

SEC("tc")
int exception_throw_default_value(struct __sk_buff *ctx)
{
	bpf_throw(5);
	return 1;
}

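/* Tail call tests: the target program throws with cookie 16 after being
 * reached via bpf_tail_call_static() from a static subprog.
 */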
SEC("tc")
int exception_tail_call_target(struct __sk_buff *ctx)
{
	bpf_throw(16);
	return 0;
}

static __noinline
int exception_tail_call_subprog(struct __sk_buff *ctx)
{
	volatile int ret = 10;

	bpf_tail_call_static(ctx, &jmp_table, 0);
	return ret;
}

SEC("tc")
int exception_tail_call(struct __sk_buff *ctx)
{
	volatile int ret = 0;

	ret = exception_tail_call_subprog(ctx);
	return ret + 8;
}

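/* Extension (freplace) test targets: dummy global subprogs whose behavior can
 * be overridden at runtime by attaching an extension program.
 */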
__noinline int exception_ext_global(struct __sk_buff *ctx)
{
	volatile int ret = 0;

	return ret;
}

static __noinline int exception_ext_static(struct __sk_buff *ctx)
{
	return exception_ext_global(ctx);
}

SEC("tc")
int exception_ext(struct __sk_buff *ctx)
{
	return exception_ext_static(ctx);
}

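/* Dummy global subprog invoked from the custom exception callback below; it
 * can itself be extended to modify what the callback returns.
 */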
__noinline int exception_cb_mod_global(u64 cookie)
{
	volatile int ret = 0;

	return ret;
}

/* Example of how the exception callback supplied during verification can still
 * introduce extensions by calling into dummy global functions, and thus alter
 * runtime behavior.
 *
 * Right now we don't allow freplace attachment to the exception callback
 * itself, but should the need arise, this restriction is technically feasible
 * to relax in the future.
 */
__noinline int exception_cb_mod(u64 cookie)
{
	return exception_cb_mod_global(cookie) + cookie + 10;
}

SEC("tc")
__exception_cb(exception_cb_mod)
int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
{
	bpf_throw(25);
	return 0;
}

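/* Static and global subprogs, throwing and non-throwing, used by
 * exception_throw_subprog to cover all call combinations.
 */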
__noinline static int subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}

__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}

__noinline int global_subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}

__noinline int throwing_global_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}

SEC("tc")
int exception_throw_subprog(struct __sk_buff *ctx)
{
	switch (ctx->protocol) {
	case 1:
		return subprog(ctx);
	case 2:
		return global_subprog(ctx);
	case 3:
		return throwing_subprog(ctx);
	case 4:
		return throwing_global_subprog(ctx);
	default:
		break;
	}
	bpf_throw(1);
	return 0;
}

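/* bpf_assert()/bpf_assert_with() exercised from global subprogs: when the
 * asserted condition does not hold, an exception is thrown (carrying
 * cookie + 100 in the _with variants).
 */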
__noinline int assert_nz_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(cookie != 0);
	return 0;
}

__noinline int assert_zero_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, ==, 0));
	return 0;
}

__noinline int assert_neg_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, <, 0));
	return 0;
}

__noinline int assert_pos_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, >, 0));
	return 0;
}

__noinline int assert_negeq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, <=, -1));
	return 0;
}

__noinline int assert_poseq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert(bpf_cmp_unlikely(cookie, >=, 1));
	return 0;
}

__noinline int assert_nz_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(cookie != 0, cookie + 100);
	return 0;
}

__noinline int assert_zero_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, ==, 0), cookie + 100);
	return 0;
}

__noinline int assert_neg_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, <, 0), cookie + 100);
	return 0;
}

__noinline int assert_pos_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, >, 0), cookie + 100);
	return 0;
}

__noinline int assert_negeq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, <=, -1), cookie + 100);
	return 0;
}

__noinline int assert_poseq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_with(bpf_cmp_unlikely(cookie, >=, 1), cookie + 100);
	return 0;
}

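/* Generate one TC entry point per assertion subprog: the plain variants pass
 * a cookie that satisfies the assertion, the _bad_ variants pass one that
 * violates it and is therefore expected to throw.
 */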
#define check_assert(name, cookie, tag)				\
SEC("tc")							\
int exception##tag##name(struct __sk_buff *ctx)			\
{								\
	return name(cookie) + 1;				\
}

check_assert(assert_nz_gfunc, 5, _);
check_assert(assert_zero_gfunc, 0, _);
check_assert(assert_neg_gfunc, -100, _);
check_assert(assert_pos_gfunc, 100, _);
check_assert(assert_negeq_gfunc, -1, _);
check_assert(assert_poseq_gfunc, 1, _);

check_assert(assert_nz_gfunc_with, 5, _);
check_assert(assert_zero_gfunc_with, 0, _);
check_assert(assert_neg_gfunc_with, -100, _);
check_assert(assert_pos_gfunc_with, 100, _);
check_assert(assert_negeq_gfunc_with, -1, _);
check_assert(assert_poseq_gfunc_with, 1, _);

check_assert(assert_nz_gfunc, 0, _bad_);
check_assert(assert_zero_gfunc, 5, _bad_);
check_assert(assert_neg_gfunc, 100, _bad_);
check_assert(assert_pos_gfunc, -100, _bad_);
check_assert(assert_negeq_gfunc, 1, _bad_);
check_assert(assert_poseq_gfunc, -1, _bad_);

check_assert(assert_nz_gfunc_with, 0, _bad_);
check_assert(assert_zero_gfunc_with, 5, _bad_);
check_assert(assert_neg_gfunc_with, 100, _bad_);
check_assert(assert_pos_gfunc_with, -100, _bad_);
check_assert(assert_negeq_gfunc_with, 1, _bad_);
check_assert(assert_poseq_gfunc_with, -1, _bad_);

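/* bpf_assert_range() constrains a value to [begin, end]; the "bad" variants
 * below pass bounds the timestamp cannot satisfy and are thus expected to
 * throw.
 */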
SEC("tc")
int exception_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range(time, 0, ~0ULL);
	return 1;
}

SEC("tc")
int exception_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range_with(time, 0, ~0ULL, 10);
	return 1;
}

SEC("tc")
int exception_bad_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range(time, -100, 100);
	return 1;
}

SEC("tc")
int exception_bad_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range_with(time, -1000, 1000, 10);
	return 1;
}

char _license[] SEC("license") = "GPL";