// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

/* Verifier selftests for BPF global subprograms.  Each test prog's expected
 * verification outcome is pinned by the __success/__failure/__msg
 * annotations (from bpf_misc.h), which match against the verifier log.
 * The deliberately-broken accesses (marked "BOOM") and the exact code
 * patterns must stay as written: the asserted log messages depend on the
 * generated instructions, so do not "clean up" these bodies.
 */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"

int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;

/* out-of-bounds read via an unbounded index: validating this subprog fails */
__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

/* in-bounds read: validates cleanly */
__noinline long global_good(void)
{
	return arr[0];
}

/* fails only transitively, through the call to global_bad() */
__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

__noinline long global_calls_good_only(void)
{
	return global_good();
}

/* never called at runtime: call_dead_subprog is const-false, so the call
 * site below is dead code from the verifier's point of view
 */
__noinline long global_dead(void)
{
	return arr[0] * 2;
}

SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}

SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	return global_calls_bad();
}

/* do out of bounds access forcing verifier to fail verification if this
 * global func is called
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

const volatile bool skip_unsupp_global = true;

/* the always-true guard makes the global_unsupp() call dead code, so the
 * unverifiable subprog is never validated and the prog loads successfully
 */
SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}

/* same call, but reachable: global_unsupp() must now be validated, and fails */
SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	return global_unsupp(&x);
}

long stack[128];

/* untagged pointer arg to a non-static subprog is assumed nullable, so
 * dereferencing without a NULL check must be rejected
 */
__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	return subprog_nullable_ptr_bad(&x);
}

/* anonymous struct behind a typedef: treated as plain (user) memory, not a
 * BTF-known kernel type
 */
typedef struct {
	int x;
} user_struct_t;

__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
{
	return t ? t->x : 0;
}

SEC("?tracepoint")
__failure __log_level(2)
__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
	/* can't pass PTR_TO_CTX as user memory */
	return subprog_user_anon_mem(ctx);
}

SEC("?tracepoint")
__success __log_level(2)
__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
int anon_user_mem_valid(void *ctx)
{
	user_struct_t t = { .x = 42 };

	return subprog_user_anon_mem(&t);
}

/* __arg_nonnull promises the verifier that callers never pass NULL, so the
 * subprog may dereference without checks (and callers are checked instead)
 */
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	return subprog_nonnull_ptr_good(&x, &y);
}

/* this global subprog can be now called from many types of entry progs, each
 * with different context type
 */
/* __arg_ctx marks the void * parameter as the program's context pointer,
 * letting one subprog be shared by entry progs of different types
 */
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

/* canonical raw_tp context type */
__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

/* u64 array is also accepted as a raw_tp context type */
__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

/* same subprogs are callable from writable raw_tp progs */
SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

/* ...and from BTF-enabled tracepoint progs */
SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

/* for classic tracepoints any named struct is accepted as the context type */
struct whatever { };

__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}

__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

/* kprobe context also accepted via the bpf_user_pt_regs_t typedef */
__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}

/* the perf_event register-struct context type differs per architecture */
__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* user_pt_regs typedef is anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

/* canonical perf_event context type */
__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}

__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	/* & 1 keeps the return within the 0/1 range iter progs must produce */
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}

__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

/* tracing (fentry/fexit/etc.) context also accepted as a u64 array */
__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

int acc;

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

/* struct_ops map wiring arg_tag_ctx_struct_ops in as the test_1 callback */
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}

/* struct bpf_dynptr * is recognized as a dynptr arg in global subprogs,
 * allowing dynptr kfuncs to be used on it here
 */
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	/* slice may be served from buf if the data isn't contiguous */
	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}

char _license[] SEC("license") = "GPL";