1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3#include "vmlinux.h"
4#include <bpf/bpf_helpers.h>
5#include <bpf/bpf_tracing.h>
6#include <bpf/bpf_core_read.h>
7#include "bpf_misc.h"
8
9char _license[] SEC("license") = "GPL";
10
/* Scratch destination buffer shared by all subprogs below as the
 * bpf_get_stack() output argument.
 */
static long stack[256];
12
13/*
14 * KPROBE contexts
15 */
16
/* Global (__weak, hence non-inlined) subprog taking the kprobe context via
 * the bpf_user_pt_regs_t typedef; per the s390x note below, the typedef is
 * one of the forms the verifier must accept as a PTR_TO_CTX argument.
 */
__weak int kprobe_typedef_ctx_subprog(bpf_user_pt_regs_t *ctx)
{
	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
21
/* Entry prog: passes its (untyped) context straight down to the
 * typedef-typed global subprog; expected to load successfully.
 */
SEC("?kprobe")
__success
int kprobe_typedef_ctx(void *ctx)
{
	return kprobe_typedef_ctx_subprog(ctx);
}
28
29/* s390x defines:
30 *
31 * typedef user_pt_regs bpf_user_pt_regs_t;
32 * typedef struct { ... } user_pt_regs;
33 *
34 * And so "canonical" underlying struct type is anonymous.
35 * So on s390x only valid ways to have PTR_TO_CTX argument in global subprogs
36 * are:
37 *   - bpf_user_pt_regs_t *ctx (typedef);
38 *   - struct bpf_user_pt_regs_t *ctx (backwards compatible struct hack);
39 *   - void *ctx __arg_ctx (arg:ctx tag)
40 *
41 * Other architectures also allow using underlying struct types (e.g.,
42 * `struct pt_regs *ctx` for x86-64)
43 */
44#ifndef bpf_target_s390
45
/* The arch-specific struct type that __PT_REGS_CAST() resolves the kprobe
 * context to on this architecture (e.g., struct pt_regs on x86-64).
 */
#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))

/* Global subprog spelling the resolved underlying struct type for ctx;
 * valid everywhere except s390x (see comment above), hence the #ifndef.
 */
__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
{
	return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);
}
52
/* Entry prog for the resolved-struct-type ctx variant; expected to load. */
SEC("?kprobe")
__success
int kprobe_resolved_ctx(void *ctx)
{
	return kprobe_struct_ctx_subprog(ctx);
}
59
60#endif
61
/* this is current hack to make this work on old kernels: an empty struct
 * whose tag matches the typedef's name ("backwards compatible struct hack"
 * from the list above) is accepted as a ctx argument type
 */
struct bpf_user_pt_regs_t {};
64
/* Global subprog using the struct-name-matching workaround type for ctx. */
__weak int kprobe_workaround_ctx_subprog(struct bpf_user_pt_regs_t *ctx)
{
	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
69
/* Entry prog for the old-kernel workaround ctx variant; expected to load. */
SEC("?kprobe")
__success
int kprobe_workaround_ctx(void *ctx)
{
	return kprobe_workaround_ctx_subprog(ctx);
}
76
77/*
78 * RAW_TRACEPOINT contexts
79 */
80
/* Global subprog taking the canonical raw tracepoint context struct. */
__weak int raw_tp_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
{
	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
85
/* Raw tracepoint entry prog delegating to the typed subprog; expected to
 * load successfully.
 */
SEC("?raw_tp")
__success
int raw_tp_ctx(void *ctx)
{
	return raw_tp_ctx_subprog(ctx);
}
92
93/*
94 * RAW_TRACEPOINT_WRITABLE contexts
95 */
96
/* Global subprog for the writable raw tracepoint variant; the context
 * struct type is the same bpf_raw_tracepoint_args as the read-only case.
 */
__weak int raw_tp_writable_ctx_subprog(struct bpf_raw_tracepoint_args *ctx)
{
	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
101
102SEC("?raw_tp")
103__success
104int raw_tp_writable_ctx(void *ctx)
105{
106	return raw_tp_writable_ctx_subprog(ctx);
107}
108
109/*
110 * PERF_EVENT contexts
111 */
112
/* Global subprog taking the perf_event program context struct. */
__weak int perf_event_ctx_subprog(struct bpf_perf_event_data *ctx)
{
	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
}
117
/* perf_event entry prog delegating to the typed subprog; expected to load. */
SEC("?perf_event")
__success
int perf_event_ctx(void *ctx)
{
	return perf_event_ctx_subprog(ctx);
}
124
/* this global subprog can be now called from many types of entry progs, each
 * with different context type: the __arg_ctx tag tells the verifier that the
 * otherwise untyped void *ctx is the program's context pointer
 */
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}
132
/* plain (non-ctx) memory argument type, mixed in between __arg_ctx args */
struct my_struct { int x; };
134
/* Multiple __arg_ctx-tagged context arguments interleaved with an ordinary
 * memory pointer; both ctx1 and ctx2 should be usable as context while mem
 * is treated as a normal, NULL-checkable pointer.
 */
__weak int subprog_multi_ctx_tags(void *ctx1 __arg_ctx,
				  struct my_struct *mem,
				  void *ctx2 __arg_ctx)
{
	if (!mem)
		return 0;

	return bpf_get_stack(ctx1, stack, sizeof(stack), 0) +
	       mem->x +
	       bpf_get_stack(ctx2, stack, sizeof(stack), 0);
}
146
/* raw_tp entry prog exercising both the single and the multi __arg_ctx
 * subprogs with a raw tracepoint context; expected to load successfully.
 */
SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	struct my_struct x = { .x = 123 };

	return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
}
155
156SEC("?perf_event")
157__success __log_level(2)
158int arg_tag_ctx_perf(void *ctx)
159{
160	struct my_struct x = { .x = 123 };
161
162	return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
163}
164
165SEC("?kprobe")
166__success __log_level(2)
167int arg_tag_ctx_kprobe(void *ctx)
168{
169	struct my_struct x = { .x = 123 };
170
171	return subprog_ctx_tag(ctx) + subprog_multi_ctx_tags(ctx, &x, ctx);
172}
173