// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";
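/* per-CPU perf event array; bpf_skb_output() streams packet samples
 * through it to user space
 */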
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__type(key, int);
	__type(value, int);
} perf_buf_map SEC(".maps");

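/* wrap a single CO-RE (BTF-relocated) field access: clang records the
 * access offsets and libbpf relocates them against the running kernel's
 * BTF at load time
 */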
#define _(P) (__builtin_preserve_access_index(P))

/* define a few structs that the bpf program needs to access */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};
struct dev_ifalias {
	struct callback_head rcuhead;
};

struct net_device /* same as kernel's struct net_device */ {
	int ifindex;
	struct dev_ifalias *ifalias;
};

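/* minimal mirrors of the kernel's atomic_t/refcount_t, just enough to
 * read skb->users.refs.counter
 */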
typedef struct {
	int counter;
} atomic_t;
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

struct sk_buff {
	/* field names and sizes should match those in the kernel */
	unsigned int len, data_len;
	__u16 mac_len, hdr_len, queue_mapping;
	struct net_device *dev;
	/* order of the fields doesn't matter */
	refcount_t users;
	unsigned char *data;
	char __pkt_type_offset[0];
	char cb[48];
};

struct meta {
	int ifindex;
	__u32 cb32_0;
	__u8 cb8_0;
};

/* TRACE_EVENT(kfree_skb,
 *         TP_PROTO(struct sk_buff *skb, void *location),
 */
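/* tp_btf attaches to the raw tracepoint with BTF-typed arguments, so the
 * verifier lets the program dereference skb pointers directly
 */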
SEC("tp_btf/kfree_skb")
int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
{
	struct net_device *dev;
	struct callback_head *ptr;
	void *func;
	int users;
	unsigned char *data;
	unsigned short pkt_data;
	struct meta meta = {};
	char pkt_type;
	__u32 *cb32;
	__u8 *cb8;

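	/* every access in this statement expression is recorded for CO-RE
	 * relocation, including the multi-level pointer walk through
	 * dev->ifalias->rcuhead
	 */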
	__builtin_preserve_access_index(({
		users = skb->users.refs.counter;
		data = skb->data;
		dev = skb->dev;
		ptr = dev->ifalias->rcuhead.next;
		func = ptr->func;
		cb8 = (__u8 *)&skb->cb;
		cb32 = (__u32 *)&skb->cb;
	}));

	meta.ifindex = _(dev->ifindex);
	meta.cb8_0 = cb8[8];
	meta.cb32_0 = cb32[2];

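	/* __pkt_type_offset marks the byte that holds the kernel's
	 * pkt_type:3 bitfield; the mask below keeps only those three bits
	 */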
	bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type),
			      _(&skb->__pkt_type_offset));
	pkt_type &= 7;

	/* read eth proto: the EtherType field sits at byte offset 12 of
	 * the Ethernet header
	 */
	bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12);

	bpf_printk("rcuhead.next %llx func %llx\n", ptr, func);
	bpf_printk("skb->len %d users %d pkt_type %x\n",
		   _(skb->len), users, pkt_type);
	bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping));
	bpf_printk("dev->ifindex %d data %llx pkt_data %x\n",
		   meta.ifindex, data, pkt_data);
	bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0);

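	/* only sample freed IPv6 skbs (ETH_P_IPV6 == 0x86dd) that still
	 * have a single user and sit on ifindex 1
	 */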
	if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1)
		/* raw tp ignores return value */
		return 0;

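	/* bpf_skb_output() is the tracing-side counterpart of
	 * bpf_perf_event_output() that can also pull payload bytes out of
	 * the skb; the upper 32 bits of the flags word give the byte count
	 */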
	/* send the first 72 bytes of the packet to user space */
	bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU,
		       &meta, sizeof(meta));
	return 0;
}

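/* zero-initialized global flags; user space checks them through the
 * skeleton's view of the .bss section
 */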
struct {
	bool fentry_test_ok;
	bool fexit_test_ok;
} result = {};

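/* fentry/fexit attach to eth_type_trans() itself; the BTF-typed
 * arguments can be dereferenced directly, as in the tp_btf program above
 */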
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
	     unsigned short protocol)
{
	int len, ifindex;

	__builtin_preserve_access_index(({
		len = skb->len;
		ifindex = dev->ifindex;
	}));

	/* fentry sees the full packet including the 14-byte L2 header
	 * (74 == 60 + ETH_HLEN)
	 */
	if (len != 74 || ifindex != 1)
		return 0;
	result.fentry_test_ok = true;
	return 0;
}

134
135SEC("fexit/eth_type_trans")
136int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb, struct net_device *dev,
137	     unsigned short protocol)
138{
139	int len, ifindex;
140
141	__builtin_preserve_access_index(({
142		len = skb->len;
143		ifindex = dev->ifindex;
144	}));
145
146	/* fexit sees packet without L2 header that eth_type_trans should have
147	 * consumed.
148	 */
149	if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1)
150		return 0;
151	result.fexit_test_ok = true;
152	return 0;
153}
154