1// SPDX-License-Identifier: GPL-2.0
2/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */
3
4#include <linux/bpf.h>
5#include <bpf/bpf_helpers.h>
6#include "bpf_misc.h"
7
/*
 * Read a batch of __sk_buff context fields (data, data_end, len, pkt_type,
 * mark, queue_mapping, protocol, vlan_present), write mark back, then do a
 * bounds-checked 1-byte direct packet read via the data/data_end pointers.
 *
 * Expected: accepted when privileged (__success, retval 0); rejected for
 * an unprivileged loader (__failure_unpriv) with the message below —
 * off=76 size=4 corresponds to the first load in the sequence, presumably
 * the skb->data field (TODO confirm against struct __sk_buff layout).
 */
SEC("cgroup/skb")
__description("direct packet read test#1 for CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=76 size=4")
__retval(0)
__naked void test_1_for_cgroup_skb(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u32*)(r1 + %[__sk_buff_len]);		\
	r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	*(u32*)(r1 + %[__sk_buff_mark]) = r6;		\
	r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]);	\
	r8 = *(u32*)(r1 + %[__sk_buff_protocol]);	\
	r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
	  __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
	  __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
	: __clobber_all);
}
42
/*
 * Read a second batch of __sk_buff fields (vlan_tci, vlan_proto, priority,
 * ingress_ifindex, tc_index, hash) and write priority back.
 *
 * Expected: accepted for both privileged and unprivileged loaders
 * (__success __success_unpriv), program returns 0.
 */
SEC("cgroup/skb")
__description("direct packet read test#2 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_2_for_cgroup_skb(void)
{
	asm volatile ("					\
	r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_priority]);	\
	*(u32*)(r1 + %[__sk_buff_priority]) = r6;	\
	r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\
	r8 = *(u32*)(r1 + %[__sk_buff_tc_index]);	\
	r9 = *(u32*)(r1 + %[__sk_buff_hash]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
	  __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
	  __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
	  __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
	  __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
	: __clobber_all);
}
67
/*
 * Exercise read and write access to all five cb[] scratch slots, plus a
 * read of napi_id.  Note napi_id is only read here — the write-to-napi_id
 * case is covered separately below and must be rejected.
 *
 * Expected: accepted for both privileged and unprivileged loaders,
 * program returns 0.
 */
SEC("cgroup/skb")
__description("direct packet read test#3 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_3_for_cgroup_skb(void)
{
	asm volatile ("					\
	r4 = *(u32*)(r1 + %[__sk_buff_cb_0]);		\
	r5 = *(u32*)(r1 + %[__sk_buff_cb_1]);		\
	r6 = *(u32*)(r1 + %[__sk_buff_cb_2]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_cb_3]);		\
	r8 = *(u32*)(r1 + %[__sk_buff_cb_4]);		\
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]);	\
	*(u32*)(r1 + %[__sk_buff_cb_0]) = r4;		\
	*(u32*)(r1 + %[__sk_buff_cb_1]) = r5;		\
	*(u32*)(r1 + %[__sk_buff_cb_2]) = r6;		\
	*(u32*)(r1 + %[__sk_buff_cb_3]) = r7;		\
	*(u32*)(r1 + %[__sk_buff_cb_4]) = r8;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
	  __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
	  __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
	  __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
	  __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
	  __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}
96
/*
 * Read the socket-address fields of __sk_buff: family, IPv4/IPv6 local and
 * remote addresses, and local/remote ports.  The repeated loads into r5/r6
 * are intentional — each one independently verifies that the corresponding
 * remote_ip6[i]/local_ip6[i] word is a permitted context access; the loaded
 * value itself is unused.
 *
 * Expected: accepted for both privileged and unprivileged loaders,
 * program returns 0.
 */
SEC("cgroup/skb")
__description("direct packet read test#4 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_4_for_cgroup_skb(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_family]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]);	\
	r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_remote_port]);	\
	r8 = *(u32*)(r1 + %[__sk_buff_local_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
	  __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
	  __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
	  __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
	  __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
	  __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
	  __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
	  __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
	  __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
	  __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
	  __imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
	  __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
	  __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
	: __clobber_all);
}
134
/*
 * Negative test: reading tc_classid from a CGROUP_SKB program must be
 * rejected by the verifier ("invalid bpf_context access") for both
 * privileged and unprivileged loaders.
 */
SEC("cgroup/skb")
__description("invalid access of tc_classid for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
149
/*
 * Negative test: reading data_meta from a CGROUP_SKB program must be
 * rejected by the verifier ("invalid bpf_context access") for both
 * privileged and unprivileged loaders.
 */
SEC("cgroup/skb")
__description("invalid access of data_meta for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void data_meta_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_data_meta]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}
164
/*
 * Negative test: reading flow_keys from a CGROUP_SKB program must be
 * rejected by the verifier ("invalid bpf_context access") for both
 * privileged and unprivileged loaders.
 */
SEC("cgroup/skb")
__description("invalid access of flow_keys for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void flow_keys_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
	: __clobber_all);
}
179
/*
 * Negative test: napi_id is readable from CGROUP_SKB (see test#3 above),
 * but writing it must be rejected ("invalid bpf_context access") for both
 * privileged and unprivileged loaders.  The preceding read establishes
 * that it is the store, not the load, that fails verification.
 */
SEC("cgroup/skb")
__description("invalid write access to napi_id for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void napi_id_for_cgroup_skb(void)
{
	asm volatile ("					\
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]);	\
	*(u32*)(r1 + %[__sk_buff_napi_id]) = r9;	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}
195
/*
 * Store 0 into skb->tstamp (a 64-bit write).
 *
 * Expected: accepted when privileged (__success, retval 0); rejected for
 * an unprivileged loader (__failure_unpriv) with the message below —
 * off=152 size=8 matches the u64 tstamp store (TODO confirm offset
 * against struct __sk_buff layout).  The read-only counterpart just
 * below succeeds even unprivileged.
 */
SEC("cgroup/skb")
__description("write tstamp from CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=152 size=8")
__retval(0)
__naked void write_tstamp_from_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = 0;						\
	*(u64*)(r1 + %[__sk_buff_tstamp]) = r0;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
212
/*
 * Read skb->tstamp as a u64.  Unlike the write case above, reading tstamp
 * is permitted for both privileged and unprivileged loaders
 * (__success __success_unpriv); program returns 0.
 */
SEC("cgroup/skb")
__description("read tstamp from CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void read_tstamp_from_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
226
227char _license[] SEC("license") = "GPL";
228