// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

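/* Copying the lookup result into r4 before the NULL check is accepted:
 * r4 inherits r0's ID, so the "if r0 == 0" check also promotes r4 to a
 * plain map value pointer.
 */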
SEC("tc")
__description("multiple registers share map_lookup_elem result")
__success __retval(0)
__naked void share_map_lookup_elem_result(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r4 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

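/* ALU arithmetic on a still-unchecked PTR_TO_MAP_VALUE_OR_NULL register
 * is rejected, even when the offsets cancel out to zero.
 */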
SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 1")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_1(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r4 = r0;					\
	r4 += -2;					\
	r4 += 2;					\
	if r0 == 0 goto l0_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

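/* As above, but the ALU op is a bitwise AND on the unchecked pointer. */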
SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 2")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_2(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r4 = r0;					\
	r4 &= -1;					\
	if r0 == 0 goto l0_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

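/* As above, but the ALU op is a left shift on the unchecked pointer. */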
SEC("tc")
__description("alu ops on ptr_to_map_value_or_null, 3")
__failure __msg("R4 pointer arithmetic on map_value_or_null")
__naked void map_value_or_null_3(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r4 = r0;					\
	r4 <<= 1;					\
	if r0 == 0 goto l0_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

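/* r4 holds the first lookup result, but the second helper call clobbers
 * the caller-saved registers r1-r5, leaving r4 unreadable when the store
 * through it is attempted.
 */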
SEC("tc")
__description("invalid memory access with multiple map_lookup_elem calls")
__failure __msg("R4 !read_ok")
__naked void multiple_map_lookup_elem_calls(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	r8 = r1;					\
	r7 = r2;					\
	call %[bpf_map_lookup_elem];			\
	r4 = r0;					\
	r1 = r8;					\
	r2 = r7;					\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

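/* r4 is copied from r0 only after both possible lookups, right before
 * the NULL check, so "if r0 == 0" also guards the store through r4.
 */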
SEC("tc")
__description("valid indirect map_lookup_elem access with 2nd lookup in branch")
__success __retval(0)
__naked void with_2nd_lookup_in_branch(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	r8 = r1;					\
	r7 = r2;					\
	call %[bpf_map_lookup_elem];			\
	r2 = 10;					\
	if r2 != 0 goto l0_%=;				\
	r1 = r8;					\
	r2 = r7;					\
	call %[bpf_map_lookup_elem];			\
l0_%=:	r4 = r0;					\
	if r0 == 0 goto l1_%=;				\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

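/* The branch-taken ("else") path only gives the index a lower bound, so
 * r1 is still unbounded above when it is scaled and added to the map
 * value pointer.
 */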
SEC("socket")
__description("invalid map access from else condition")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void map_access_from_else_condition(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u32*)(r0 + 0);				\
	if r1 >= %[__imm_0] goto l1_%=;			\
	r1 += 1;					\
l1_%=:	r1 <<= 2;					\
	r0 += r1;					\
	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, MAX_ENTRIES-1),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

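/* After "if r6 == 0" falls through, r6 is known to be non-NULL, so the
 * second check always jumps to the exit and the bogus frame pointer
 * arithmetic is dead code.
 */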
SEC("tc")
__description("map lookup and null branch prediction")
__success __retval(0)
__naked void lookup_and_null_branch_prediction(void)
{
	asm volatile ("					\
	r1 = 10;					\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r6 = r0;					\
	if r6 == 0 goto l0_%=;				\
	if r6 != 0 goto l0_%=;				\
	r10 += 10;					\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

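/* Exercises check_ids() in regsafe(): in one path r9 copies r8 and the
 * two share an ID, so the r9 NULL check also proves r8; in the other
 * path it does not, so the read through r8 must still be rejected and
 * state pruning must not treat the two paths as equivalent.
 */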
SEC("cgroup/skb")
__description("MAP_VALUE_OR_NULL check_ids() in regsafe()")
__failure __msg("R8 invalid mem access 'map_value_or_null'")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void null_check_ids_in_regsafe(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	/* r9 = map_lookup_elem(...) */			\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r9 = r0;					\
	/* r8 = map_lookup_elem(...) */			\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r8 = r0;					\
	/* r7 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r7 = r0;					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* if r6 > r7 goto +1    ; no new information about the state is derived from\
	 *                       ; this check, thus produced verifier states differ\
	 *                       ; only in 'insn_idx'	\
	 * r9 = r8               ; optionally share ID between r9 and r8\
	 */						\
	if r6 > r7 goto l0_%=;				\
	r9 = r8;					\
l0_%=:	/* if r9 == 0 goto <exit> */			\
	if r9 == 0 goto l1_%=;				\
	/* read map value via r8, this is not always	\
	 * safe because r8 might not be equal to r9.	\
	 */						\
	r0 = *(u64*)(r8 + 0);				\
l1_%=:	/* exit 0 */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";