/* $NetBSD: t_futex_robust.c,v 1.2 2020/05/01 01:44:30 thorpej Exp $ */

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__COPYRIGHT("@(#) Copyright (c) 2019\
 The NetBSD Foundation, inc. All rights reserved.");
__RCSID("$NetBSD: t_futex_robust.c,v 1.2 2020/05/01 01:44:30 thorpej Exp $");

#include <sys/mman.h>
#include <errno.h>
#include <lwp.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#include <atf-c.h>

#include <libc/include/futex_private.h>

#define	STACK_SIZE	65536
#define	NLOCKS		16

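/*
 * Two mock lock layouts exercise both signs of the robust list's
 * futex_offset: in futex_lock_pos the futex word follows the list
 * entry (positive offset), in futex_lock_neg it precedes it
 * (negative offset).
 */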
struct futex_lock_pos {
	struct futex_robust_list	list;
	int				fword;
};
struct futex_lock_pos pos_locks[NLOCKS];

struct futex_lock_neg {
	int				fword;
	struct futex_robust_list	list;
};
struct futex_lock_neg neg_locks[NLOCKS];

struct lwp_data {
	ucontext_t	context;
	void		*stack_base;
	lwpid_t		lwpid;
	lwpid_t		threadid;
	struct futex_robust_list_head rhead;

	/* Results to be asserted by main thread. */
	bool		set_robust_list_failed;
};

struct lwp_data lwp_data;

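/*
 * Initialize lwp_data with a fresh stack and a context that will run
 * "func" with a pointer to lwp_data as its argument.
 */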
static void
setup_lwp_context(void (*func)(void *))
{

	memset(&lwp_data, 0, sizeof(lwp_data));
	lwp_data.stack_base = mmap(NULL, STACK_SIZE,
	    PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_STACK | MAP_PRIVATE, -1, 0);
	ATF_REQUIRE(lwp_data.stack_base != MAP_FAILED);
	_lwp_makecontext(&lwp_data.context, func,
	    &lwp_data, NULL, lwp_data.stack_base, STACK_SIZE);
	lwp_data.threadid = 0;
}

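/* Tear down the test LWP's stack and reset all shared state. */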
static void
do_cleanup(void)
{
	if (lwp_data.stack_base != NULL &&
	    lwp_data.stack_base != MAP_FAILED) {
		(void) munmap(lwp_data.stack_base, STACK_SIZE);
	}
	memset(&lwp_data, 0, sizeof(lwp_data));
	memset(pos_locks, 0, sizeof(pos_locks));
	memset(neg_locks, 0, sizeof(neg_locks));
}

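/*
 * LWP body: register a robust list, link NLOCKS-1 "held" locks onto it,
 * record one more lock as pending, and exit without releasing anything,
 * leaving the futex words for the kernel's robust list processing.
 */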
static void
test_pos_robust_list(void *arg)
{
	struct lwp_data *d = arg;
	int i;

	d->rhead.list.next = &d->rhead.list;
	d->rhead.futex_offset = offsetof(struct futex_lock_pos, fword) -
	    offsetof(struct futex_lock_pos, list);
	d->rhead.pending_list = NULL;

	if (__futex_set_robust_list(&d->rhead, sizeof(d->rhead)) != 0) {
		d->set_robust_list_failed = true;
		_lwp_exit();
	}

	memset(pos_locks, 0, sizeof(pos_locks));

	d->threadid = _lwp_self();

	for (i = 0; i < NLOCKS-1; i++) {
		pos_locks[i].fword = _lwp_self();
		pos_locks[i].list.next = d->rhead.list.next;
		d->rhead.list.next = &pos_locks[i].list;
	}

	pos_locks[i].fword = _lwp_self();
	d->rhead.pending_list = &pos_locks[i].list;

	_lwp_exit();
}

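/*
 * Same as test_pos_robust_list, but with the layout whose futex word
 * precedes the list entry, i.e. a negative futex_offset.
 */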
static void
test_neg_robust_list(void *arg)
{
	struct lwp_data *d = arg;
	int i;

	d->rhead.list.next = &d->rhead.list;
	d->rhead.futex_offset = offsetof(struct futex_lock_neg, fword) -
	    offsetof(struct futex_lock_neg, list);
	d->rhead.pending_list = NULL;

	if (__futex_set_robust_list(&d->rhead, sizeof(d->rhead)) != 0) {
		d->set_robust_list_failed = true;
		_lwp_exit();
	}

	memset(neg_locks, 0, sizeof(neg_locks));

	d->threadid = _lwp_self();

	for (i = 0; i < NLOCKS-1; i++) {
		neg_locks[i].fword = _lwp_self();
		neg_locks[i].list.next = d->rhead.list.next;
		d->rhead.list.next = &neg_locks[i].list;
	}

	neg_locks[i].fword = _lwp_self();
	d->rhead.pending_list = &neg_locks[i].list;

	_lwp_exit();
}

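/*
 * Register a robust list head pointer that does not reference mapped
 * memory; the kernel has to tolerate the invalid pointer at LWP exit.
 */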
static void
test_unmapped_robust_list(void *arg)
{
	struct lwp_data *d = arg;

	d->rhead.list.next = &d->rhead.list;
	d->rhead.futex_offset = offsetof(struct futex_lock_pos, fword) -
	    offsetof(struct futex_lock_pos, list);
	d->rhead.pending_list = NULL;

	if (__futex_set_robust_list((void *)sizeof(d->rhead),
				    sizeof(d->rhead)) != 0) {
		d->set_robust_list_failed = true;
		_lwp_exit();
	}

	memset(pos_locks, 0, sizeof(pos_locks));

	d->threadid = _lwp_self();

	_lwp_exit();
}

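/*
 * Build a robust list that loops back on itself; the kernel's list
 * walk must terminate and still mark the entries it visited.
 */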
static void
test_evil_circular_robust_list(void *arg)
{
	struct lwp_data *d = arg;
	int i;

	d->rhead.list.next = &d->rhead.list;
	d->rhead.futex_offset = offsetof(struct futex_lock_pos, fword) -
	    offsetof(struct futex_lock_pos, list);
	d->rhead.pending_list = NULL;

	if (__futex_set_robust_list(&d->rhead, sizeof(d->rhead)) != 0) {
		d->set_robust_list_failed = true;
		_lwp_exit();
	}

	memset(pos_locks, 0, sizeof(pos_locks));

	d->threadid = _lwp_self();

	for (i = 0; i < NLOCKS; i++) {
		pos_locks[i].fword = _lwp_self();
		pos_locks[i].list.next = d->rhead.list.next;
		d->rhead.list.next = &pos_locks[i].list;
	}

	/* Make a loop. */
	pos_locks[0].list.next = pos_locks[NLOCKS-1].list.next;

	_lwp_exit();
}

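/*
 * Like the positive-offset case, but leave a garbage pending_list
 * pointer behind; the regular robust list entries should still be
 * processed at exit.
 */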
static void
test_bad_pending_robust_list(void *arg)
{
	struct lwp_data *d = arg;
	int i;

	d->rhead.list.next = &d->rhead.list;
	d->rhead.futex_offset = offsetof(struct futex_lock_pos, fword) -
	    offsetof(struct futex_lock_pos, list);
	d->rhead.pending_list = NULL;

	if (__futex_set_robust_list(&d->rhead, sizeof(d->rhead)) != 0) {
		d->set_robust_list_failed = true;
		_lwp_exit();
	}

	memset(pos_locks, 0, sizeof(pos_locks));

	d->threadid = _lwp_self();

	for (i = 0; i < NLOCKS; i++) {
		pos_locks[i].fword = _lwp_self();
		pos_locks[i].list.next = d->rhead.list.next;
		d->rhead.list.next = &pos_locks[i].list;
	}

	d->rhead.pending_list = (void *)sizeof(d->rhead);

	_lwp_exit();
}

ATF_TC_WITH_CLEANUP(futex_robust_positive);
ATF_TC_HEAD(futex_robust_positive, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "checks futex robust list with positive futex word offset");
}

ATF_TC_BODY(futex_robust_positive, tc)
{
	int i;

	setup_lwp_context(test_pos_robust_list);

	ATF_REQUIRE(_lwp_create(&lwp_data.context, 0, &lwp_data.lwpid) == 0);
	ATF_REQUIRE(_lwp_wait(lwp_data.lwpid, NULL) == 0);

	ATF_REQUIRE(lwp_data.set_robust_list_failed == false);

	for (i = 0; i < NLOCKS; i++) {
		ATF_REQUIRE((pos_locks[i].fword & FUTEX_TID_MASK) ==
		    lwp_data.threadid);
		ATF_REQUIRE((pos_locks[i].fword & FUTEX_OWNER_DIED) != 0);
	}
}

ATF_TC_CLEANUP(futex_robust_positive, tc)
{
	do_cleanup();
}

ATF_TC_WITH_CLEANUP(futex_robust_negative);
ATF_TC_HEAD(futex_robust_negative, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "checks futex robust list with negative futex word offset");
}

ATF_TC_BODY(futex_robust_negative, tc)
{
	int i;

	setup_lwp_context(test_neg_robust_list);

	ATF_REQUIRE(_lwp_create(&lwp_data.context, 0, &lwp_data.lwpid) == 0);
	ATF_REQUIRE(_lwp_wait(lwp_data.lwpid, NULL) == 0);

	ATF_REQUIRE(lwp_data.set_robust_list_failed == false);

	for (i = 0; i < NLOCKS; i++) {
		ATF_REQUIRE((neg_locks[i].fword & FUTEX_TID_MASK) ==
		    lwp_data.threadid);
		ATF_REQUIRE((neg_locks[i].fword & FUTEX_OWNER_DIED) != 0);
	}
}

ATF_TC_CLEANUP(futex_robust_negative, tc)
{
	do_cleanup();
}

ATF_TC_WITH_CLEANUP(futex_robust_unmapped);
ATF_TC_HEAD(futex_robust_unmapped, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "checks futex robust list with unmapped robust list pointer");
}

ATF_TC_BODY(futex_robust_unmapped, tc)
{

	setup_lwp_context(test_unmapped_robust_list);

	ATF_REQUIRE(_lwp_create(&lwp_data.context, 0, &lwp_data.lwpid) == 0);
	ATF_REQUIRE(_lwp_wait(lwp_data.lwpid, NULL) == 0);

	ATF_REQUIRE(lwp_data.set_robust_list_failed == false);

	/*
	 * No additional validation; just exercises a code path
	 * in the kernel.
	 */
}

ATF_TC_CLEANUP(futex_robust_unmapped, tc)
{
	do_cleanup();
}

ATF_TC_WITH_CLEANUP(futex_robust_evil_circular);
ATF_TC_HEAD(futex_robust_evil_circular, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "checks futex robust list processing faced with a deliberately "
	    "circular list");
}

ATF_TC_BODY(futex_robust_evil_circular, tc)
{
	int i;

	setup_lwp_context(test_evil_circular_robust_list);

	ATF_REQUIRE(_lwp_create(&lwp_data.context, 0, &lwp_data.lwpid) == 0);
	ATF_REQUIRE(_lwp_wait(lwp_data.lwpid, NULL) == 0);

	ATF_REQUIRE(lwp_data.set_robust_list_failed == false);

	for (i = 0; i < NLOCKS; i++) {
		ATF_REQUIRE((pos_locks[i].fword & FUTEX_TID_MASK) ==
		    lwp_data.threadid);
		ATF_REQUIRE((pos_locks[i].fword & FUTEX_OWNER_DIED) != 0);
	}
}

ATF_TC_CLEANUP(futex_robust_evil_circular, tc)
{
	do_cleanup();
}

ATF_TC_WITH_CLEANUP(futex_robust_bad_pending);
ATF_TC_HEAD(futex_robust_bad_pending, tc)
{
	atf_tc_set_md_var(tc, "descr",
	    "checks futex robust list processing with a bad pending pointer");
}

ATF_TC_BODY(futex_robust_bad_pending, tc)
{
	int i;

	setup_lwp_context(test_bad_pending_robust_list);

	ATF_REQUIRE(_lwp_create(&lwp_data.context, 0, &lwp_data.lwpid) == 0);
	ATF_REQUIRE(_lwp_wait(lwp_data.lwpid, NULL) == 0);

	ATF_REQUIRE(lwp_data.set_robust_list_failed == false);

	for (i = 0; i < NLOCKS; i++) {
		ATF_REQUIRE((pos_locks[i].fword & FUTEX_TID_MASK) ==
		    lwp_data.threadid);
		ATF_REQUIRE((pos_locks[i].fword & FUTEX_OWNER_DIED) != 0);
	}
}

ATF_TC_CLEANUP(futex_robust_bad_pending, tc)
{
	do_cleanup();
}

ATF_TP_ADD_TCS(tp)
{
	ATF_TP_ADD_TC(tp, futex_robust_positive);
	ATF_TP_ADD_TC(tp, futex_robust_negative);
	ATF_TP_ADD_TC(tp, futex_robust_unmapped);
	ATF_TP_ADD_TC(tp, futex_robust_evil_circular);
	ATF_TP_ADD_TC(tp, futex_robust_bad_pending);

	return atf_no_error();
}