/*-
 * Copyright (c) 2004 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/tests/sys/aio/aio_test.c 330222 2018-03-01 06:54:03Z eadler $
 */

/*
 * Regression test to do some very basic AIO exercising on several types of
 * file descriptors.  Currently, the tests consist of initializing a
 * fixed-size buffer with pseudo-random data, writing it to one fd using AIO,
 * then reading it from a second descriptor using AIO.  For some targets, the
 * same fd is used for write and read (e.g., file, md device), but for others
 * the operation is performed on a peer (pty, socket, fifo, etc).  For each
 * file descriptor type, several completion methods are tested.  This test
 * program does not attempt to exercise error cases or more subtle
 * asynchronous behavior; it just makes sure that the basic operations work
 * on some basic object types.
 */

#include <sys/param.h>
#include <sys/module.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mdioctl.h>

#include <aio.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <limits.h>
#include <semaphore.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

#include <atf-c.h>

#include "freebsd_test_suite/macros.h"
#include "local.h"

/*
 * GLOBAL_MAX sets the largest usable buffer size to be read and written, as
 * it sizes ac_buffer in the aio_context structure.  It is also the default
 * size for file I/O.  For other types, we use smaller blocks or we risk
 * blocking (and we run in a single process/thread so that would be bad).
 */
#define	GLOBAL_MAX	16384

#define	BUFFER_MAX	GLOBAL_MAX

/*
 * A completion function will block until the aio has completed, then return
 * the result of the aio.  errno will be set appropriately.
 */
typedef ssize_t (*completion)(struct aiocb*);

struct aio_context {
	int		 ac_read_fd, ac_write_fd;
	long		 ac_seed;
	char		 ac_buffer[GLOBAL_MAX];
	int		 ac_buflen;
	int		 ac_seconds;
};

static sem_t		completions;

/*
 * Fill a buffer given a seed that can be fed into srandom() to initialize
 * the PRNG in a repeatable manner.
 */
static void
aio_fill_buffer(char *buffer, int len, long seed)
{
	char ch;
	int i;

	srandom(seed);
	for (i = 0; i < len; i++) {
		ch = random() & 0xff;
		buffer[i] = ch;
	}
}

/*
 * Test that a buffer matches a given seed.  See aio_fill_buffer().  Return
 * (1) on a match, (0) on a mismatch.
 */
static int
aio_test_buffer(char *buffer, int len, long seed)
{
	char ch;
	int i;

	srandom(seed);
	for (i = 0; i < len; i++) {
		ch = random() & 0xff;
		if (buffer[i] != ch)
			return (0);
	}
	return (1);
}

/*
 * Initialize a testing context given the file descriptors provided by the
 * test setup.
 */
static void
aio_context_init(struct aio_context *ac, int read_fd,
    int write_fd, int buflen)
{

	ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
	    "aio_context_init: buffer too large (%d > %d)",
	    buflen, BUFFER_MAX);
	bzero(ac, sizeof(*ac));
	ac->ac_read_fd = read_fd;
	ac->ac_write_fd = write_fd;
	ac->ac_buflen = buflen;
	srandomdev();
	ac->ac_seed = random();
	aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
	ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
	    ac->ac_seed) != 0, "aio_test_buffer: internal error");
}

static ssize_t
poll(struct aiocb *aio)
{
	int error;

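	/* Poll the request every 25ms until it leaves EINPROGRESS. */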
	while ((error = aio_error(aio)) == EINPROGRESS)
		usleep(25000);
	if (error)
		return (error);
	else
		return (aio_return(aio));
}

static void
sigusr1_handler(int sig __unused)
{
	ATF_REQUIRE_EQ(0, sem_post(&completions));
}

static void
thr_handler(union sigval sv __unused)
{
	ATF_REQUIRE_EQ(0, sem_post(&completions));
}

static ssize_t
poll_signaled(struct aiocb *aio)
{
	int error;

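	/* Wait for the signal or thread notification to post the semaphore. */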
	ATF_REQUIRE_EQ(0, sem_wait(&completions));
	error = aio_error(aio);
	switch (error) {
	case EINPROGRESS:
		errno = EINTR;
		return (-1);
	case 0:
		return (aio_return(aio));
	default:
		return (error);
	}
}

/*
 * Set up a signal handler for the signal delivery tests.
 * This isn't thread-safe, but that's OK since ATF runs each test case in a
 * separate process.
 */
static struct sigevent *
setup_signal(void)
{
	static struct sigevent sev;

	ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGUSR1;
	ATF_REQUIRE(SIG_ERR != signal(SIGUSR1, sigusr1_handler));
	return (&sev);
}

/*
 * Set up a thread notification for the thread delivery tests.
 * This isn't thread-safe, but that's OK since ATF runs each test case in a
 * separate process.
 */
static struct sigevent *
setup_thread(void)
{
	static struct sigevent sev;

	ATF_REQUIRE_EQ(0, sem_init(&completions, false, 0));
	sev.sigev_notify = SIGEV_THREAD;
	sev.sigev_notify_function = thr_handler;
	sev.sigev_notify_attributes = NULL;
	return (&sev);
}

static ssize_t
suspend(struct aiocb *aio)
{
	const struct aiocb *const iocbs[] = {aio};
	int error;

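	/* Block in aio_suspend() until the single queued request completes. */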
	error = aio_suspend(iocbs, 1, NULL);
	if (error == 0)
		return (aio_return(aio));
	else
		return (error);
}

static ssize_t
waitcomplete(struct aiocb *aio)
{
	struct aiocb *aiop;
	ssize_t ret;

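	/* Dequeue the next completed request; it must be the one we queued. */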
	ret = aio_waitcomplete(&aiop, NULL);
	ATF_REQUIRE_EQ(aio, aiop);
	return (ret);
}

/*
 * Perform a simple write test of our initialized data buffer to the provided
 * file descriptor.
 */
static void
aio_write_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
	struct aiocb aio;
	ssize_t len;

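	/* Describe a single asynchronous write of the whole test buffer. */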
	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_write_fd;
	aio.aio_offset = 0;
	if (sev)
		aio.aio_sigevent = *sev;

	if (aio_write(&aio) < 0)
		atf_tc_fail("aio_write failed: %s", strerror(errno));

	len = comp(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	if (len != ac->ac_buflen)
		atf_tc_fail("aio short write (%jd)", (intmax_t)len);
}

/*
 * Perform a simple read test of our initialized data buffer from the
 * provided file descriptor.
 */
static void
aio_read_test(struct aio_context *ac, completion comp, struct sigevent *sev)
{
	struct aiocb aio;
	ssize_t len;

	bzero(ac->ac_buffer, ac->ac_buflen);
	bzero(&aio, sizeof(aio));
	aio.aio_buf = ac->ac_buffer;
	aio.aio_nbytes = ac->ac_buflen;
	aio.aio_fildes = ac->ac_read_fd;
	aio.aio_offset = 0;
	if (sev)
		aio.aio_sigevent = *sev;

	if (aio_read(&aio) < 0)
		atf_tc_fail("aio_read failed: %s", strerror(errno));

	len = comp(&aio);
	if (len < 0)
		atf_tc_fail("aio failed: %s", strerror(errno));

	ATF_REQUIRE_EQ_MSG(len, ac->ac_buflen,
	    "aio short read (%jd)", (intmax_t)len);

	if (aio_test_buffer(ac->ac_buffer, ac->ac_buflen, ac->ac_seed) == 0)
		atf_tc_fail("buffer mismatched");
}

/*
 * Series of type-specific tests for AIO.  For now, we just make sure we can
 * issue a write and then a read to each type.  We assume that once a write
 * is issued, a read can follow.
 */

/*
 * Test with a classic file.  Assumes we can create a moderate size temporary
 * file.
 */
#define	FILE_LEN	GLOBAL_MAX
#define	FILE_PATHNAME	"testfile"

static void
aio_file_test(completion comp, struct sigevent *sev)
{
	struct aio_context ac;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	aio_context_init(&ac, fd, fd, FILE_LEN);
	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);
	close(fd);
}

ATF_TC_WITHOUT_HEAD(file_poll);
ATF_TC_BODY(file_poll, tc)
{
	aio_file_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(file_signal);
ATF_TC_BODY(file_signal, tc)
{
	aio_file_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(file_suspend);
ATF_TC_BODY(file_suspend, tc)
{
	aio_file_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(file_thread);
ATF_TC_BODY(file_thread, tc)
{
	aio_file_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(file_waitcomplete);
ATF_TC_BODY(file_waitcomplete, tc)
{
	aio_file_test(waitcomplete, NULL);
}

#define	FIFO_LEN	256
#define	FIFO_PATHNAME	"testfifo"

static void
aio_fifo_test(completion comp, struct sigevent *sev)
{
	int read_fd = -1, write_fd = -1;
	struct aio_context ac;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(mkfifo(FIFO_PATHNAME, 0600) != -1,
	    "mkfifo failed: %s", strerror(errno));

	read_fd = open(FIFO_PATHNAME, O_RDONLY | O_NONBLOCK);
	if (read_fd == -1)
		atf_tc_fail("read_fd open failed: %s", strerror(errno));

	write_fd = open(FIFO_PATHNAME, O_WRONLY);
	if (write_fd == -1)
		atf_tc_fail("write_fd open failed: %s", strerror(errno));

	aio_context_init(&ac, read_fd, write_fd, FIFO_LEN);
	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(read_fd);
	close(write_fd);
}

ATF_TC_WITHOUT_HEAD(fifo_poll);
ATF_TC_BODY(fifo_poll, tc)
{
	aio_fifo_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(fifo_signal);
ATF_TC_BODY(fifo_signal, tc)
{
	aio_fifo_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(fifo_suspend);
ATF_TC_BODY(fifo_suspend, tc)
{
	aio_fifo_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(fifo_thread);
ATF_TC_BODY(fifo_thread, tc)
{
	aio_fifo_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(fifo_waitcomplete);
ATF_TC_BODY(fifo_waitcomplete, tc)
{
	aio_fifo_test(waitcomplete, NULL);
}

#define	UNIX_SOCKETPAIR_LEN	256
static void
aio_unix_socketpair_test(completion comp, struct sigevent *sev)
{
	struct aio_context ac;
	struct rusage ru_before, ru_after;
	int sockets[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE_MSG(socketpair(PF_UNIX, SOCK_STREAM, 0, sockets) != -1,
	    "socketpair failed: %s", strerror(errno));

	aio_context_init(&ac, sockets[0], sockets[1], UNIX_SOCKETPAIR_LEN);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_before) != -1,
	    "getrusage failed: %s", strerror(errno));
	aio_write_test(&ac, comp, sev);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	ATF_REQUIRE(ru_after.ru_msgsnd == ru_before.ru_msgsnd + 1);
	ru_before = ru_after;
	aio_read_test(&ac, comp, sev);
	ATF_REQUIRE_MSG(getrusage(RUSAGE_SELF, &ru_after) != -1,
	    "getrusage failed: %s", strerror(errno));
	ATF_REQUIRE(ru_after.ru_msgrcv == ru_before.ru_msgrcv + 1);

	close(sockets[0]);
	close(sockets[1]);
}

ATF_TC_WITHOUT_HEAD(socket_poll);
ATF_TC_BODY(socket_poll, tc)
{
	aio_unix_socketpair_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(socket_signal);
ATF_TC_BODY(socket_signal, tc)
{
	aio_unix_socketpair_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(socket_suspend);
ATF_TC_BODY(socket_suspend, tc)
{
	aio_unix_socketpair_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(socket_thread);
ATF_TC_BODY(socket_thread, tc)
{
	aio_unix_socketpair_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(socket_waitcomplete);
ATF_TC_BODY(socket_waitcomplete, tc)
{
	aio_unix_socketpair_test(waitcomplete, NULL);
}

struct aio_pty_arg {
	int	apa_read_fd;
	int	apa_write_fd;
};

#define	PTY_LEN		256
static void
aio_pty_test(completion comp, struct sigevent *sev)
{
	struct aio_context ac;
	int read_fd, write_fd;
	struct termios ts;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(openpty(&read_fd, &write_fd, NULL, NULL, NULL) == 0,
	    "openpty failed: %s", strerror(errno));

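	/*
	 * Put the pty into raw mode so that tty input/output processing does
	 * not modify the data as it passes through.
	 */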
	if (tcgetattr(write_fd, &ts) < 0)
		atf_tc_fail("tcgetattr failed: %s", strerror(errno));
	cfmakeraw(&ts);
	if (tcsetattr(write_fd, TCSANOW, &ts) < 0)
		atf_tc_fail("tcsetattr failed: %s", strerror(errno));

	aio_context_init(&ac, read_fd, write_fd, PTY_LEN);

	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(read_fd);
	close(write_fd);
}

ATF_TC_WITHOUT_HEAD(pty_poll);
ATF_TC_BODY(pty_poll, tc)
{
	aio_pty_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(pty_signal);
ATF_TC_BODY(pty_signal, tc)
{
	aio_pty_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(pty_suspend);
ATF_TC_BODY(pty_suspend, tc)
{
	aio_pty_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(pty_thread);
ATF_TC_BODY(pty_thread, tc)
{
	aio_pty_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(pty_waitcomplete);
ATF_TC_BODY(pty_waitcomplete, tc)
{
	aio_pty_test(waitcomplete, NULL);
}

#define	PIPE_LEN	256
static void
aio_pipe_test(completion comp, struct sigevent *sev)
{
	struct aio_context ac;
	int pipes[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	ATF_REQUIRE_MSG(pipe(pipes) != -1,
	    "pipe failed: %s", strerror(errno));

	aio_context_init(&ac, pipes[0], pipes[1], PIPE_LEN);
	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(pipes[0]);
	close(pipes[1]);
}

ATF_TC_WITHOUT_HEAD(pipe_poll);
ATF_TC_BODY(pipe_poll, tc)
{
	aio_pipe_test(poll, NULL);
}

ATF_TC_WITHOUT_HEAD(pipe_signal);
ATF_TC_BODY(pipe_signal, tc)
{
	aio_pipe_test(poll_signaled, setup_signal());
}

ATF_TC_WITHOUT_HEAD(pipe_suspend);
ATF_TC_BODY(pipe_suspend, tc)
{
	aio_pipe_test(suspend, NULL);
}

ATF_TC_WITHOUT_HEAD(pipe_thread);
ATF_TC_BODY(pipe_thread, tc)
{
	aio_pipe_test(poll_signaled, setup_thread());
}

ATF_TC_WITHOUT_HEAD(pipe_waitcomplete);
ATF_TC_BODY(pipe_waitcomplete, tc)
{
	aio_pipe_test(waitcomplete, NULL);
}

#define	MD_LEN		GLOBAL_MAX
#define	MDUNIT_LINK	"mdunit_link"

static void
aio_md_cleanup(void)
{
	struct md_ioctl mdio;
	int mdctl_fd, error, n, unit;
	char buf[80];

	mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
	ATF_REQUIRE(mdctl_fd >= 0);
	n = readlink(MDUNIT_LINK, buf, sizeof(buf) - 1);
	if (n > 0) {
		/* readlink() does not NUL-terminate the buffer. */
		buf[n] = '\0';
		if (sscanf(buf, "%d", &unit) == 1 && unit >= 0) {
			bzero(&mdio, sizeof(mdio));
			mdio.md_version = MDIOVERSION;
			mdio.md_unit = unit;
			if (ioctl(mdctl_fd, MDIOCDETACH, &mdio) == -1) {
				error = errno;
				close(mdctl_fd);
				errno = error;
				atf_tc_fail("ioctl MDIOCDETACH failed: %s",
				    strerror(errno));
			}
		}
	}

	close(mdctl_fd);
}

static void
aio_md_test(completion comp, struct sigevent *sev)
{
	int fd, mdctl_fd, unit;
	char pathname[PATH_MAX];
	struct aio_context ac;
	struct md_ioctl mdio;
	char buf[80];

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	mdctl_fd = open("/dev/" MDCTL_NAME, O_RDWR, 0);
	ATF_REQUIRE_MSG(mdctl_fd != -1,
	    "opening /dev/%s failed: %s", MDCTL_NAME, strerror(errno));

	bzero(&mdio, sizeof(mdio));
	mdio.md_version = MDIOVERSION;
	mdio.md_type = MD_MALLOC;
	mdio.md_options = MD_AUTOUNIT | MD_COMPRESS;
	mdio.md_mediasize = GLOBAL_MAX;
	mdio.md_sectorsize = 512;

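	/*
	 * Attach a malloc-backed md(4) device; with MD_AUTOUNIT the kernel
	 * picks a free unit number and returns it in mdio.md_unit.
	 */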
	if (ioctl(mdctl_fd, MDIOCATTACH, &mdio) < 0)
		atf_tc_fail("ioctl MDIOCATTACH failed: %s", strerror(errno));
	close(mdctl_fd);

	/* Store the md unit number in a symlink for future cleanup */
	unit = mdio.md_unit;
	snprintf(buf, sizeof(buf), "%d", unit);
	ATF_REQUIRE_EQ(0, symlink(buf, MDUNIT_LINK));
	snprintf(pathname, PATH_MAX, "/dev/md%d", unit);
	fd = open(pathname, O_RDWR);
	ATF_REQUIRE_MSG(fd != -1,
	    "opening %s failed: %s", pathname, strerror(errno));

	aio_context_init(&ac, fd, fd, MD_LEN);
	aio_write_test(&ac, comp, sev);
	aio_read_test(&ac, comp, sev);

	close(fd);
}

ATF_TC_WITH_CLEANUP(md_poll);
ATF_TC_HEAD(md_poll, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_poll, tc)
{
	aio_md_test(poll, NULL);
}
ATF_TC_CLEANUP(md_poll, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_signal);
ATF_TC_HEAD(md_signal, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_signal, tc)
{
	aio_md_test(poll_signaled, setup_signal());
}
ATF_TC_CLEANUP(md_signal, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_suspend);
ATF_TC_HEAD(md_suspend, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_suspend, tc)
{
	aio_md_test(suspend, NULL);
}
ATF_TC_CLEANUP(md_suspend, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_thread);
ATF_TC_HEAD(md_thread, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_thread, tc)
{
	aio_md_test(poll_signaled, setup_thread());
}
ATF_TC_CLEANUP(md_thread, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITH_CLEANUP(md_waitcomplete);
ATF_TC_HEAD(md_waitcomplete, tc)
{

	atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(md_waitcomplete, tc)
{
	aio_md_test(waitcomplete, NULL);
}
ATF_TC_CLEANUP(md_waitcomplete, tc)
{
	aio_md_cleanup();
}

ATF_TC_WITHOUT_HEAD(aio_large_read_test);
ATF_TC_BODY(aio_large_read_test, tc)
{
	struct aiocb cb, *cbp;
	ssize_t nread;
	size_t len;
	int fd;
#ifdef __LP64__
	int clamped;
#endif

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

#ifdef __LP64__
	len = sizeof(clamped);
	if (sysctlbyname("debug.iosize_max_clamp", &clamped, &len, NULL, 0) ==
	    -1)
		atf_tc_fail("Failed to read debug.iosize_max_clamp: %s",
		    strerror(errno));
#endif

	/* Determine the maximum supported read(2) size. */
	len = SSIZE_MAX;
#ifdef __LP64__
	if (clamped)
		len = INT_MAX;
#endif

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));

	unlink(FILE_PATHNAME);

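	/*
	 * The file is empty, so a successful read transfers no data; the
	 * point is only to check that a request of the maximum size is
	 * accepted at all.
	 */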
	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1)
		atf_tc_fail("aio_read() of maximum read size failed: %s",
		    strerror(errno));

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1)
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	if (nread != 0)
		atf_tc_fail("aio_read() from empty file returned data: %zd",
		    nread);

	memset(&cb, 0, sizeof(cb));
	cb.aio_nbytes = len + 1;
	cb.aio_fildes = fd;
	cb.aio_buf = NULL;
	if (aio_read(&cb) == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_read() of too large read size failed: %s",
		    strerror(errno));
	}

	nread = aio_waitcomplete(&cbp, NULL);
	if (nread == -1) {
		if (errno == EINVAL)
			goto finished;
		atf_tc_fail("aio_waitcomplete() failed: %s", strerror(errno));
	}
	atf_tc_fail("aio_read() of too large read size returned: %zd", nread);

finished:
	close(fd);
}

/*
 * This tests for a bug where arriving socket data can wake up multiple
 * AIO read requests, resulting in an uncancellable request.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_two_reads);
ATF_TC_BODY(aio_socket_two_reads, tc)
{
	struct ioreq {
		struct aiocb iocb;
		char buffer[1024];
	} ioreq[2];
	struct aiocb *iocb;
	unsigned i;
	int s[2];
	char c;

	ATF_REQUIRE_KERNEL_MODULE("aio");
#if __FreeBSD_version < 1100101
	atf_tc_skip("kernel version %d is too old (%d required)",
	    __FreeBSD_version, 1100101);
#endif

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	/* Queue two read requests. */
	memset(&ioreq, 0, sizeof(ioreq));
	for (i = 0; i < nitems(ioreq); i++) {
		ioreq[i].iocb.aio_nbytes = sizeof(ioreq[i].buffer);
		ioreq[i].iocb.aio_fildes = s[0];
		ioreq[i].iocb.aio_buf = ioreq[i].buffer;
		ATF_REQUIRE(aio_read(&ioreq[i].iocb) == 0);
	}

	/* Send a single byte.  This should complete one request. */
	c = 0xc3;
	ATF_REQUIRE(write(s[1], &c, sizeof(c)) == 1);

	ATF_REQUIRE(aio_waitcomplete(&iocb, NULL) == 1);

	/* Determine which request completed and verify the data was read. */
	if (iocb == &ioreq[0].iocb)
		i = 0;
	else
		i = 1;
	ATF_REQUIRE(ioreq[i].buffer[0] == c);

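	/* Switch to the index of the request that is still pending. */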
	i ^= 1;

	/*
	 * Try to cancel the other request.  On broken systems this
	 * will fail and the process will hang on exit.
	 */
	ATF_REQUIRE(aio_error(&ioreq[i].iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[0], &ioreq[i].iocb) == AIO_CANCELED);

	close(s[1]);
	close(s[0]);
}

/*
 * This test ensures that an aio_write() of a "large" buffer to a blocking
 * socket does not return a short completion.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_blocking_short_write);
ATF_TC_BODY(aio_socket_blocking_short_write, tc)
{
	struct aiocb iocb, *iocbp;
	char *buffer[2];
	ssize_t done;
	int buffer_size, sb_size;
	socklen_t len;
	int s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use twice the size of the MAX(receive buffer, send buffer)
	 * to ensure that the write is split up into multiple writes
	 * internally.
	 */
	buffer_size *= 2;

	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size);
	ATF_REQUIRE(buffer[1] != NULL);

	srandomdev();
	aio_fill_buffer(buffer[1], buffer_size, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size;
	ATF_REQUIRE(aio_write(&iocb) == 0);

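	/*
	 * Drain the full payload from the peer so the blocked writer can
	 * make progress and eventually complete in full.
	 */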
	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);

	close(s[1]);
	close(s[0]);
}

/*
 * This test verifies that cancelling a partially completed socket write
 * returns a short write rather than ECANCELED.
 */
ATF_TC_WITHOUT_HEAD(aio_socket_short_write_cancel);
ATF_TC_BODY(aio_socket_short_write_cancel, tc)
{
	struct aiocb iocb, *iocbp;
	char *buffer[2];
	ssize_t done;
	int buffer_size, sb_size;
	socklen_t len;
	int s[2];

	ATF_REQUIRE_KERNEL_MODULE("aio");

	ATF_REQUIRE(socketpair(PF_UNIX, SOCK_STREAM, 0, s) != -1);

	len = sizeof(sb_size);
	ATF_REQUIRE(getsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	buffer_size = sb_size;

	ATF_REQUIRE(getsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &sb_size, &len) !=
	    -1);
	ATF_REQUIRE(len == sizeof(sb_size));
	if (sb_size > buffer_size)
		buffer_size = sb_size;

	/*
	 * Use three times the size of the MAX(receive buffer, send
	 * buffer) for the write to ensure that the write is split up
	 * into multiple writes internally.  The recv() ensures that
	 * the write has partially completed, but a remaining size of
	 * two buffers should ensure that the write has not completed
	 * fully when it is cancelled.
	 */
	buffer[0] = malloc(buffer_size);
	ATF_REQUIRE(buffer[0] != NULL);
	buffer[1] = malloc(buffer_size * 3);
	ATF_REQUIRE(buffer[1] != NULL);

	srandomdev();
	aio_fill_buffer(buffer[1], buffer_size * 3, random());

	memset(&iocb, 0, sizeof(iocb));
	iocb.aio_fildes = s[1];
	iocb.aio_buf = buffer[1];
	iocb.aio_nbytes = buffer_size * 3;
	ATF_REQUIRE(aio_write(&iocb) == 0);

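	/*
	 * Read back one buffer's worth so that the write has made partial
	 * progress before it is cancelled.
	 */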
	done = recv(s[0], buffer[0], buffer_size, MSG_WAITALL);
	ATF_REQUIRE(done == buffer_size);

	ATF_REQUIRE(aio_error(&iocb) == EINPROGRESS);
	ATF_REQUIRE(aio_cancel(s[1], &iocb) == AIO_NOTCANCELED);

	done = aio_waitcomplete(&iocbp, NULL);
	ATF_REQUIRE(iocbp == &iocb);
	ATF_REQUIRE(done >= buffer_size && done <= buffer_size * 2);

	ATF_REQUIRE(memcmp(buffer[0], buffer[1], buffer_size) == 0);

	close(s[1]);
	close(s[0]);
}

/*
 * A basic functional test of aio_fsync().
 */
ATF_TC_WITHOUT_HEAD(aio_fsync_test);
ATF_TC_BODY(aio_fsync_test, tc)
{
	struct aiocb synccb, *iocbp;
	struct {
		struct aiocb iocb;
		bool done;
		char *buffer;
	} buffers[16];
	struct stat sb;
	ssize_t rval;
	unsigned i;
	int fd;

	ATF_REQUIRE_KERNEL_MODULE("aio");
	ATF_REQUIRE_UNSAFE_AIO();

	fd = open(FILE_PATHNAME, O_RDWR | O_CREAT, 0600);
	ATF_REQUIRE_MSG(fd != -1, "open failed: %s", strerror(errno));
	unlink(FILE_PATHNAME);

	ATF_REQUIRE(fstat(fd, &sb) == 0);
	ATF_REQUIRE(sb.st_blksize != 0);
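	/* Size the file so that each queued write lands in its own block. */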
	ATF_REQUIRE(ftruncate(fd, sb.st_blksize * nitems(buffers)) == 0);

	/*
	 * Queue several asynchronous write requests.  Hopefully this
	 * forces the aio_fsync() request to be deferred.  There is no
	 * reliable way to guarantee that, however.
	 */
	srandomdev();
	for (i = 0; i < nitems(buffers); i++) {
		buffers[i].done = false;
		memset(&buffers[i].iocb, 0, sizeof(buffers[i].iocb));
		buffers[i].buffer = malloc(sb.st_blksize);
		aio_fill_buffer(buffers[i].buffer, sb.st_blksize, random());
		buffers[i].iocb.aio_fildes = fd;
		buffers[i].iocb.aio_buf = buffers[i].buffer;
		buffers[i].iocb.aio_nbytes = sb.st_blksize;
		buffers[i].iocb.aio_offset = sb.st_blksize * i;
		ATF_REQUIRE(aio_write(&buffers[i].iocb) == 0);
	}

	/* Queue the aio_fsync request. */
	memset(&synccb, 0, sizeof(synccb));
	synccb.aio_fildes = fd;
	ATF_REQUIRE(aio_fsync(O_SYNC, &synccb) == 0);

	/* Wait for requests to complete. */
	for (;;) {
	next:
		rval = aio_waitcomplete(&iocbp, NULL);
		ATF_REQUIRE(iocbp != NULL);
		if (iocbp == &synccb) {
			ATF_REQUIRE(rval == 0);
			break;
		}

		for (i = 0; i < nitems(buffers); i++) {
			if (iocbp == &buffers[i].iocb) {
				ATF_REQUIRE(buffers[i].done == false);
				ATF_REQUIRE(rval == sb.st_blksize);
				buffers[i].done = true;
				goto next;
			}
		}

		ATF_REQUIRE_MSG(false, "unmatched AIO request");
	}

	for (i = 0; i < nitems(buffers); i++)
		ATF_REQUIRE_MSG(buffers[i].done,
		    "AIO request %u did not complete", i);

	close(fd);
}

ATF_TP_ADD_TCS(tp)
{

	ATF_TP_ADD_TC(tp, file_poll);
	ATF_TP_ADD_TC(tp, file_signal);
	ATF_TP_ADD_TC(tp, file_suspend);
	ATF_TP_ADD_TC(tp, file_thread);
	ATF_TP_ADD_TC(tp, file_waitcomplete);
	ATF_TP_ADD_TC(tp, fifo_poll);
	ATF_TP_ADD_TC(tp, fifo_signal);
	ATF_TP_ADD_TC(tp, fifo_suspend);
	ATF_TP_ADD_TC(tp, fifo_thread);
	ATF_TP_ADD_TC(tp, fifo_waitcomplete);
	ATF_TP_ADD_TC(tp, socket_poll);
	ATF_TP_ADD_TC(tp, socket_signal);
	ATF_TP_ADD_TC(tp, socket_suspend);
	ATF_TP_ADD_TC(tp, socket_thread);
	ATF_TP_ADD_TC(tp, socket_waitcomplete);
	ATF_TP_ADD_TC(tp, pty_poll);
	ATF_TP_ADD_TC(tp, pty_signal);
	ATF_TP_ADD_TC(tp, pty_suspend);
	ATF_TP_ADD_TC(tp, pty_thread);
	ATF_TP_ADD_TC(tp, pty_waitcomplete);
	ATF_TP_ADD_TC(tp, pipe_poll);
	ATF_TP_ADD_TC(tp, pipe_signal);
	ATF_TP_ADD_TC(tp, pipe_suspend);
	ATF_TP_ADD_TC(tp, pipe_thread);
	ATF_TP_ADD_TC(tp, pipe_waitcomplete);
	ATF_TP_ADD_TC(tp, md_poll);
	ATF_TP_ADD_TC(tp, md_signal);
	ATF_TP_ADD_TC(tp, md_suspend);
	ATF_TP_ADD_TC(tp, md_thread);
	ATF_TP_ADD_TC(tp, md_waitcomplete);
	ATF_TP_ADD_TC(tp, aio_fsync_test);
	ATF_TP_ADD_TC(tp, aio_large_read_test);
	ATF_TP_ADD_TC(tp, aio_socket_two_reads);
	ATF_TP_ADD_TC(tp, aio_socket_blocking_short_write);
	ATF_TP_ADD_TC(tp, aio_socket_short_write_cancel);

	return (atf_no_error());
}