vfs_aio.c (121483) vfs_aio.c (122747)
1/*
2 * Copyright (c) 1997 John S. Dyson. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. John S. Dyson's name may not be used to endorse or promote products
10 * derived from this software without specific prior written permission.
11 *
12 * DISCLAIMER: This code isn't warranted to do anything useful. Anything
13 * bad that happens because of using this software isn't the responsibility
14 * of the author. This software is distributed AS-IS.
15 */
16
17/*
18 * This file contains support for the POSIX 1003.1B AIO/LIO facility.
19 */
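/*
 * Userland sketch (illustration only, not part of this file): an
 * application typically drives this facility through the POSIX interfaces
 * backed by the syscalls registered below, roughly:
 *
 *	struct aiocb cb;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = fd;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	cb.aio_offset = 0;
 *	if (aio_read(&cb) == 0) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			;	(poll, sleep, or use a kqueue EVFILT_AIO event)
 *		nbytes = aio_return(&cb);	(nbytes is a ssize_t)
 *	}
 */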
20
21#include <sys/cdefs.h>
22__FBSDID("$FreeBSD: head/sys/kern/vfs_aio.c 121483 2003-10-24 21:07:53Z jmg $");
22__FBSDID("$FreeBSD: head/sys/kern/vfs_aio.c 122747 2003-11-15 09:28:09Z phk $");
23
24#include <sys/param.h>
25#include <sys/systm.h>
26#include <sys/malloc.h>
27#include <sys/bio.h>
28#include <sys/buf.h>
29#include <sys/eventhandler.h>
30#include <sys/sysproto.h>
31#include <sys/filedesc.h>
32#include <sys/kernel.h>
33#include <sys/kthread.h>
34#include <sys/fcntl.h>
35#include <sys/file.h>
36#include <sys/limits.h>
37#include <sys/lock.h>
38#include <sys/mutex.h>
39#include <sys/unistd.h>
40#include <sys/proc.h>
41#include <sys/resourcevar.h>
42#include <sys/signalvar.h>
43#include <sys/protosw.h>
44#include <sys/socketvar.h>
45#include <sys/syscall.h>
46#include <sys/sysent.h>
47#include <sys/sysctl.h>
48#include <sys/sx.h>
49#include <sys/vnode.h>
50#include <sys/conf.h>
51#include <sys/event.h>
52
53#include <posix4/posix4.h>
54#include <vm/vm.h>
55#include <vm/vm_extern.h>
56#include <vm/pmap.h>
57#include <vm/vm_map.h>
58#include <vm/uma.h>
59#include <sys/aio.h>
60
61#include "opt_vfs_aio.h"
62
63/*
64 * Counter for allocating reference ids to new jobs. Wrapped to 1 on
65 * overflow.
66 */
67static long jobrefid;
68
69#define JOBST_NULL 0x0
70#define JOBST_JOBQGLOBAL 0x2
71#define JOBST_JOBRUNNING 0x3
72#define JOBST_JOBFINISHED 0x4
73#define JOBST_JOBQBUF 0x5
74#define JOBST_JOBBFINISHED 0x6
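/*
 * Rough job lifecycle (as used below): a queued request moves
 * JOBST_JOBQGLOBAL -> JOBST_JOBRUNNING -> JOBST_JOBFINISHED when serviced
 * by an aio daemon, or JOBST_JOBQBUF -> JOBST_JOBBFINISHED when handled
 * directly via physio; JOBST_NULL marks an entry that has been freed.
 */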
75
76#ifndef MAX_AIO_PER_PROC
77#define MAX_AIO_PER_PROC 32
78#endif
79
80#ifndef MAX_AIO_QUEUE_PER_PROC
81#define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */
82#endif
83
84#ifndef MAX_AIO_PROCS
85#define MAX_AIO_PROCS 32
86#endif
87
88#ifndef MAX_AIO_QUEUE
89#define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */
90#endif
91
92#ifndef TARGET_AIO_PROCS
93#define TARGET_AIO_PROCS 4
94#endif
95
96#ifndef MAX_BUF_AIO
97#define MAX_BUF_AIO 16
98#endif
99
100#ifndef AIOD_TIMEOUT_DEFAULT
101#define AIOD_TIMEOUT_DEFAULT (10 * hz)
102#endif
103
104#ifndef AIOD_LIFETIME_DEFAULT
105#define AIOD_LIFETIME_DEFAULT (30 * hz)
106#endif
107
108SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
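/*
 * The knobs below live under the vfs.aio.* sysctl tree and can be read or
 * tuned at run time with sysctl(8), e.g. `sysctl vfs.aio.max_aio_procs=8`
 * (illustrative value).
 */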
109
110static int max_aio_procs = MAX_AIO_PROCS;
111SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
112 CTLFLAG_RW, &max_aio_procs, 0,
113 "Maximum number of kernel threads to use for handling async IO ");
114
115static int num_aio_procs = 0;
116SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
117 CTLFLAG_RD, &num_aio_procs, 0,
118 "Number of presently active kernel threads for async IO");
119
120/*
121 * The code will adjust the actual number of AIO processes towards this
122 * number when it gets a chance.
123 */
124static int target_aio_procs = TARGET_AIO_PROCS;
125SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
126 0, "Preferred number of ready kernel threads for async IO");
127
128static int max_queue_count = MAX_AIO_QUEUE;
129SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
130 "Maximum number of aio requests to queue, globally");
131
132static int num_queue_count = 0;
133SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
134 "Number of queued aio requests");
135
136static int num_buf_aio = 0;
137SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
138 "Number of aio requests presently handled by the buf subsystem");
139
 140/* Number of async I/O threads in the process of being started */
141/* XXX This should be local to _aio_aqueue() */
142static int num_aio_resv_start = 0;
143
144static int aiod_timeout;
145SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
146 "Timeout value for synchronous aio operations");
147
148static int aiod_lifetime;
149SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
150 "Maximum lifetime for idle aiod");
151
152static int unloadable = 0;
153SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
154 "Allow unload of aio (not recommended)");
155
156
157static int max_aio_per_proc = MAX_AIO_PER_PROC;
158SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
159 0, "Maximum active aio requests per process (stored in the process)");
160
161static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
162SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
163 &max_aio_queue_per_proc, 0,
164 "Maximum queued aio requests per process (stored in the process)");
165
166static int max_buf_aio = MAX_BUF_AIO;
167SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
168 "Maximum buf aio requests per process (stored in the process)");
169
170struct aiocblist {
171 TAILQ_ENTRY(aiocblist) list; /* List of jobs */
172 TAILQ_ENTRY(aiocblist) plist; /* List of jobs for proc */
173 int jobflags;
174 int jobstate;
175 int inputcharge;
176 int outputcharge;
177 struct callout_handle timeouthandle;
178 struct buf *bp; /* Buffer pointer */
179 struct proc *userproc; /* User process */ /* Not td! */
180 struct ucred *cred; /* Active credential when created */
181 struct file *fd_file; /* Pointer to file structure */
182 struct aio_liojob *lio; /* Optional lio job */
183 struct aiocb *uuaiocb; /* Pointer in userspace of aiocb */
184 struct klist klist; /* list of knotes */
185 struct aiocb uaiocb; /* Kernel I/O control block */
186};
187
188/* jobflags */
189#define AIOCBLIST_RUNDOWN 0x4
190#define AIOCBLIST_DONE 0x10
191
192/*
193 * AIO process info
194 */
195#define AIOP_FREE 0x1 /* proc on free queue */
196#define AIOP_SCHED 0x2 /* proc explicitly scheduled */
197
198struct aiothreadlist {
199 int aiothreadflags; /* AIO proc flags */
200 TAILQ_ENTRY(aiothreadlist) list; /* List of processes */
201 struct thread *aiothread; /* The AIO thread */
202};
203
204/*
205 * data-structure for lio signal management
206 */
207struct aio_liojob {
208 int lioj_flags;
209 int lioj_buffer_count;
210 int lioj_buffer_finished_count;
211 int lioj_queue_count;
212 int lioj_queue_finished_count;
213 struct sigevent lioj_signal; /* signal on all I/O done */
214 TAILQ_ENTRY(aio_liojob) lioj_list;
215 struct kaioinfo *lioj_ki;
216};
217#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
218#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
219
220/*
221 * per process aio data structure
222 */
223struct kaioinfo {
224 int kaio_flags; /* per process kaio flags */
225 int kaio_maxactive_count; /* maximum number of AIOs */
226 int kaio_active_count; /* number of currently used AIOs */
 227 int kaio_qallowed_count; /* maximum size of AIO queue */
228 int kaio_queue_count; /* size of AIO queue */
229 int kaio_ballowed_count; /* maximum number of buffers */
230 int kaio_queue_finished_count; /* number of daemon jobs finished */
231 int kaio_buffer_count; /* number of physio buffers */
232 int kaio_buffer_finished_count; /* count of I/O done */
233 struct proc *kaio_p; /* process that uses this kaio block */
234 TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
235 TAILQ_HEAD(,aiocblist) kaio_jobqueue; /* job queue for process */
236 TAILQ_HEAD(,aiocblist) kaio_jobdone; /* done queue for process */
237 TAILQ_HEAD(,aiocblist) kaio_bufqueue; /* buffer job queue for process */
238 TAILQ_HEAD(,aiocblist) kaio_bufdone; /* buffer done queue for process */
239 TAILQ_HEAD(,aiocblist) kaio_sockqueue; /* queue for aios waiting on sockets */
240};
241
242#define KAIO_RUNDOWN 0x1 /* process is being run down */
243#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */
244
245static TAILQ_HEAD(,aiothreadlist) aio_activeproc; /* Active daemons */
246static TAILQ_HEAD(,aiothreadlist) aio_freeproc; /* Idle daemons */
247static TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
248static TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
249
250static void aio_init_aioinfo(struct proc *p);
251static void aio_onceonly(void);
252static int aio_free_entry(struct aiocblist *aiocbe);
253static void aio_process(struct aiocblist *aiocbe);
254static int aio_newproc(void);
255static int aio_aqueue(struct thread *td, struct aiocb *job, int type);
256static void aio_physwakeup(struct buf *bp);
257static void aio_proc_rundown(void *arg, struct proc *p);
258static int aio_fphysio(struct aiocblist *aiocbe);
259static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
260static void aio_daemon(void *uproc);
261static void aio_swake_cb(struct socket *, struct sockbuf *);
262static int aio_unload(void);
263static void process_signal(void *aioj);
264static int filt_aioattach(struct knote *kn);
265static void filt_aiodetach(struct knote *kn);
266static int filt_aio(struct knote *kn, long hint);
267
268/*
269 * Zones for:
270 * kaio Per process async io info
271 * aiop async io thread data
272 * aiocb async io jobs
273 * aiol list io job pointer - internal to aio_suspend XXX
274 * aiolio list io jobs
275 */
276static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
277
278/* kqueue filters for aio */
279static struct filterops aio_filtops =
280 { 0, filt_aioattach, filt_aiodetach, filt_aio };
281
282static eventhandler_tag exit_tag, exec_tag;
283
284/*
285 * Main operations function for use as a kernel module.
286 */
287static int
288aio_modload(struct module *module, int cmd, void *arg)
289{
290 int error = 0;
291
292 switch (cmd) {
293 case MOD_LOAD:
294 aio_onceonly();
295 break;
296 case MOD_UNLOAD:
297 error = aio_unload();
298 break;
299 case MOD_SHUTDOWN:
300 break;
301 default:
302 error = EINVAL;
303 break;
304 }
305 return (error);
306}
307
308static moduledata_t aio_mod = {
309 "aio",
310 &aio_modload,
311 NULL
312};
313
314SYSCALL_MODULE_HELPER(aio_return);
315SYSCALL_MODULE_HELPER(aio_suspend);
316SYSCALL_MODULE_HELPER(aio_cancel);
317SYSCALL_MODULE_HELPER(aio_error);
318SYSCALL_MODULE_HELPER(aio_read);
319SYSCALL_MODULE_HELPER(aio_write);
320SYSCALL_MODULE_HELPER(aio_waitcomplete);
321SYSCALL_MODULE_HELPER(lio_listio);
322
323DECLARE_MODULE(aio, aio_mod,
324 SI_SUB_VFS, SI_ORDER_ANY);
325MODULE_VERSION(aio, 1);
326
327/*
328 * Startup initialization
329 */
330static void
331aio_onceonly(void)
332{
333
334 /* XXX: should probably just use so->callback */
335 aio_swake = &aio_swake_cb;
336 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
337 EVENTHANDLER_PRI_ANY);
338 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown, NULL,
339 EVENTHANDLER_PRI_ANY);
340 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
341 TAILQ_INIT(&aio_freeproc);
342 TAILQ_INIT(&aio_activeproc);
343 TAILQ_INIT(&aio_jobs);
344 TAILQ_INIT(&aio_bufjobs);
345 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
346 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
347 aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
348 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
349 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
350 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
351 aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
352 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
353 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aio_liojob), NULL,
354 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
355 aiod_timeout = AIOD_TIMEOUT_DEFAULT;
356 aiod_lifetime = AIOD_LIFETIME_DEFAULT;
357 jobrefid = 1;
358 async_io_version = _POSIX_VERSION;
359 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
360 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
361 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
362}
363
364/*
365 * Callback for unload of AIO when used as a module.
366 */
367static int
368aio_unload(void)
369{
370
371 /*
372 * XXX: no unloads by default, it's too dangerous.
 373 * perhaps we could do it if we locked out callers and then
374 * did an aio_proc_rundown() on each process.
375 */
376 if (!unloadable)
377 return (EOPNOTSUPP);
378
379 async_io_version = 0;
380 aio_swake = NULL;
381 EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
382 EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
383 kqueue_del_filteropts(EVFILT_AIO);
384 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
385 p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
386 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
387 return (0);
388}
389
390/*
391 * Init the per-process aioinfo structure. The aioinfo limits are set
392 * per-process for user limit (resource) management.
393 */
394static void
395aio_init_aioinfo(struct proc *p)
396{
397 struct kaioinfo *ki;
398
399 if (p->p_aioinfo == NULL) {
400 ki = uma_zalloc(kaio_zone, M_WAITOK);
401 p->p_aioinfo = ki;
402 ki->kaio_flags = 0;
403 ki->kaio_maxactive_count = max_aio_per_proc;
404 ki->kaio_active_count = 0;
405 ki->kaio_qallowed_count = max_aio_queue_per_proc;
406 ki->kaio_queue_count = 0;
407 ki->kaio_ballowed_count = max_buf_aio;
408 ki->kaio_buffer_count = 0;
409 ki->kaio_buffer_finished_count = 0;
410 ki->kaio_p = p;
411 TAILQ_INIT(&ki->kaio_jobdone);
412 TAILQ_INIT(&ki->kaio_jobqueue);
413 TAILQ_INIT(&ki->kaio_bufdone);
414 TAILQ_INIT(&ki->kaio_bufqueue);
415 TAILQ_INIT(&ki->kaio_liojoblist);
416 TAILQ_INIT(&ki->kaio_sockqueue);
417 }
418
419 while (num_aio_procs < target_aio_procs)
420 aio_newproc();
421}
422
423/*
424 * Free a job entry. Wait for completion if it is currently active, but don't
425 * delay forever. If we delay, we return a flag that says that we have to
426 * restart the queue scan.
427 */
428static int
429aio_free_entry(struct aiocblist *aiocbe)
430{
431 struct kaioinfo *ki;
432 struct aio_liojob *lj;
433 struct proc *p;
434 int error;
435 int s;
436
437 if (aiocbe->jobstate == JOBST_NULL)
438 panic("aio_free_entry: freeing already free job");
439
440 p = aiocbe->userproc;
441 ki = p->p_aioinfo;
442 lj = aiocbe->lio;
443 if (ki == NULL)
444 panic("aio_free_entry: missing p->p_aioinfo");
445
446 while (aiocbe->jobstate == JOBST_JOBRUNNING) {
447 aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
448 tsleep(aiocbe, PRIBIO, "jobwai", 0);
449 }
450 if (aiocbe->bp == NULL) {
451 if (ki->kaio_queue_count <= 0)
452 panic("aio_free_entry: process queue size <= 0");
453 if (num_queue_count <= 0)
454 panic("aio_free_entry: system wide queue size <= 0");
455
456 if (lj) {
457 lj->lioj_queue_count--;
458 if (aiocbe->jobflags & AIOCBLIST_DONE)
459 lj->lioj_queue_finished_count--;
460 }
461 ki->kaio_queue_count--;
462 if (aiocbe->jobflags & AIOCBLIST_DONE)
463 ki->kaio_queue_finished_count--;
464 num_queue_count--;
465 } else {
466 if (lj) {
467 lj->lioj_buffer_count--;
468 if (aiocbe->jobflags & AIOCBLIST_DONE)
469 lj->lioj_buffer_finished_count--;
470 }
471 if (aiocbe->jobflags & AIOCBLIST_DONE)
472 ki->kaio_buffer_finished_count--;
473 ki->kaio_buffer_count--;
474 num_buf_aio--;
475 }
476
 477 /* aiocbe is going away; we need to destroy any knotes. */
478 /* XXXKSE Note the thread here is used to eventually find the
479 * owning process again, but it is also used to do a fo_close
480 * and that requires the thread. (but does it require the
481 * OWNING thread? (or maybe the running thread?)
482 * There is a semantic problem here...
483 */
484 knote_remove(FIRST_THREAD_IN_PROC(p), &aiocbe->klist); /* XXXKSE */
485
486 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
487 && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
488 ki->kaio_flags &= ~KAIO_WAKEUP;
489 wakeup(p);
490 }
491
492 if (aiocbe->jobstate == JOBST_JOBQBUF) {
493 if ((error = aio_fphysio(aiocbe)) != 0)
494 return (error);
495 if (aiocbe->jobstate != JOBST_JOBBFINISHED)
496 panic("aio_free_entry: invalid physio finish-up state");
497 s = splbio();
498 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
499 splx(s);
500 } else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
501 s = splnet();
502 TAILQ_REMOVE(&aio_jobs, aiocbe, list);
503 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
504 splx(s);
505 } else if (aiocbe->jobstate == JOBST_JOBFINISHED)
506 TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
507 else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
508 s = splbio();
509 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
510 splx(s);
511 if (aiocbe->bp) {
512 vunmapbuf(aiocbe->bp);
513 relpbuf(aiocbe->bp, NULL);
514 aiocbe->bp = NULL;
515 }
516 }
517 if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
518 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
519 uma_zfree(aiolio_zone, lj);
520 }
521 aiocbe->jobstate = JOBST_NULL;
522 untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
523 fdrop(aiocbe->fd_file, curthread);
524 crfree(aiocbe->cred);
525 uma_zfree(aiocb_zone, aiocbe);
526 return (0);
527}
528
529/*
 530 * Run down the jobs for a given process.
531 */
532static void
533aio_proc_rundown(void *arg, struct proc *p)
534{
535 int s;
536 struct kaioinfo *ki;
537 struct aio_liojob *lj, *ljn;
538 struct aiocblist *aiocbe, *aiocbn;
539 struct file *fp;
540 struct socket *so;
541
542 ki = p->p_aioinfo;
543 if (ki == NULL)
544 return;
545
546 ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
547 while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
548 ki->kaio_buffer_finished_count)) {
549 ki->kaio_flags |= KAIO_RUNDOWN;
550 if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
551 break;
552 }
553
554 /*
555 * Move any aio ops that are waiting on socket I/O to the normal job
556 * queues so they are cleaned up with any others.
557 */
558 s = splnet();
559 for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
560 aiocbn) {
561 aiocbn = TAILQ_NEXT(aiocbe, plist);
562 fp = aiocbe->fd_file;
563 if (fp != NULL) {
564 so = fp->f_data;
565 TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
566 if (TAILQ_EMPTY(&so->so_aiojobq)) {
567 so->so_snd.sb_flags &= ~SB_AIO;
568 so->so_rcv.sb_flags &= ~SB_AIO;
569 }
570 }
571 TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
572 TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
573 TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
574 }
575 splx(s);
576
577restart1:
578 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
579 aiocbn = TAILQ_NEXT(aiocbe, plist);
580 if (aio_free_entry(aiocbe))
581 goto restart1;
582 }
583
584restart2:
585 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
586 aiocbn) {
587 aiocbn = TAILQ_NEXT(aiocbe, plist);
588 if (aio_free_entry(aiocbe))
589 goto restart2;
590 }
591
592/*
593 * Note the use of lots of splbio here, trying to avoid splbio for long chains
594 * of I/O. Probably unnecessary.
595 */
596restart3:
597 s = splbio();
598 while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
599 ki->kaio_flags |= KAIO_WAKEUP;
600 tsleep(p, PRIBIO, "aioprn", 0);
601 splx(s);
602 goto restart3;
603 }
604 splx(s);
605
606restart4:
607 s = splbio();
608 for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
609 aiocbn = TAILQ_NEXT(aiocbe, plist);
610 if (aio_free_entry(aiocbe)) {
611 splx(s);
612 goto restart4;
613 }
614 }
615 splx(s);
616
617 /*
618 * If we've slept, jobs might have moved from one queue to another.
619 * Retry rundown if we didn't manage to empty the queues.
620 */
621 if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
622 TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
623 TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
624 TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
625 goto restart1;
626
627 for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
628 ljn = TAILQ_NEXT(lj, lioj_list);
629 if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
630 0)) {
631 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
632 uma_zfree(aiolio_zone, lj);
633 } else {
634#ifdef DIAGNOSTIC
635 printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
636 "QF:%d\n", lj->lioj_buffer_count,
637 lj->lioj_buffer_finished_count,
638 lj->lioj_queue_count,
639 lj->lioj_queue_finished_count);
640#endif
641 }
642 }
643
644 uma_zfree(kaio_zone, ki);
645 p->p_aioinfo = NULL;
646}
647
648/*
 649 * Select a job to run (called by an AIO daemon): pick the first queued
 * job whose owning process is still below its per-process active limit.
650 */
651static struct aiocblist *
652aio_selectjob(struct aiothreadlist *aiop)
653{
654 int s;
655 struct aiocblist *aiocbe;
656 struct kaioinfo *ki;
657 struct proc *userp;
658
659 s = splnet();
660 for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
661 TAILQ_NEXT(aiocbe, list)) {
662 userp = aiocbe->userproc;
663 ki = userp->p_aioinfo;
664
665 if (ki->kaio_active_count < ki->kaio_maxactive_count) {
666 TAILQ_REMOVE(&aio_jobs, aiocbe, list);
667 splx(s);
668 return (aiocbe);
669 }
670 }
671 splx(s);
672
673 return (NULL);
674}
675
676/*
677 * The AIO processing activity. This is the code that does the I/O request for
678 * the non-physio version of the operations. The normal vn operations are used,
679 * and this code should work in all instances for every type of file, including
680 * pipes, sockets, fifos, and regular files.
681 */
682static void
683aio_process(struct aiocblist *aiocbe)
684{
685 struct ucred *td_savedcred;
686 struct thread *td;
687 struct proc *mycp;
688 struct aiocb *cb;
689 struct file *fp;
690 struct uio auio;
691 struct iovec aiov;
692 int cnt;
693 int error;
694 int oublock_st, oublock_end;
695 int inblock_st, inblock_end;
696
697 td = curthread;
698 td_savedcred = td->td_ucred;
699 td->td_ucred = aiocbe->cred;
700 mycp = td->td_proc;
701 cb = &aiocbe->uaiocb;
702 fp = aiocbe->fd_file;
703
704 aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
705 aiov.iov_len = cb->aio_nbytes;
706
707 auio.uio_iov = &aiov;
708 auio.uio_iovcnt = 1;
709 auio.uio_offset = cb->aio_offset;
710 auio.uio_resid = cb->aio_nbytes;
711 cnt = cb->aio_nbytes;
712 auio.uio_segflg = UIO_USERSPACE;
713 auio.uio_td = td;
714
715 inblock_st = mycp->p_stats->p_ru.ru_inblock;
716 oublock_st = mycp->p_stats->p_ru.ru_oublock;
717 /*
718 * _aio_aqueue() acquires a reference to the file that is
719 * released in aio_free_entry().
720 */
721 if (cb->aio_lio_opcode == LIO_READ) {
722 auio.uio_rw = UIO_READ;
723 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
724 } else {
725 auio.uio_rw = UIO_WRITE;
726 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
727 }
728 inblock_end = mycp->p_stats->p_ru.ru_inblock;
729 oublock_end = mycp->p_stats->p_ru.ru_oublock;
730
731 aiocbe->inputcharge = inblock_end - inblock_st;
732 aiocbe->outputcharge = oublock_end - oublock_st;
733
734 if ((error) && (auio.uio_resid != cnt)) {
735 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
736 error = 0;
737 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
738 PROC_LOCK(aiocbe->userproc);
739 psignal(aiocbe->userproc, SIGPIPE);
740 PROC_UNLOCK(aiocbe->userproc);
741 }
742 }
743
744 cnt -= auio.uio_resid;
745 cb->_aiocb_private.error = error;
746 cb->_aiocb_private.status = cnt;
747 td->td_ucred = td_savedcred;
748}
749
750/*
 751 * The AIO daemon. Most of the actual work is done in aio_process(),
 752 * but the setup (and address space management) is done in this routine.
753 */
754static void
755aio_daemon(void *uproc)
756{
757 int s;
758 struct aio_liojob *lj;
759 struct aiocb *cb;
760 struct aiocblist *aiocbe;
761 struct aiothreadlist *aiop;
762 struct kaioinfo *ki;
763 struct proc *curcp, *mycp, *userp;
764 struct vmspace *myvm, *tmpvm;
765 struct thread *td = curthread;
766 struct pgrp *newpgrp;
767 struct session *newsess;
768
769 mtx_lock(&Giant);
770 /*
 771 * Local copies of curproc (mycp) and vmspace (myvm).
772 */
773 mycp = td->td_proc;
774 myvm = mycp->p_vmspace;
775
776 KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
777
778 /*
779 * Allocate and ready the aio control info. There is one aiop structure
780 * per daemon.
781 */
782 aiop = uma_zalloc(aiop_zone, M_WAITOK);
783 aiop->aiothread = td;
784 aiop->aiothreadflags |= AIOP_FREE;
785
786 s = splnet();
787
788 /*
789 * Place thread (lightweight process) onto the AIO free thread list.
790 */
791 if (TAILQ_EMPTY(&aio_freeproc))
792 wakeup(&aio_freeproc);
793 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
794
795 splx(s);
796
797 /*
 798 * Get rid of our current file descriptors. AIODs don't need any
 799 * file descriptors, except as temporarily inherited from the client.
800 */
801 fdfree(td);
802
803 mtx_unlock(&Giant);
804 /* The daemon resides in its own pgrp. */
805 MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
806 M_WAITOK | M_ZERO);
807 MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION,
808 M_WAITOK | M_ZERO);
809
810 sx_xlock(&proctree_lock);
811 enterpgrp(mycp, mycp->p_pid, newpgrp, newsess);
812 sx_xunlock(&proctree_lock);
813 mtx_lock(&Giant);
814
815 /*
816 * Wakeup parent process. (Parent sleeps to keep from blasting away
817 * and creating too many daemons.)
818 */
819 wakeup(mycp);
820
821 for (;;) {
822 /*
823 * curcp is the current daemon process context.
824 * userp is the current user process context.
825 */
826 curcp = mycp;
827
828 /*
829 * Take daemon off of free queue
830 */
831 if (aiop->aiothreadflags & AIOP_FREE) {
832 s = splnet();
833 TAILQ_REMOVE(&aio_freeproc, aiop, list);
834 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
835 aiop->aiothreadflags &= ~AIOP_FREE;
836 splx(s);
837 }
838 aiop->aiothreadflags &= ~AIOP_SCHED;
839
840 /*
841 * Check for jobs.
842 */
843 while ((aiocbe = aio_selectjob(aiop)) != NULL) {
844 cb = &aiocbe->uaiocb;
845 userp = aiocbe->userproc;
846
847 aiocbe->jobstate = JOBST_JOBRUNNING;
848
849 /*
850 * Connect to process address space for user program.
851 */
852 if (userp != curcp) {
853 /*
854 * Save the current address space that we are
855 * connected to.
856 */
857 tmpvm = mycp->p_vmspace;
858
859 /*
860 * Point to the new user address space, and
861 * refer to it.
862 */
863 mycp->p_vmspace = userp->p_vmspace;
864 mycp->p_vmspace->vm_refcnt++;
865
866 /* Activate the new mapping. */
867 pmap_activate(FIRST_THREAD_IN_PROC(mycp));
868
869 /*
 870 * If the old address space wasn't the daemon's
871 * own address space, then we need to remove the
872 * daemon's reference from the other process
873 * that it was acting on behalf of.
874 */
875 if (tmpvm != myvm) {
876 vmspace_free(tmpvm);
877 }
878 curcp = userp;
879 }
880
881 ki = userp->p_aioinfo;
882 lj = aiocbe->lio;
883
884 /* Account for currently active jobs. */
885 ki->kaio_active_count++;
886
887 /* Do the I/O function. */
888 aio_process(aiocbe);
889
890 /* Decrement the active job count. */
891 ki->kaio_active_count--;
892
893 /*
894 * Increment the completion count for wakeup/signal
895 * comparisons.
896 */
897 aiocbe->jobflags |= AIOCBLIST_DONE;
898 ki->kaio_queue_finished_count++;
899 if (lj)
900 lj->lioj_queue_finished_count++;
901 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
902 & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
903 ki->kaio_flags &= ~KAIO_WAKEUP;
904 wakeup(userp);
905 }
906
907 s = splbio();
908 if (lj && (lj->lioj_flags &
909 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
910 if ((lj->lioj_queue_finished_count ==
911 lj->lioj_queue_count) &&
912 (lj->lioj_buffer_finished_count ==
913 lj->lioj_buffer_count)) {
914 PROC_LOCK(userp);
915 psignal(userp,
916 lj->lioj_signal.sigev_signo);
917 PROC_UNLOCK(userp);
918 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
919 }
920 }
921 splx(s);
922
923 aiocbe->jobstate = JOBST_JOBFINISHED;
924
925 s = splnet();
926 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
927 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
928 splx(s);
929 KNOTE(&aiocbe->klist, 0);
930
931 if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
932 wakeup(aiocbe);
933 aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
934 }
935
936 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
937 PROC_LOCK(userp);
938 psignal(userp, cb->aio_sigevent.sigev_signo);
939 PROC_UNLOCK(userp);
940 }
941 }
942
943 /*
944 * Disconnect from user address space.
945 */
946 if (curcp != mycp) {
947 /* Get the user address space to disconnect from. */
948 tmpvm = mycp->p_vmspace;
949
950 /* Get original address space for daemon. */
951 mycp->p_vmspace = myvm;
952
953 /* Activate the daemon's address space. */
954 pmap_activate(FIRST_THREAD_IN_PROC(mycp));
955#ifdef DIAGNOSTIC
956 if (tmpvm == myvm) {
957 printf("AIOD: vmspace problem -- %d\n",
958 mycp->p_pid);
959 }
960#endif
961 /* Remove our vmspace reference. */
962 vmspace_free(tmpvm);
963
964 curcp = mycp;
965 }
966
967 /*
968 * If we are the first to be put onto the free queue, wakeup
969 * anyone waiting for a daemon.
970 */
971 s = splnet();
972 TAILQ_REMOVE(&aio_activeproc, aiop, list);
973 if (TAILQ_EMPTY(&aio_freeproc))
974 wakeup(&aio_freeproc);
975 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
976 aiop->aiothreadflags |= AIOP_FREE;
977 splx(s);
978
979 /*
980 * If daemon is inactive for a long time, allow it to exit,
981 * thereby freeing resources.
982 */
983 if ((aiop->aiothreadflags & AIOP_SCHED) == 0 &&
984 tsleep(aiop->aiothread, PRIBIO, "aiordy", aiod_lifetime)) {
985 s = splnet();
986 if (TAILQ_EMPTY(&aio_jobs)) {
987 if ((aiop->aiothreadflags & AIOP_FREE) &&
988 (num_aio_procs > target_aio_procs)) {
989 TAILQ_REMOVE(&aio_freeproc, aiop, list);
990 splx(s);
991 uma_zfree(aiop_zone, aiop);
992 num_aio_procs--;
993#ifdef DIAGNOSTIC
994 if (mycp->p_vmspace->vm_refcnt <= 1) {
995 printf("AIOD: bad vm refcnt for"
996 " exiting daemon: %d\n",
997 mycp->p_vmspace->vm_refcnt);
998 }
999#endif
1000 kthread_exit(0);
1001 }
1002 }
1003 splx(s);
1004 }
1005 }
1006}
1007
1008/*
1009 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1010 * AIO daemon modifies its environment itself.
1011 */
1012static int
1013aio_newproc(void)
1014{
1015 int error;
1016 struct proc *p;
1017
1018 error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
1019 num_aio_procs);
1020 if (error)
1021 return (error);
1022
1023 /*
 1024 * Wait until the daemon has started, but continue on regardless so
 1025 * that error conditions are handled.
1026 */
1027 error = tsleep(p, PZERO, "aiosta", aiod_timeout);
1028
1029 num_aio_procs++;
1030
1031 return (error);
1032}
1033
1034/*
1035 * Try the high-performance, low-overhead physio method for eligible
1036 * VCHR devices. This method doesn't use an aio helper thread, and
1037 * thus has very low overhead.
1038 *
1039 * Assumes that the caller, _aio_aqueue(), has incremented the file
1040 * structure's reference count, preventing its deallocation for the
1041 * duration of this call.
1042 */
1043static int
1044aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
1045{
1046 int error;
1047 struct aiocb *cb;
1048 struct file *fp;
1049 struct buf *bp;
1050 struct vnode *vp;
1051 struct kaioinfo *ki;
1052 struct aio_liojob *lj;
1053 int s;
1054 int notify;
1055
1056 cb = &aiocbe->uaiocb;
1057 fp = aiocbe->fd_file;
1058
1059 if (fp->f_type != DTYPE_VNODE)
1060 return (-1);
1061
1062 vp = fp->f_vnode;
1063
1064 /*
 1065 * If it's not a disk, we don't want to return a positive error;
 1066 * that would keep the aio code from falling through to the threaded
 1067 * path when the target is a regular file.
1068 */
1069 if (!vn_isdisk(vp, &error)) {
1070 if (error == ENOTBLK)
1071 return (-1);
1072 else
1073 return (error);
1074 }
1075
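	/*
	 * Further eligibility checks (summarizing the tests below): the
	 * transfer length must be a multiple of the device's physical block
	 * size, must fit in one physical buffer mapping (MAXPHYS less the
	 * user buffer's page offset), and the process must still be under
	 * its per-process physio buffer limit.
	 */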
1076 if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
1077 return (-1);
1078
1079 if (cb->aio_nbytes >
1080 MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
1081 return (-1);
1082
1083 ki = p->p_aioinfo;
1084 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
1085 return (-1);
1086
1087 ki->kaio_buffer_count++;
1088
1089 lj = aiocbe->lio;
1090 if (lj)
1091 lj->lioj_buffer_count++;
1092
1093 /* Create and build a buffer header for a transfer. */
1094 bp = (struct buf *)getpbuf(NULL);
1095 BUF_KERNPROC(bp);
1096
1097 /*
1098 * Get a copy of the kva from the physical buffer.
1099 */
1100 bp->b_dev = vp->v_rdev;
1101 error = 0;
1102
1103 bp->b_bcount = cb->aio_nbytes;
1104 bp->b_bufsize = cb->aio_nbytes;
23
24#include <sys/param.h>
25#include <sys/systm.h>
26#include <sys/malloc.h>
27#include <sys/bio.h>
28#include <sys/buf.h>
29#include <sys/eventhandler.h>
30#include <sys/sysproto.h>
31#include <sys/filedesc.h>
32#include <sys/kernel.h>
33#include <sys/kthread.h>
34#include <sys/fcntl.h>
35#include <sys/file.h>
36#include <sys/limits.h>
37#include <sys/lock.h>
38#include <sys/mutex.h>
39#include <sys/unistd.h>
40#include <sys/proc.h>
41#include <sys/resourcevar.h>
42#include <sys/signalvar.h>
43#include <sys/protosw.h>
44#include <sys/socketvar.h>
45#include <sys/syscall.h>
46#include <sys/sysent.h>
47#include <sys/sysctl.h>
48#include <sys/sx.h>
49#include <sys/vnode.h>
50#include <sys/conf.h>
51#include <sys/event.h>
52
53#include <posix4/posix4.h>
54#include <vm/vm.h>
55#include <vm/vm_extern.h>
56#include <vm/pmap.h>
57#include <vm/vm_map.h>
58#include <vm/uma.h>
59#include <sys/aio.h>
60
61#include "opt_vfs_aio.h"
62
63/*
64 * Counter for allocating reference ids to new jobs. Wrapped to 1 on
65 * overflow.
66 */
67static long jobrefid;
68
69#define JOBST_NULL 0x0
70#define JOBST_JOBQGLOBAL 0x2
71#define JOBST_JOBRUNNING 0x3
72#define JOBST_JOBFINISHED 0x4
73#define JOBST_JOBQBUF 0x5
74#define JOBST_JOBBFINISHED 0x6
75
76#ifndef MAX_AIO_PER_PROC
77#define MAX_AIO_PER_PROC 32
78#endif
79
80#ifndef MAX_AIO_QUEUE_PER_PROC
81#define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */
82#endif
83
84#ifndef MAX_AIO_PROCS
85#define MAX_AIO_PROCS 32
86#endif
87
88#ifndef MAX_AIO_QUEUE
89#define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */
90#endif
91
92#ifndef TARGET_AIO_PROCS
93#define TARGET_AIO_PROCS 4
94#endif
95
96#ifndef MAX_BUF_AIO
97#define MAX_BUF_AIO 16
98#endif
99
100#ifndef AIOD_TIMEOUT_DEFAULT
101#define AIOD_TIMEOUT_DEFAULT (10 * hz)
102#endif
103
104#ifndef AIOD_LIFETIME_DEFAULT
105#define AIOD_LIFETIME_DEFAULT (30 * hz)
106#endif
107
108SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
109
110static int max_aio_procs = MAX_AIO_PROCS;
111SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
112 CTLFLAG_RW, &max_aio_procs, 0,
113 "Maximum number of kernel threads to use for handling async IO ");
114
115static int num_aio_procs = 0;
116SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
117 CTLFLAG_RD, &num_aio_procs, 0,
118 "Number of presently active kernel threads for async IO");
119
120/*
121 * The code will adjust the actual number of AIO processes towards this
122 * number when it gets a chance.
123 */
124static int target_aio_procs = TARGET_AIO_PROCS;
125SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
126 0, "Preferred number of ready kernel threads for async IO");
127
128static int max_queue_count = MAX_AIO_QUEUE;
129SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
130 "Maximum number of aio requests to queue, globally");
131
132static int num_queue_count = 0;
133SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0,
134 "Number of queued aio requests");
135
136static int num_buf_aio = 0;
137SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
138 "Number of aio requests presently handled by the buf subsystem");
139
140/* Number of async I/O thread in the process of being started */
141/* XXX This should be local to _aio_aqueue() */
142static int num_aio_resv_start = 0;
143
144static int aiod_timeout;
145SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_timeout, CTLFLAG_RW, &aiod_timeout, 0,
146 "Timeout value for synchronous aio operations");
147
148static int aiod_lifetime;
149SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0,
150 "Maximum lifetime for idle aiod");
151
152static int unloadable = 0;
153SYSCTL_INT(_vfs_aio, OID_AUTO, unloadable, CTLFLAG_RW, &unloadable, 0,
154 "Allow unload of aio (not recommended)");
155
156
157static int max_aio_per_proc = MAX_AIO_PER_PROC;
158SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc,
159 0, "Maximum active aio requests per process (stored in the process)");
160
161static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC;
162SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW,
163 &max_aio_queue_per_proc, 0,
164 "Maximum queued aio requests per process (stored in the process)");
165
166static int max_buf_aio = MAX_BUF_AIO;
167SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0,
168 "Maximum buf aio requests per process (stored in the process)");
169
170struct aiocblist {
171 TAILQ_ENTRY(aiocblist) list; /* List of jobs */
172 TAILQ_ENTRY(aiocblist) plist; /* List of jobs for proc */
173 int jobflags;
174 int jobstate;
175 int inputcharge;
176 int outputcharge;
177 struct callout_handle timeouthandle;
178 struct buf *bp; /* Buffer pointer */
179 struct proc *userproc; /* User process */ /* Not td! */
180 struct ucred *cred; /* Active credential when created */
181 struct file *fd_file; /* Pointer to file structure */
182 struct aio_liojob *lio; /* Optional lio job */
183 struct aiocb *uuaiocb; /* Pointer in userspace of aiocb */
184 struct klist klist; /* list of knotes */
185 struct aiocb uaiocb; /* Kernel I/O control block */
186};
187
188/* jobflags */
189#define AIOCBLIST_RUNDOWN 0x4
190#define AIOCBLIST_DONE 0x10
191
192/*
193 * AIO process info
194 */
195#define AIOP_FREE 0x1 /* proc on free queue */
196#define AIOP_SCHED 0x2 /* proc explicitly scheduled */
197
198struct aiothreadlist {
199 int aiothreadflags; /* AIO proc flags */
200 TAILQ_ENTRY(aiothreadlist) list; /* List of processes */
201 struct thread *aiothread; /* The AIO thread */
202};
203
204/*
205 * data-structure for lio signal management
206 */
207struct aio_liojob {
208 int lioj_flags;
209 int lioj_buffer_count;
210 int lioj_buffer_finished_count;
211 int lioj_queue_count;
212 int lioj_queue_finished_count;
213 struct sigevent lioj_signal; /* signal on all I/O done */
214 TAILQ_ENTRY(aio_liojob) lioj_list;
215 struct kaioinfo *lioj_ki;
216};
217#define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */
218#define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */
219
220/*
221 * per process aio data structure
222 */
223struct kaioinfo {
224 int kaio_flags; /* per process kaio flags */
225 int kaio_maxactive_count; /* maximum number of AIOs */
226 int kaio_active_count; /* number of currently used AIOs */
227 int kaio_qallowed_count; /* maxiumu size of AIO queue */
228 int kaio_queue_count; /* size of AIO queue */
229 int kaio_ballowed_count; /* maximum number of buffers */
230 int kaio_queue_finished_count; /* number of daemon jobs finished */
231 int kaio_buffer_count; /* number of physio buffers */
232 int kaio_buffer_finished_count; /* count of I/O done */
233 struct proc *kaio_p; /* process that uses this kaio block */
234 TAILQ_HEAD(,aio_liojob) kaio_liojoblist; /* list of lio jobs */
235 TAILQ_HEAD(,aiocblist) kaio_jobqueue; /* job queue for process */
236 TAILQ_HEAD(,aiocblist) kaio_jobdone; /* done queue for process */
237 TAILQ_HEAD(,aiocblist) kaio_bufqueue; /* buffer job queue for process */
238 TAILQ_HEAD(,aiocblist) kaio_bufdone; /* buffer done queue for process */
239 TAILQ_HEAD(,aiocblist) kaio_sockqueue; /* queue for aios waiting on sockets */
240};
241
242#define KAIO_RUNDOWN 0x1 /* process is being run down */
243#define KAIO_WAKEUP 0x2 /* wakeup process when there is a significant event */
244
245static TAILQ_HEAD(,aiothreadlist) aio_activeproc; /* Active daemons */
246static TAILQ_HEAD(,aiothreadlist) aio_freeproc; /* Idle daemons */
247static TAILQ_HEAD(,aiocblist) aio_jobs; /* Async job list */
248static TAILQ_HEAD(,aiocblist) aio_bufjobs; /* Phys I/O job list */
249
250static void aio_init_aioinfo(struct proc *p);
251static void aio_onceonly(void);
252static int aio_free_entry(struct aiocblist *aiocbe);
253static void aio_process(struct aiocblist *aiocbe);
254static int aio_newproc(void);
255static int aio_aqueue(struct thread *td, struct aiocb *job, int type);
256static void aio_physwakeup(struct buf *bp);
257static void aio_proc_rundown(void *arg, struct proc *p);
258static int aio_fphysio(struct aiocblist *aiocbe);
259static int aio_qphysio(struct proc *p, struct aiocblist *iocb);
260static void aio_daemon(void *uproc);
261static void aio_swake_cb(struct socket *, struct sockbuf *);
262static int aio_unload(void);
263static void process_signal(void *aioj);
264static int filt_aioattach(struct knote *kn);
265static void filt_aiodetach(struct knote *kn);
266static int filt_aio(struct knote *kn, long hint);
267
268/*
269 * Zones for:
270 * kaio Per process async io info
271 * aiop async io thread data
272 * aiocb async io jobs
273 * aiol list io job pointer - internal to aio_suspend XXX
274 * aiolio list io jobs
275 */
276static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone;
277
278/* kqueue filters for aio */
279static struct filterops aio_filtops =
280 { 0, filt_aioattach, filt_aiodetach, filt_aio };
281
282static eventhandler_tag exit_tag, exec_tag;
283
284/*
285 * Main operations function for use as a kernel module.
286 */
287static int
288aio_modload(struct module *module, int cmd, void *arg)
289{
290 int error = 0;
291
292 switch (cmd) {
293 case MOD_LOAD:
294 aio_onceonly();
295 break;
296 case MOD_UNLOAD:
297 error = aio_unload();
298 break;
299 case MOD_SHUTDOWN:
300 break;
301 default:
302 error = EINVAL;
303 break;
304 }
305 return (error);
306}
307
308static moduledata_t aio_mod = {
309 "aio",
310 &aio_modload,
311 NULL
312};
313
314SYSCALL_MODULE_HELPER(aio_return);
315SYSCALL_MODULE_HELPER(aio_suspend);
316SYSCALL_MODULE_HELPER(aio_cancel);
317SYSCALL_MODULE_HELPER(aio_error);
318SYSCALL_MODULE_HELPER(aio_read);
319SYSCALL_MODULE_HELPER(aio_write);
320SYSCALL_MODULE_HELPER(aio_waitcomplete);
321SYSCALL_MODULE_HELPER(lio_listio);
322
323DECLARE_MODULE(aio, aio_mod,
324 SI_SUB_VFS, SI_ORDER_ANY);
325MODULE_VERSION(aio, 1);
326
327/*
328 * Startup initialization
329 */
330static void
331aio_onceonly(void)
332{
333
334 /* XXX: should probably just use so->callback */
335 aio_swake = &aio_swake_cb;
336 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL,
337 EVENTHANDLER_PRI_ANY);
338 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown, NULL,
339 EVENTHANDLER_PRI_ANY);
340 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
341 TAILQ_INIT(&aio_freeproc);
342 TAILQ_INIT(&aio_activeproc);
343 TAILQ_INIT(&aio_jobs);
344 TAILQ_INIT(&aio_bufjobs);
345 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
346 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
347 aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
348 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
349 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
350 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
351 aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
352 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
353 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aio_liojob), NULL,
354 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
355 aiod_timeout = AIOD_TIMEOUT_DEFAULT;
356 aiod_lifetime = AIOD_LIFETIME_DEFAULT;
357 jobrefid = 1;
358 async_io_version = _POSIX_VERSION;
359 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX);
360 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE);
361 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0);
362}
363
364/*
365 * Callback for unload of AIO when used as a module.
366 */
367static int
368aio_unload(void)
369{
370
371 /*
372 * XXX: no unloads by default, it's too dangerous.
373 * perhaps we could do it if locked out callers and then
374 * did an aio_proc_rundown() on each process.
375 */
376 if (!unloadable)
377 return (EOPNOTSUPP);
378
379 async_io_version = 0;
380 aio_swake = NULL;
381 EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
382 EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
383 kqueue_del_filteropts(EVFILT_AIO);
384 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
385 p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
386 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
387 return (0);
388}
389
390/*
391 * Init the per-process aioinfo structure. The aioinfo limits are set
392 * per-process for user limit (resource) management.
393 */
394static void
395aio_init_aioinfo(struct proc *p)
396{
397 struct kaioinfo *ki;
398
399 if (p->p_aioinfo == NULL) {
400 ki = uma_zalloc(kaio_zone, M_WAITOK);
401 p->p_aioinfo = ki;
402 ki->kaio_flags = 0;
403 ki->kaio_maxactive_count = max_aio_per_proc;
404 ki->kaio_active_count = 0;
405 ki->kaio_qallowed_count = max_aio_queue_per_proc;
406 ki->kaio_queue_count = 0;
407 ki->kaio_ballowed_count = max_buf_aio;
408 ki->kaio_buffer_count = 0;
409 ki->kaio_buffer_finished_count = 0;
410 ki->kaio_p = p;
411 TAILQ_INIT(&ki->kaio_jobdone);
412 TAILQ_INIT(&ki->kaio_jobqueue);
413 TAILQ_INIT(&ki->kaio_bufdone);
414 TAILQ_INIT(&ki->kaio_bufqueue);
415 TAILQ_INIT(&ki->kaio_liojoblist);
416 TAILQ_INIT(&ki->kaio_sockqueue);
417 }
418
419 while (num_aio_procs < target_aio_procs)
420 aio_newproc();
421}
422
423/*
424 * Free a job entry. Wait for completion if it is currently active, but don't
425 * delay forever. If we delay, we return a flag that says that we have to
426 * restart the queue scan.
427 */
428static int
429aio_free_entry(struct aiocblist *aiocbe)
430{
431 struct kaioinfo *ki;
432 struct aio_liojob *lj;
433 struct proc *p;
434 int error;
435 int s;
436
437 if (aiocbe->jobstate == JOBST_NULL)
438 panic("aio_free_entry: freeing already free job");
439
440 p = aiocbe->userproc;
441 ki = p->p_aioinfo;
442 lj = aiocbe->lio;
443 if (ki == NULL)
444 panic("aio_free_entry: missing p->p_aioinfo");
445
446 while (aiocbe->jobstate == JOBST_JOBRUNNING) {
447 aiocbe->jobflags |= AIOCBLIST_RUNDOWN;
448 tsleep(aiocbe, PRIBIO, "jobwai", 0);
449 }
450 if (aiocbe->bp == NULL) {
451 if (ki->kaio_queue_count <= 0)
452 panic("aio_free_entry: process queue size <= 0");
453 if (num_queue_count <= 0)
454 panic("aio_free_entry: system wide queue size <= 0");
455
456 if (lj) {
457 lj->lioj_queue_count--;
458 if (aiocbe->jobflags & AIOCBLIST_DONE)
459 lj->lioj_queue_finished_count--;
460 }
461 ki->kaio_queue_count--;
462 if (aiocbe->jobflags & AIOCBLIST_DONE)
463 ki->kaio_queue_finished_count--;
464 num_queue_count--;
465 } else {
466 if (lj) {
467 lj->lioj_buffer_count--;
468 if (aiocbe->jobflags & AIOCBLIST_DONE)
469 lj->lioj_buffer_finished_count--;
470 }
471 if (aiocbe->jobflags & AIOCBLIST_DONE)
472 ki->kaio_buffer_finished_count--;
473 ki->kaio_buffer_count--;
474 num_buf_aio--;
475 }
476
477 /* aiocbe is going away, we need to destroy any knotes */
478 /* XXXKSE Note the thread here is used to eventually find the
479 * owning process again, but it is also used to do a fo_close
480 * and that requires the thread. (but does it require the
481 * OWNING thread? (or maybe the running thread?)
482 * There is a semantic problem here...
483 */
484 knote_remove(FIRST_THREAD_IN_PROC(p), &aiocbe->klist); /* XXXKSE */
485
486 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
487 && ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
488 ki->kaio_flags &= ~KAIO_WAKEUP;
489 wakeup(p);
490 }
491
492 if (aiocbe->jobstate == JOBST_JOBQBUF) {
493 if ((error = aio_fphysio(aiocbe)) != 0)
494 return (error);
495 if (aiocbe->jobstate != JOBST_JOBBFINISHED)
496 panic("aio_free_entry: invalid physio finish-up state");
497 s = splbio();
498 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
499 splx(s);
500 } else if (aiocbe->jobstate == JOBST_JOBQGLOBAL) {
501 s = splnet();
502 TAILQ_REMOVE(&aio_jobs, aiocbe, list);
503 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
504 splx(s);
505 } else if (aiocbe->jobstate == JOBST_JOBFINISHED)
506 TAILQ_REMOVE(&ki->kaio_jobdone, aiocbe, plist);
507 else if (aiocbe->jobstate == JOBST_JOBBFINISHED) {
508 s = splbio();
509 TAILQ_REMOVE(&ki->kaio_bufdone, aiocbe, plist);
510 splx(s);
511 if (aiocbe->bp) {
512 vunmapbuf(aiocbe->bp);
513 relpbuf(aiocbe->bp, NULL);
514 aiocbe->bp = NULL;
515 }
516 }
517 if (lj && (lj->lioj_buffer_count == 0) && (lj->lioj_queue_count == 0)) {
518 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
519 uma_zfree(aiolio_zone, lj);
520 }
521 aiocbe->jobstate = JOBST_NULL;
522 untimeout(process_signal, aiocbe, aiocbe->timeouthandle);
523 fdrop(aiocbe->fd_file, curthread);
524 crfree(aiocbe->cred);
525 uma_zfree(aiocb_zone, aiocbe);
526 return (0);
527}
528
529/*
530 * Rundown the jobs for a given process.
531 */
532static void
533aio_proc_rundown(void *arg, struct proc *p)
534{
535 int s;
536 struct kaioinfo *ki;
537 struct aio_liojob *lj, *ljn;
538 struct aiocblist *aiocbe, *aiocbn;
539 struct file *fp;
540 struct socket *so;
541
542 ki = p->p_aioinfo;
543 if (ki == NULL)
544 return;
545
546 ki->kaio_flags |= LIOJ_SIGNAL_POSTED;
547 while ((ki->kaio_active_count > 0) || (ki->kaio_buffer_count >
548 ki->kaio_buffer_finished_count)) {
549 ki->kaio_flags |= KAIO_RUNDOWN;
550 if (tsleep(p, PRIBIO, "kaiowt", aiod_timeout))
551 break;
552 }
553
554 /*
555 * Move any aio ops that are waiting on socket I/O to the normal job
556 * queues so they are cleaned up with any others.
557 */
558 s = splnet();
559 for (aiocbe = TAILQ_FIRST(&ki->kaio_sockqueue); aiocbe; aiocbe =
560 aiocbn) {
561 aiocbn = TAILQ_NEXT(aiocbe, plist);
562 fp = aiocbe->fd_file;
563 if (fp != NULL) {
564 so = fp->f_data;
565 TAILQ_REMOVE(&so->so_aiojobq, aiocbe, list);
566 if (TAILQ_EMPTY(&so->so_aiojobq)) {
567 so->so_snd.sb_flags &= ~SB_AIO;
568 so->so_rcv.sb_flags &= ~SB_AIO;
569 }
570 }
571 TAILQ_REMOVE(&ki->kaio_sockqueue, aiocbe, plist);
572 TAILQ_INSERT_HEAD(&aio_jobs, aiocbe, list);
573 TAILQ_INSERT_HEAD(&ki->kaio_jobqueue, aiocbe, plist);
574 }
575 splx(s);
576
577restart1:
578 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobdone); aiocbe; aiocbe = aiocbn) {
579 aiocbn = TAILQ_NEXT(aiocbe, plist);
580 if (aio_free_entry(aiocbe))
581 goto restart1;
582 }
583
584restart2:
585 for (aiocbe = TAILQ_FIRST(&ki->kaio_jobqueue); aiocbe; aiocbe =
586 aiocbn) {
587 aiocbn = TAILQ_NEXT(aiocbe, plist);
588 if (aio_free_entry(aiocbe))
589 goto restart2;
590 }
591
592/*
593 * Note the use of lots of splbio here, trying to avoid splbio for long chains
594 * of I/O. Probably unnecessary.
595 */
596restart3:
597 s = splbio();
598 while (TAILQ_FIRST(&ki->kaio_bufqueue)) {
599 ki->kaio_flags |= KAIO_WAKEUP;
600 tsleep(p, PRIBIO, "aioprn", 0);
601 splx(s);
602 goto restart3;
603 }
604 splx(s);
605
606restart4:
607 s = splbio();
608 for (aiocbe = TAILQ_FIRST(&ki->kaio_bufdone); aiocbe; aiocbe = aiocbn) {
609 aiocbn = TAILQ_NEXT(aiocbe, plist);
610 if (aio_free_entry(aiocbe)) {
611 splx(s);
612 goto restart4;
613 }
614 }
615 splx(s);
616
617 /*
618 * If we've slept, jobs might have moved from one queue to another.
619 * Retry rundown if we didn't manage to empty the queues.
620 */
621 if (TAILQ_FIRST(&ki->kaio_jobdone) != NULL ||
622 TAILQ_FIRST(&ki->kaio_jobqueue) != NULL ||
623 TAILQ_FIRST(&ki->kaio_bufqueue) != NULL ||
624 TAILQ_FIRST(&ki->kaio_bufdone) != NULL)
625 goto restart1;
626
627 for (lj = TAILQ_FIRST(&ki->kaio_liojoblist); lj; lj = ljn) {
628 ljn = TAILQ_NEXT(lj, lioj_list);
629 if ((lj->lioj_buffer_count == 0) && (lj->lioj_queue_count ==
630 0)) {
631 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list);
632 uma_zfree(aiolio_zone, lj);
633 } else {
634#ifdef DIAGNOSTIC
635 printf("LIO job not cleaned up: B:%d, BF:%d, Q:%d, "
636 "QF:%d\n", lj->lioj_buffer_count,
637 lj->lioj_buffer_finished_count,
638 lj->lioj_queue_count,
639 lj->lioj_queue_finished_count);
640#endif
641 }
642 }
643
644 uma_zfree(kaio_zone, ki);
645 p->p_aioinfo = NULL;
646}
647
648/*
649 * Select a job to run (called by an AIO daemon).
650 */
651static struct aiocblist *
652aio_selectjob(struct aiothreadlist *aiop)
653{
654 int s;
655 struct aiocblist *aiocbe;
656 struct kaioinfo *ki;
657 struct proc *userp;
658
659 s = splnet();
660 for (aiocbe = TAILQ_FIRST(&aio_jobs); aiocbe; aiocbe =
661 TAILQ_NEXT(aiocbe, list)) {
662 userp = aiocbe->userproc;
663 ki = userp->p_aioinfo;
664
665 if (ki->kaio_active_count < ki->kaio_maxactive_count) {
666 TAILQ_REMOVE(&aio_jobs, aiocbe, list);
667 splx(s);
668 return (aiocbe);
669 }
670 }
671 splx(s);
672
673 return (NULL);
674}
675
676/*
677 * The AIO processing activity. This is the code that does the I/O request for
678 * the non-physio version of the operations. The normal vn operations are used,
679 * and this code should work in all instances for every type of file, including
680 * pipes, sockets, fifos, and regular files.
681 */
682static void
683aio_process(struct aiocblist *aiocbe)
684{
685 struct ucred *td_savedcred;
686 struct thread *td;
687 struct proc *mycp;
688 struct aiocb *cb;
689 struct file *fp;
690 struct uio auio;
691 struct iovec aiov;
692 int cnt;
693 int error;
694 int oublock_st, oublock_end;
695 int inblock_st, inblock_end;
696
697 td = curthread;
698 td_savedcred = td->td_ucred;
699 td->td_ucred = aiocbe->cred;
700 mycp = td->td_proc;
701 cb = &aiocbe->uaiocb;
702 fp = aiocbe->fd_file;
703
704 aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
705 aiov.iov_len = cb->aio_nbytes;
706
707 auio.uio_iov = &aiov;
708 auio.uio_iovcnt = 1;
709 auio.uio_offset = cb->aio_offset;
710 auio.uio_resid = cb->aio_nbytes;
711 cnt = cb->aio_nbytes;
712 auio.uio_segflg = UIO_USERSPACE;
713 auio.uio_td = td;
714
715 inblock_st = mycp->p_stats->p_ru.ru_inblock;
716 oublock_st = mycp->p_stats->p_ru.ru_oublock;
717 /*
718 * _aio_aqueue() acquires a reference to the file that is
719 * released in aio_free_entry().
720 */
721 if (cb->aio_lio_opcode == LIO_READ) {
722 auio.uio_rw = UIO_READ;
723 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td);
724 } else {
725 auio.uio_rw = UIO_WRITE;
726 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td);
727 }
728 inblock_end = mycp->p_stats->p_ru.ru_inblock;
729 oublock_end = mycp->p_stats->p_ru.ru_oublock;
730
731 aiocbe->inputcharge = inblock_end - inblock_st;
732 aiocbe->outputcharge = oublock_end - oublock_st;
733
734 if ((error) && (auio.uio_resid != cnt)) {
735 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
736 error = 0;
737 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) {
738 PROC_LOCK(aiocbe->userproc);
739 psignal(aiocbe->userproc, SIGPIPE);
740 PROC_UNLOCK(aiocbe->userproc);
741 }
742 }
743
744 cnt -= auio.uio_resid;
745 cb->_aiocb_private.error = error;
746 cb->_aiocb_private.status = cnt;
747 td->td_ucred = td_savedcred;
748}
749
750/*
751 * The AIO daemon, most of the actual work is done in aio_process,
752 * but the setup (and address space mgmt) is done in this routine.
753 */
754static void
755aio_daemon(void *uproc)
756{
757 int s;
758 struct aio_liojob *lj;
759 struct aiocb *cb;
760 struct aiocblist *aiocbe;
761 struct aiothreadlist *aiop;
762 struct kaioinfo *ki;
763 struct proc *curcp, *mycp, *userp;
764 struct vmspace *myvm, *tmpvm;
765 struct thread *td = curthread;
766 struct pgrp *newpgrp;
767 struct session *newsess;
768
769 mtx_lock(&Giant);
770 /*
771 * Local copies of curproc (cp) and vmspace (myvm)
772 */
773 mycp = td->td_proc;
774 myvm = mycp->p_vmspace;
775
776 KASSERT(mycp->p_textvp == NULL, ("kthread has a textvp"));
777
778 /*
779 * Allocate and ready the aio control info. There is one aiop structure
780 * per daemon.
781 */
782 aiop = uma_zalloc(aiop_zone, M_WAITOK);
783 aiop->aiothread = td;
784 aiop->aiothreadflags |= AIOP_FREE;
785
786 s = splnet();
787
788 /*
789 * Place thread (lightweight process) onto the AIO free thread list.
790 */
791 if (TAILQ_EMPTY(&aio_freeproc))
792 wakeup(&aio_freeproc);
793 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
794
795 splx(s);
796
797 /*
798	 * Get rid of our current file descriptors. AIODs don't need any
799	 * file descriptors, except those temporarily inherited from the client.
800 */
801 fdfree(td);
802
803 mtx_unlock(&Giant);
804 /* The daemon resides in its own pgrp. */
805 MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
806 M_WAITOK | M_ZERO);
807 MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION,
808 M_WAITOK | M_ZERO);
809
810 sx_xlock(&proctree_lock);
811 enterpgrp(mycp, mycp->p_pid, newpgrp, newsess);
812 sx_xunlock(&proctree_lock);
813 mtx_lock(&Giant);
814
815 /*
816 * Wakeup parent process. (Parent sleeps to keep from blasting away
817 * and creating too many daemons.)
818 */
819 wakeup(mycp);
820
821 for (;;) {
822 /*
823 * curcp is the current daemon process context.
824 * userp is the current user process context.
825 */
826 curcp = mycp;
827
828 /*
829 * Take daemon off of free queue
830 */
831 if (aiop->aiothreadflags & AIOP_FREE) {
832 s = splnet();
833 TAILQ_REMOVE(&aio_freeproc, aiop, list);
834 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
835 aiop->aiothreadflags &= ~AIOP_FREE;
836 splx(s);
837 }
838 aiop->aiothreadflags &= ~AIOP_SCHED;
839
840 /*
841 * Check for jobs.
842 */
843 while ((aiocbe = aio_selectjob(aiop)) != NULL) {
844 cb = &aiocbe->uaiocb;
845 userp = aiocbe->userproc;
846
847 aiocbe->jobstate = JOBST_JOBRUNNING;
848
849 /*
850 * Connect to process address space for user program.
851 */
852 if (userp != curcp) {
853 /*
854 * Save the current address space that we are
855 * connected to.
856 */
857 tmpvm = mycp->p_vmspace;
858
859 /*
860 * Point to the new user address space, and
861 * refer to it.
862 */
863 mycp->p_vmspace = userp->p_vmspace;
864 mycp->p_vmspace->vm_refcnt++;
865
866 /* Activate the new mapping. */
867 pmap_activate(FIRST_THREAD_IN_PROC(mycp));
868
869 /*
870	 * If the old address space wasn't the daemon's
871 * own address space, then we need to remove the
872 * daemon's reference from the other process
873 * that it was acting on behalf of.
874 */
875 if (tmpvm != myvm) {
876 vmspace_free(tmpvm);
877 }
878 curcp = userp;
879 }
880
881 ki = userp->p_aioinfo;
882 lj = aiocbe->lio;
883
884 /* Account for currently active jobs. */
885 ki->kaio_active_count++;
886
887 /* Do the I/O function. */
888 aio_process(aiocbe);
889
890 /* Decrement the active job count. */
891 ki->kaio_active_count--;
892
893 /*
894 * Increment the completion count for wakeup/signal
895 * comparisons.
896 */
897 aiocbe->jobflags |= AIOCBLIST_DONE;
898 ki->kaio_queue_finished_count++;
899 if (lj)
900 lj->lioj_queue_finished_count++;
901 if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags
902 & KAIO_RUNDOWN) && (ki->kaio_active_count == 0))) {
903 ki->kaio_flags &= ~KAIO_WAKEUP;
904 wakeup(userp);
905 }
906
907 s = splbio();
908 if (lj && (lj->lioj_flags &
909 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL) {
910 if ((lj->lioj_queue_finished_count ==
911 lj->lioj_queue_count) &&
912 (lj->lioj_buffer_finished_count ==
913 lj->lioj_buffer_count)) {
914 PROC_LOCK(userp);
915 psignal(userp,
916 lj->lioj_signal.sigev_signo);
917 PROC_UNLOCK(userp);
918 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
919 }
920 }
921 splx(s);
922
923 aiocbe->jobstate = JOBST_JOBFINISHED;
924
925 s = splnet();
926 TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
927 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
928 splx(s);
929 KNOTE(&aiocbe->klist, 0);
930
931 if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
932 wakeup(aiocbe);
933 aiocbe->jobflags &= ~AIOCBLIST_RUNDOWN;
934 }
935
936 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
937 PROC_LOCK(userp);
938 psignal(userp, cb->aio_sigevent.sigev_signo);
939 PROC_UNLOCK(userp);
940 }
941 }
942
943 /*
944 * Disconnect from user address space.
945 */
946 if (curcp != mycp) {
947 /* Get the user address space to disconnect from. */
948 tmpvm = mycp->p_vmspace;
949
950 /* Get original address space for daemon. */
951 mycp->p_vmspace = myvm;
952
953 /* Activate the daemon's address space. */
954 pmap_activate(FIRST_THREAD_IN_PROC(mycp));
955#ifdef DIAGNOSTIC
956 if (tmpvm == myvm) {
957 printf("AIOD: vmspace problem -- %d\n",
958 mycp->p_pid);
959 }
960#endif
961 /* Remove our vmspace reference. */
962 vmspace_free(tmpvm);
963
964 curcp = mycp;
965 }
966
967 /*
968 * If we are the first to be put onto the free queue, wakeup
969 * anyone waiting for a daemon.
970 */
971 s = splnet();
972 TAILQ_REMOVE(&aio_activeproc, aiop, list);
973 if (TAILQ_EMPTY(&aio_freeproc))
974 wakeup(&aio_freeproc);
975 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
976 aiop->aiothreadflags |= AIOP_FREE;
977 splx(s);
978
979 /*
980 * If daemon is inactive for a long time, allow it to exit,
981 * thereby freeing resources.
982 */
983 if ((aiop->aiothreadflags & AIOP_SCHED) == 0 &&
984 tsleep(aiop->aiothread, PRIBIO, "aiordy", aiod_lifetime)) {
985 s = splnet();
986 if (TAILQ_EMPTY(&aio_jobs)) {
987 if ((aiop->aiothreadflags & AIOP_FREE) &&
988 (num_aio_procs > target_aio_procs)) {
989 TAILQ_REMOVE(&aio_freeproc, aiop, list);
990 splx(s);
991 uma_zfree(aiop_zone, aiop);
992 num_aio_procs--;
993#ifdef DIAGNOSTIC
994 if (mycp->p_vmspace->vm_refcnt <= 1) {
995 printf("AIOD: bad vm refcnt for"
996 " exiting daemon: %d\n",
997 mycp->p_vmspace->vm_refcnt);
998 }
999#endif
1000 kthread_exit(0);
1001 }
1002 }
1003 splx(s);
1004 }
1005 }
1006}
1007
1008/*
1009 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The
1010 * AIO daemon modifies its environment itself.
1011 */
1012static int
1013aio_newproc(void)
1014{
1015 int error;
1016 struct proc *p;
1017
1018 error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
1019 num_aio_procs);
1020 if (error)
1021 return (error);
1022
1023 /*
1024	 * Wait until the daemon has started, but continue on anyway so
1025	 * that error conditions are handled.
1026 */
1027 error = tsleep(p, PZERO, "aiosta", aiod_timeout);
1028
1029 num_aio_procs++;
1030
1031 return (error);
1032}
1033
1034/*
1035 * Try the high-performance, low-overhead physio method for eligible
1036 * VCHR devices. This method doesn't use an aio helper thread, and
1037 * thus has very low overhead.
1038 *
1039 * Assumes that the caller, _aio_aqueue(), has incremented the file
1040 * structure's reference count, preventing its deallocation for the
1041 * duration of this call.
1042 */
1043static int
1044aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
1045{
1046 int error;
1047 struct aiocb *cb;
1048 struct file *fp;
1049 struct buf *bp;
1050 struct vnode *vp;
1051 struct kaioinfo *ki;
1052 struct aio_liojob *lj;
1053 int s;
1054 int notify;
1055
1056 cb = &aiocbe->uaiocb;
1057 fp = aiocbe->fd_file;
1058
1059 if (fp->f_type != DTYPE_VNODE)
1060 return (-1);
1061
1062 vp = fp->f_vnode;
1063
1064 /*
1065	 * If it's not a disk, we don't want to return a positive error.
1066	 * That would keep the aio code from falling through to try the
1067	 * threaded method when the target is a regular file.
1068 */
1069 if (!vn_isdisk(vp, &error)) {
1070 if (error == ENOTBLK)
1071 return (-1);
1072 else
1073 return (error);
1074 }
1075
1076 if (cb->aio_nbytes % vp->v_rdev->si_bsize_phys)
1077 return (-1);
1078
1079 if (cb->aio_nbytes >
1080 MAXPHYS - (((vm_offset_t) cb->aio_buf) & PAGE_MASK))
1081 return (-1);
1082
1083 ki = p->p_aioinfo;
1084 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count)
1085 return (-1);
1086
1087 ki->kaio_buffer_count++;
1088
1089 lj = aiocbe->lio;
1090 if (lj)
1091 lj->lioj_buffer_count++;
1092
1093 /* Create and build a buffer header for a transfer. */
1094 bp = (struct buf *)getpbuf(NULL);
1095 BUF_KERNPROC(bp);
1096
1097 /*
1098 * Get a copy of the kva from the physical buffer.
1099 */
1100 bp->b_dev = vp->v_rdev;
1101 error = 0;
1102
1103 bp->b_bcount = cb->aio_nbytes;
1104 bp->b_bufsize = cb->aio_nbytes;
1105 bp->b_flags = B_PHYS;
1106 bp->b_iodone = aio_physwakeup;
1107 bp->b_saveaddr = bp->b_data;
1108 bp->b_data = (void *)(uintptr_t)cb->aio_buf;
1109 bp->b_offset = cb->aio_offset;
1110 bp->b_iooffset = cb->aio_offset;
1111 bp->b_blkno = btodb(cb->aio_offset);
1112 bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1113
1114 /*
1115 * Bring buffer into kernel space.
1116 */
1117 if (vmapbuf(bp) < 0) {
1118 error = EFAULT;
1119 goto doerror;
1120 }
1121
1122 s = splbio();
1123 aiocbe->bp = bp;
1124 bp->b_caller1 = (void *)aiocbe;
1125 TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
1126 TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
1127 aiocbe->jobstate = JOBST_JOBQBUF;
1128 cb->_aiocb_private.status = cb->aio_nbytes;
1129 num_buf_aio++;
1130 bp->b_error = 0;
1131
1132 splx(s);
1133
1134 /* Perform transfer. */
1135 DEV_STRATEGY(bp);
1136
1137 notify = 0;
1138 s = splbio();
1139
1140 /*
1141 * If we had an error invoking the request, or an error in processing
1142 * the request before we have returned, we process it as an error in
1143 * transfer. Note that such an I/O error is not indicated immediately,
1144 * but is returned using the aio_error mechanism. In this case,
1145 * aio_suspend will return immediately.
1146 */
1147 if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
1148 struct aiocb *job = aiocbe->uuaiocb;
1149
1150 aiocbe->uaiocb._aiocb_private.status = 0;
1151 suword(&job->_aiocb_private.status, 0);
1152 aiocbe->uaiocb._aiocb_private.error = bp->b_error;
1153 suword(&job->_aiocb_private.error, bp->b_error);
1154
1155 ki->kaio_buffer_finished_count++;
1156
1157 if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
1158 aiocbe->jobstate = JOBST_JOBBFINISHED;
1159 aiocbe->jobflags |= AIOCBLIST_DONE;
1160 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
1161 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
1162 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
1163 notify = 1;
1164 }
1165 }
1166 splx(s);
1167 if (notify)
1168 KNOTE(&aiocbe->klist, 0);
1169 return (0);
1170
1171doerror:
1172 ki->kaio_buffer_count--;
1173 if (lj)
1174 lj->lioj_buffer_count--;
1175 aiocbe->bp = NULL;
1176 relpbuf(bp, NULL);
1177 return (error);
1178}
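/*
 * Illustrative userland sketch (not part of the kernel, cleanup omitted for
 * brevity): a request shaped like the one below is eligible for the physio
 * fast path above, assuming "/dev/ad0s1b" is a hypothetical character disk
 * device with 512-byte sectors; the length is a sector-size multiple and
 * well under MAXPHYS.  A regular file, an odd length, or an oversized
 * buffer falls back to the aiod thread path instead.
 *
 *	#include <aio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	static int
 *	queue_raw_read(struct aiocb *cb, char *buf)
 *	{
 *		int fd = open("/dev/ad0s1b", O_RDONLY);	-- hypothetical raw device
 *
 *		if (fd == -1)
 *			return (-1);
 *		memset(cb, 0, sizeof(*cb));
 *		cb->aio_fildes = fd;
 *		cb->aio_buf = buf;
 *		cb->aio_nbytes = 64 * 512;	-- sector-size multiple, < MAXPHYS
 *		cb->aio_offset = 0;
 *		return (aio_read(cb));
 *	}
 */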
1179
1180/*
1181 * This waits/tests physio completion.
1182 */
1183static int
1184aio_fphysio(struct aiocblist *iocb)
1185{
1186 int s;
1187 struct buf *bp;
1188 int error;
1189
1190 bp = iocb->bp;
1191
1192 s = splbio();
1193 while ((bp->b_flags & B_DONE) == 0) {
1194 if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) {
1195 if ((bp->b_flags & B_DONE) == 0) {
1196 splx(s);
1197 return (EINPROGRESS);
1198 } else
1199 break;
1200 }
1201 }
1202 splx(s);
1203
1204 /* Release mapping into kernel space. */
1205 vunmapbuf(bp);
1206 iocb->bp = 0;
1207
1208 error = 0;
1209
1210 /* Check for an error. */
1211 if (bp->b_ioflags & BIO_ERROR)
1212 error = bp->b_error;
1213
1214 relpbuf(bp, NULL);
1215 return (error);
1216}
1217
1218/*
1219 * Wake up aio requests that may be serviceable now.
1220 */
1221static void
1222aio_swake_cb(struct socket *so, struct sockbuf *sb)
1223{
1224 struct aiocblist *cb,*cbn;
1225 struct proc *p;
1226 struct kaioinfo *ki = NULL;
1227 int opcode, wakecount = 0;
1228 struct aiothreadlist *aiop;
1229
1230 if (sb == &so->so_snd) {
1231 opcode = LIO_WRITE;
1232 so->so_snd.sb_flags &= ~SB_AIO;
1233 } else {
1234 opcode = LIO_READ;
1235 so->so_rcv.sb_flags &= ~SB_AIO;
1236 }
1237
1238 for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
1239 cbn = TAILQ_NEXT(cb, list);
1240 if (opcode == cb->uaiocb.aio_lio_opcode) {
1241 p = cb->userproc;
1242 ki = p->p_aioinfo;
1243 TAILQ_REMOVE(&so->so_aiojobq, cb, list);
1244 TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
1245 TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
1246 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
1247 wakecount++;
1248 if (cb->jobstate != JOBST_JOBQGLOBAL)
1249 panic("invalid queue value");
1250 }
1251 }
1252
1253 while (wakecount--) {
1254 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
1255 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1256 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
1257 aiop->aiothreadflags &= ~AIOP_FREE;
1258 wakeup(aiop->aiothread);
1259 }
1260 }
1261}
1262
1263/*
1264	 * Queue a new AIO request.  The choice between the threaded and the
1265	 * direct physio VCHR technique is made in this code.
1266 */
1267static int
1268_aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type)
1269{
1270 struct proc *p = td->td_proc;
1271 struct filedesc *fdp;
1272 struct file *fp;
1273 unsigned int fd;
1274 struct socket *so;
1275 int s;
1276 int error;
1277 int opcode, user_opcode;
1278 struct aiocblist *aiocbe;
1279 struct aiothreadlist *aiop;
1280 struct kaioinfo *ki;
1281 struct kevent kev;
1282 struct kqueue *kq;
1283 struct file *kq_fp;
1284
1285 aiocbe = uma_zalloc(aiocb_zone, M_WAITOK);
1286 aiocbe->inputcharge = 0;
1287 aiocbe->outputcharge = 0;
1288 callout_handle_init(&aiocbe->timeouthandle);
1289 SLIST_INIT(&aiocbe->klist);
1290
1291 suword(&job->_aiocb_private.status, -1);
1292 suword(&job->_aiocb_private.error, 0);
1293 suword(&job->_aiocb_private.kernelinfo, -1);
1294
1295 error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
1296 if (error) {
1297 suword(&job->_aiocb_private.error, error);
1298 uma_zfree(aiocb_zone, aiocbe);
1299 return (error);
1300 }
1301 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
1302 !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
1303 uma_zfree(aiocb_zone, aiocbe);
1304 return (EINVAL);
1305 }
1306
1307 /* Save userspace address of the job info. */
1308 aiocbe->uuaiocb = job;
1309
1310 /* Get the opcode. */
1311 user_opcode = aiocbe->uaiocb.aio_lio_opcode;
1312 if (type != LIO_NOP)
1313 aiocbe->uaiocb.aio_lio_opcode = type;
1314 opcode = aiocbe->uaiocb.aio_lio_opcode;
1315
1316 /* Get the fd info for process. */
1317 fdp = p->p_fd;
1318
1319 /*
1320 * Range check file descriptor.
1321 */
1322 FILEDESC_LOCK(fdp);
1323 fd = aiocbe->uaiocb.aio_fildes;
1324 if (fd >= fdp->fd_nfiles) {
1325 FILEDESC_UNLOCK(fdp);
1326 uma_zfree(aiocb_zone, aiocbe);
1327 if (type == 0)
1328 suword(&job->_aiocb_private.error, EBADF);
1329 return (EBADF);
1330 }
1331
1332 fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
1333 if ((fp == NULL) ||
1334 ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0)) ||
1335 ((opcode == LIO_READ) && ((fp->f_flag & FREAD) == 0))) {
1336 FILEDESC_UNLOCK(fdp);
1337 uma_zfree(aiocb_zone, aiocbe);
1338 if (type == 0)
1339 suword(&job->_aiocb_private.error, EBADF);
1340 return (EBADF);
1341 }
1342 fhold(fp);
1343 FILEDESC_UNLOCK(fdp);
1344
1345 if (aiocbe->uaiocb.aio_offset == -1LL) {
1346 error = EINVAL;
1347 goto aqueue_fail;
1348 }
1349 error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
1350 if (error) {
1351 error = EINVAL;
1352 goto aqueue_fail;
1353 }
1354 aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
1355 if (jobrefid == LONG_MAX)
1356 jobrefid = 1;
1357 else
1358 jobrefid++;
1359
1360 if (opcode == LIO_NOP) {
1361 fdrop(fp, td);
1362 uma_zfree(aiocb_zone, aiocbe);
1363 if (type == 0) {
1364 suword(&job->_aiocb_private.error, 0);
1365 suword(&job->_aiocb_private.status, 0);
1366 suword(&job->_aiocb_private.kernelinfo, 0);
1367 }
1368 return (0);
1369 }
1370 if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
1371 if (type == 0)
1372 suword(&job->_aiocb_private.status, 0);
1373 error = EINVAL;
1374 goto aqueue_fail;
1375 }
1376
1377 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
1378 kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
1379 kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
1380 }
1381 else {
1382 /*
1383 * This method for requesting kevent-based notification won't
1384 * work on the alpha, since we're passing in a pointer
1385 * via aio_lio_opcode, which is an int. Use the SIGEV_KEVENT-
1386 * based method instead.
1387 */
1388 if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
1389 user_opcode == LIO_WRITE)
1390 goto no_kqueue;
1391
1392 error = copyin((struct kevent *)(uintptr_t)user_opcode,
1393 &kev, sizeof(kev));
1394 if (error)
1395 goto aqueue_fail;
1396 }
1397 if ((u_int)kev.ident >= fdp->fd_nfiles ||
1398 (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
1399 (kq_fp->f_type != DTYPE_KQUEUE)) {
1400 error = EBADF;
1401 goto aqueue_fail;
1402 }
1403 kq = kq_fp->f_data;
1404 kev.ident = (uintptr_t)aiocbe->uuaiocb;
1405 kev.filter = EVFILT_AIO;
1406 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
1407 kev.data = (intptr_t)aiocbe;
1408 error = kqueue_register(kq, &kev, td);
1409aqueue_fail:
1410 if (error) {
1411 fdrop(fp, td);
1412 uma_zfree(aiocb_zone, aiocbe);
1413 if (type == 0)
1414 suword(&job->_aiocb_private.error, error);
1415 goto done;
1416 }
1417no_kqueue:
1418
1419 suword(&job->_aiocb_private.error, EINPROGRESS);
1420 aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
1421 aiocbe->userproc = p;
1422 aiocbe->cred = crhold(td->td_ucred);
1423 aiocbe->jobflags = 0;
1424 aiocbe->lio = lj;
1425 ki = p->p_aioinfo;
1426
1427 if (fp->f_type == DTYPE_SOCKET) {
1428 /*
1429 * Alternate queueing for socket ops: Reach down into the
1430 * descriptor to get the socket data. Then check to see if the
1431 * socket is ready to be read or written (based on the requested
1432 * operation).
1433 *
1434	 * If it is not ready for I/O, then queue the aiocbe on the
1435 * socket, and set the flags so we get a call when sbnotify()
1436 * happens.
1437 */
1438 so = fp->f_data;
1439 s = splnet();
1440 if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1441 LIO_WRITE) && (!sowriteable(so)))) {
1442 TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
1443 TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
1444 if (opcode == LIO_READ)
1445 so->so_rcv.sb_flags |= SB_AIO;
1446 else
1447 so->so_snd.sb_flags |= SB_AIO;
1448 aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
1449 ki->kaio_queue_count++;
1450 num_queue_count++;
1451 splx(s);
1452 error = 0;
1453 goto done;
1454 }
1455 splx(s);
1456 }
1457
1458 if ((error = aio_qphysio(p, aiocbe)) == 0)
1459 goto done;
1460 if (error > 0) {
1461 suword(&job->_aiocb_private.status, 0);
1462 aiocbe->uaiocb._aiocb_private.error = error;
1463 suword(&job->_aiocb_private.error, error);
1464 goto done;
1465 }
1466
1467 /* No buffer for daemon I/O. */
1468 aiocbe->bp = NULL;
1469
1470 ki->kaio_queue_count++;
1471 if (lj)
1472 lj->lioj_queue_count++;
1473 s = splnet();
1474 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1475 TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
1476 splx(s);
1477 aiocbe->jobstate = JOBST_JOBQGLOBAL;
1478
1479 num_queue_count++;
1480 error = 0;
1481
1482 /*
1483 * If we don't have a free AIO process, and we are below our quota, then
1484 * start one. Otherwise, depend on the subsequent I/O completions to
1485	 * pick up this job. If we don't successfully create the new process
1486 * (thread) due to resource issues, we return an error for now (EAGAIN),
1487 * which is likely not the correct thing to do.
1488 */
1489 s = splnet();
1490retryproc:
1491 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1492 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1493 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
1494 aiop->aiothreadflags &= ~AIOP_FREE;
1495 wakeup(aiop->aiothread);
1496 } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1497 ((ki->kaio_active_count + num_aio_resv_start) <
1498 ki->kaio_maxactive_count)) {
1499 num_aio_resv_start++;
1500 if ((error = aio_newproc()) == 0) {
1501 num_aio_resv_start--;
1502 goto retryproc;
1503 }
1504 num_aio_resv_start--;
1505 }
1506 splx(s);
1507done:
1508 return (error);
1509}
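/*
 * Illustrative userland sketch (not part of the kernel, cleanup omitted for
 * brevity): requesting kqueue-based completion notification through
 * SIGEV_KEVENT, which the routine above turns into an EVFILT_AIO knote.
 * Field spellings follow this era of the tree; the descriptor, buffer and
 * length are assumed to be supplied by the caller.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	static ssize_t
 *	read_via_kqueue(int fd, char *buf, size_t len)
 *	{
 *		struct aiocb cb;
 *		struct kevent ev;
 *		int kq = kqueue();
 *
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = fd;
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = len;
 *		cb.aio_offset = 0;
 *		cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
 *		cb.aio_sigevent.sigev_notify_kqueue = kq;
 *		cb.aio_sigevent.sigev_value.sigval_ptr = &cb;
 *		if (kq == -1 || aio_read(&cb) == -1)
 *			return (-1);
 *		if (kevent(kq, NULL, 0, &ev, 1, NULL) != 1)
 *			return (-1);
 *		return (aio_return((struct aiocb *)ev.ident));	-- ident is the aiocb
 *	}
 */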
1510
1511/*
1512 * This routine queues an AIO request, checking for quotas.
1513 */
1514static int
1515aio_aqueue(struct thread *td, struct aiocb *job, int type)
1516{
1517 struct proc *p = td->td_proc;
1518 struct kaioinfo *ki;
1519
1520 if (p->p_aioinfo == NULL)
1521 aio_init_aioinfo(p);
1522
1523 if (num_queue_count >= max_queue_count)
1524 return (EAGAIN);
1525
1526 ki = p->p_aioinfo;
1527 if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
1528 return (EAGAIN);
1529
1530 return _aio_aqueue(td, job, NULL, type);
1531}
1532
1533/*
1534	 * Support the aio_return system call; as a side effect, kernel resources
1535	 * are released.
1536 */
1537int
1538aio_return(struct thread *td, struct aio_return_args *uap)
1539{
1540 struct proc *p = td->td_proc;
1541 int s;
1542 long jobref;
1543 struct aiocblist *cb, *ncb;
1544 struct aiocb *ujob;
1545 struct kaioinfo *ki;
1546
1547 ujob = uap->aiocbp;
1548 jobref = fuword(&ujob->_aiocb_private.kernelinfo);
1549 if (jobref == -1 || jobref == 0)
1550 return (EINVAL);
1551
1552 ki = p->p_aioinfo;
1553 if (ki == NULL)
1554 return (EINVAL);
1555 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
1556 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
1557 jobref) {
1558 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
1559 p->p_stats->p_ru.ru_oublock +=
1560 cb->outputcharge;
1561 cb->outputcharge = 0;
1562 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
1563 p->p_stats->p_ru.ru_inblock += cb->inputcharge;
1564 cb->inputcharge = 0;
1565 }
1566 goto done;
1567 }
1568 }
1569 s = splbio();
1570 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
1571 ncb = TAILQ_NEXT(cb, plist);
1572 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
1573 == jobref) {
1574 break;
1575 }
1576 }
1577 splx(s);
1578 done:
1579 if (cb != NULL) {
1580 if (ujob == cb->uuaiocb) {
1581 td->td_retval[0] =
1582 cb->uaiocb._aiocb_private.status;
1583 } else
1584 td->td_retval[0] = EFAULT;
1585 aio_free_entry(cb);
1586 return (0);
1587 }
1588 return (EINVAL);
1589}
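/*
 * Illustrative userland sketch (not part of the kernel, error-path cleanup
 * omitted for brevity): the aio_read()/aio_error()/aio_return() lifecycle
 * that the queueing and retirement paths above service.  The path name is
 * a placeholder.
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static ssize_t
 *	read_async(const char *path, char *buf, size_t len)
 *	{
 *		struct aiocb cb;
 *		ssize_t ret;
 *		int error;
 *
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = open(path, O_RDONLY);
 *		if (cb.aio_fildes == -1)
 *			return (-1);
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = len;
 *		cb.aio_offset = 0;
 *		if (aio_read(&cb) == -1)
 *			return (-1);
 *		while ((error = aio_error(&cb)) == EINPROGRESS)
 *			usleep(1000);		-- poll; aio_suspend() also works
 *		ret = aio_return(&cb);		-- also releases kernel resources
 *		close(cb.aio_fildes);
 *		return (error == 0 ? ret : -1);
 *	}
 */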
1590
1591/*
1592	 * Allow a process to wake up when any of the I/O requests completes.
1593 */
1594int
1595aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1596{
1597 struct proc *p = td->td_proc;
1598 struct timeval atv;
1599 struct timespec ts;
1600 struct aiocb *const *cbptr, *cbp;
1601 struct kaioinfo *ki;
1602 struct aiocblist *cb;
1603 int i;
1604 int njoblist;
1605 int error, s, timo;
1606 long *ijoblist;
1607 struct aiocb **ujoblist;
1608
1609 if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
1610 return (EINVAL);
1611
1612 timo = 0;
1613 if (uap->timeout) {
1614 /* Get timespec struct. */
1615 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1616 return (error);
1617
1618 if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
1619 return (EINVAL);
1620
1621 TIMESPEC_TO_TIMEVAL(&atv, &ts);
1622 if (itimerfix(&atv))
1623 return (EINVAL);
1624 timo = tvtohz(&atv);
1625 }
1626
1627 ki = p->p_aioinfo;
1628 if (ki == NULL)
1629 return (EAGAIN);
1630
1631 njoblist = 0;
1632 ijoblist = uma_zalloc(aiol_zone, M_WAITOK);
1633 ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
1634 cbptr = uap->aiocbp;
1635
1636 for (i = 0; i < uap->nent; i++) {
1637 cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
1638 if (cbp == 0)
1639 continue;
1640 ujoblist[njoblist] = cbp;
1641 ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
1642 njoblist++;
1643 }
1644
1645 if (njoblist == 0) {
1646 uma_zfree(aiol_zone, ijoblist);
1647 uma_zfree(aiol_zone, ujoblist);
1648 return (0);
1649 }
1650
1651 error = 0;
1652 for (;;) {
1653 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
1654 for (i = 0; i < njoblist; i++) {
1655 if (((intptr_t)
1656 cb->uaiocb._aiocb_private.kernelinfo) ==
1657 ijoblist[i]) {
1658 if (ujoblist[i] != cb->uuaiocb)
1659 error = EINVAL;
1660 uma_zfree(aiol_zone, ijoblist);
1661 uma_zfree(aiol_zone, ujoblist);
1662 return (error);
1663 }
1664 }
1665 }
1666
1667 s = splbio();
1668 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
1669 TAILQ_NEXT(cb, plist)) {
1670 for (i = 0; i < njoblist; i++) {
1671 if (((intptr_t)
1672 cb->uaiocb._aiocb_private.kernelinfo) ==
1673 ijoblist[i]) {
1674 splx(s);
1675 if (ujoblist[i] != cb->uuaiocb)
1676 error = EINVAL;
1677 uma_zfree(aiol_zone, ijoblist);
1678 uma_zfree(aiol_zone, ujoblist);
1679 return (error);
1680 }
1681 }
1682 }
1683
1684 ki->kaio_flags |= KAIO_WAKEUP;
1685 error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
1686 splx(s);
1687
1688 if (error == ERESTART || error == EINTR) {
1689 uma_zfree(aiol_zone, ijoblist);
1690 uma_zfree(aiol_zone, ujoblist);
1691 return (EINTR);
1692 } else if (error == EWOULDBLOCK) {
1693 uma_zfree(aiol_zone, ijoblist);
1694 uma_zfree(aiol_zone, ujoblist);
1695 return (EAGAIN);
1696 }
1697 }
1698
1699/* NOTREACHED */
1700 return (EINVAL);
1701}
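/*
 * Illustrative userland sketch (not part of the kernel): waiting for any of
 * several outstanding requests with aio_suspend(), bounded by a one-second
 * timeout.  The array is assumed to hold aiocbs already queued with
 * aio_read() or aio_write().
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	static int
 *	wait_for_one(const struct aiocb *const cbs[], int ncbs)
 *	{
 *		struct timespec ts;
 *
 *		ts.tv_sec = 1;
 *		ts.tv_nsec = 0;
 *		if (aio_suspend(cbs, ncbs, &ts) == -1)
 *			return (errno == EAGAIN ? 0 : -1);	-- 0: timed out
 *		return (1);		-- at least one request has completed
 *	}
 */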
1702
1703/*
1704 * aio_cancel cancels any non-physio aio operations not currently in
1705 * progress.
1706 */
1707int
1708aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1709{
1710 struct proc *p = td->td_proc;
1711 struct kaioinfo *ki;
1712 struct aiocblist *cbe, *cbn;
1713 struct file *fp;
1714 struct filedesc *fdp;
1715 struct socket *so;
1716 struct proc *po;
1717 int s,error;
1718 int cancelled=0;
1719 int notcancelled=0;
1720 struct vnode *vp;
1721
1722 fdp = p->p_fd;
1723 if ((u_int)uap->fd >= fdp->fd_nfiles ||
1724 (fp = fdp->fd_ofiles[uap->fd]) == NULL)
1725 return (EBADF);
1726
1727 if (fp->f_type == DTYPE_VNODE) {
1728 vp = fp->f_vnode;
1729
1730 if (vn_isdisk(vp,&error)) {
1731 td->td_retval[0] = AIO_NOTCANCELED;
1732 return (0);
1733 }
1734 } else if (fp->f_type == DTYPE_SOCKET) {
1735 so = fp->f_data;
1736
1737 s = splnet();
1738
1739 for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
1740 cbn = TAILQ_NEXT(cbe, list);
1741 if ((uap->aiocbp == NULL) ||
1742 (uap->aiocbp == cbe->uuaiocb) ) {
1743 po = cbe->userproc;
1744 ki = po->p_aioinfo;
1745 TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
1746 TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
1747 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
1748 if (ki->kaio_flags & KAIO_WAKEUP) {
1749 wakeup(po);
1750 }
1751 cbe->jobstate = JOBST_JOBFINISHED;
1752 cbe->uaiocb._aiocb_private.status=-1;
1753 cbe->uaiocb._aiocb_private.error=ECANCELED;
1754 cancelled++;
1755/* XXX cancelled, knote? */
1756 if (cbe->uaiocb.aio_sigevent.sigev_notify ==
1757 SIGEV_SIGNAL) {
1758 PROC_LOCK(cbe->userproc);
1759 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
1760 PROC_UNLOCK(cbe->userproc);
1761 }
1762 if (uap->aiocbp)
1763 break;
1764 }
1765 }
1766 splx(s);
1767
1768 if ((cancelled) && (uap->aiocbp)) {
1769 td->td_retval[0] = AIO_CANCELED;
1770 return (0);
1771 }
1772 }
1773 ki=p->p_aioinfo;
1774 if (ki == NULL)
1775 goto done;
1776 s = splnet();
1777
1778 for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
1779 cbn = TAILQ_NEXT(cbe, plist);
1780
1781 if ((uap->fd == cbe->uaiocb.aio_fildes) &&
1782 ((uap->aiocbp == NULL ) ||
1783 (uap->aiocbp == cbe->uuaiocb))) {
1784
1785 if (cbe->jobstate == JOBST_JOBQGLOBAL) {
1786 TAILQ_REMOVE(&aio_jobs, cbe, list);
1787 TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
1788 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
1789 plist);
1790 cancelled++;
1791 ki->kaio_queue_finished_count++;
1792 cbe->jobstate = JOBST_JOBFINISHED;
1793 cbe->uaiocb._aiocb_private.status = -1;
1794 cbe->uaiocb._aiocb_private.error = ECANCELED;
1795/* XXX cancelled, knote? */
1796 if (cbe->uaiocb.aio_sigevent.sigev_notify ==
1797 SIGEV_SIGNAL) {
1798 PROC_LOCK(cbe->userproc);
1799 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
1800 PROC_UNLOCK(cbe->userproc);
1801 }
1802 } else {
1803 notcancelled++;
1804 }
1805 }
1806 }
1807 splx(s);
1808done:
1809 if (notcancelled) {
1810 td->td_retval[0] = AIO_NOTCANCELED;
1811 return (0);
1812 }
1813 if (cancelled) {
1814 td->td_retval[0] = AIO_CANCELED;
1815 return (0);
1816 }
1817 td->td_retval[0] = AIO_ALLDONE;
1818
1819 return (0);
1820}
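/*
 * Illustrative userland sketch (not part of the kernel): cancelling an
 * outstanding request and interpreting the three result codes produced by
 * the routine above.  "cb" is assumed to have been queued against "fd".
 *
 *	#include <aio.h>
 *
 *	static int
 *	cancel_request(int fd, struct aiocb *cb)
 *	{
 *		switch (aio_cancel(fd, cb)) {
 *		case AIO_CANCELED:
 *			return (0);	-- cancelled; aio_error() now gives ECANCELED
 *		case AIO_NOTCANCELED:
 *			return (1);	-- still in progress; keep polling aio_error()
 *		case AIO_ALLDONE:
 *			return (2);	-- already finished; collect it with aio_return()
 *		default:
 *			return (-1);	-- bad descriptor or other failure
 *		}
 *	}
 */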
1821
1822/*
1823	 * aio_error is implemented at the kernel level for compatibility purposes
1824	 * only. For a user-mode async implementation, it would be best done in a
1825	 * userland subroutine.
1826 */
1827int
1828aio_error(struct thread *td, struct aio_error_args *uap)
1829{
1830 struct proc *p = td->td_proc;
1831 int s;
1832 struct aiocblist *cb;
1833 struct kaioinfo *ki;
1834 long jobref;
1835
1836 ki = p->p_aioinfo;
1837 if (ki == NULL)
1838 return (EINVAL);
1839
1840 jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
1841 if ((jobref == -1) || (jobref == 0))
1842 return (EINVAL);
1843
1844 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
1845 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1846 jobref) {
1847 td->td_retval[0] = cb->uaiocb._aiocb_private.error;
1848 return (0);
1849 }
1850 }
1851
1852 s = splnet();
1853
1854 for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
1855 plist)) {
1856 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1857 jobref) {
1858 td->td_retval[0] = EINPROGRESS;
1859 splx(s);
1860 return (0);
1861 }
1862 }
1863
1864 for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
1865 plist)) {
1866 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1867 jobref) {
1868 td->td_retval[0] = EINPROGRESS;
1869 splx(s);
1870 return (0);
1871 }
1872 }
1873 splx(s);
1874
1875 s = splbio();
1876 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
1877 plist)) {
1878 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1879 jobref) {
1880 td->td_retval[0] = cb->uaiocb._aiocb_private.error;
1881 splx(s);
1882 return (0);
1883 }
1884 }
1885
1886 for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
1887 plist)) {
1888 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1889 jobref) {
1890 td->td_retval[0] = EINPROGRESS;
1891 splx(s);
1892 return (0);
1893 }
1894 }
1895 splx(s);
1896
1897#if (0)
1898 /*
1899 * Hack for lio.
1900 */
1901 status = fuword(&uap->aiocbp->_aiocb_private.status);
1902 if (status == -1)
1903 return fuword(&uap->aiocbp->_aiocb_private.error);
1904#endif
1905 return (EINVAL);
1906}
1907
1908/* syscall - asynchronous read from a file (REALTIME) */
1909int
1910aio_read(struct thread *td, struct aio_read_args *uap)
1911{
1912
1913 return aio_aqueue(td, uap->aiocbp, LIO_READ);
1914}
1915
1916/* syscall - asynchronous write to a file (REALTIME) */
1917int
1918aio_write(struct thread *td, struct aio_write_args *uap)
1919{
1920
1921 return aio_aqueue(td, uap->aiocbp, LIO_WRITE);
1922}
1923
1924/* syscall - list directed I/O (REALTIME) */
1925int
1926lio_listio(struct thread *td, struct lio_listio_args *uap)
1927{
1928 struct proc *p = td->td_proc;
1929 int nent, nentqueued;
1930 struct aiocb *iocb, * const *cbptr;
1931 struct aiocblist *cb;
1932 struct kaioinfo *ki;
1933 struct aio_liojob *lj;
1934 int error, runningcode;
1935 int nerror;
1936 int i;
1937 int s;
1938
1939 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
1940 return (EINVAL);
1941
1942 nent = uap->nent;
1943 if (nent < 0 || nent > AIO_LISTIO_MAX)
1944 return (EINVAL);
1945
1946 if (p->p_aioinfo == NULL)
1947 aio_init_aioinfo(p);
1948
1949 if ((nent + num_queue_count) > max_queue_count)
1950 return (EAGAIN);
1951
1952 ki = p->p_aioinfo;
1953 if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
1954 return (EAGAIN);
1955
1956 lj = uma_zalloc(aiolio_zone, M_WAITOK);
1957 if (!lj)
1958 return (EAGAIN);
1959
1960 lj->lioj_flags = 0;
1961 lj->lioj_buffer_count = 0;
1962 lj->lioj_buffer_finished_count = 0;
1963 lj->lioj_queue_count = 0;
1964 lj->lioj_queue_finished_count = 0;
1965 lj->lioj_ki = ki;
1966
1967 /*
1968 * Setup signal.
1969 */
1970 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
1971 error = copyin(uap->sig, &lj->lioj_signal,
1972 sizeof(lj->lioj_signal));
1973 if (error) {
1974 uma_zfree(aiolio_zone, lj);
1975 return (error);
1976 }
1977 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
1978 uma_zfree(aiolio_zone, lj);
1979 return (EINVAL);
1980 }
1981 lj->lioj_flags |= LIOJ_SIGNAL;
1982 }
1983 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
1984 /*
1985 * Get pointers to the list of I/O requests.
1986 */
1987 nerror = 0;
1988 nentqueued = 0;
1989 cbptr = uap->acb_list;
1990 for (i = 0; i < uap->nent; i++) {
1991 iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
1992 if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
1993 error = _aio_aqueue(td, iocb, lj, 0);
1994 if (error == 0)
1995 nentqueued++;
1996 else
1997 nerror++;
1998 }
1999 }
2000
2001 /*
2002	 * If we haven't queued any, then just return without waiting.
2003 */
2004 if (nentqueued == 0)
2005 return (0);
2006
2007 /*
2008 * Calculate the appropriate error return.
2009 */
2010 runningcode = 0;
2011 if (nerror)
2012 runningcode = EIO;
2013
2014 if (uap->mode == LIO_WAIT) {
2015 int command, found, jobref;
2016
2017 for (;;) {
2018 found = 0;
2019 for (i = 0; i < uap->nent; i++) {
2020 /*
2021 * Fetch address of the control buf pointer in
2022 * user space.
2023 */
2024 iocb = (struct aiocb *)
2025 (intptr_t)fuword(&cbptr[i]);
2026 if (((intptr_t)iocb == -1) || ((intptr_t)iocb
2027 == 0))
2028 continue;
2029
2030 /*
2031 * Fetch the associated command from user space.
2032 */
2033 command = fuword(&iocb->aio_lio_opcode);
2034 if (command == LIO_NOP) {
2035 found++;
2036 continue;
2037 }
2038
2039 jobref =
2040 fuword(&iocb->_aiocb_private.kernelinfo);
2041
2042 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
2043 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
2044 == jobref) {
2045 if (cb->uaiocb.aio_lio_opcode
2046 == LIO_WRITE) {
2047 p->p_stats->p_ru.ru_oublock
2048 +=
2049 cb->outputcharge;
2050 cb->outputcharge = 0;
2051 } else if (cb->uaiocb.aio_lio_opcode
2052 == LIO_READ) {
2053 p->p_stats->p_ru.ru_inblock
2054 += cb->inputcharge;
2055 cb->inputcharge = 0;
2056 }
2057 found++;
2058 break;
2059 }
2060 }
2061
2062 s = splbio();
2063 TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
2064 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
2065 == jobref) {
2066 found++;
2067 break;
2068 }
2069 }
2070 splx(s);
2071 }
2072
2073 /*
2074 * If all I/Os have been disposed of, then we can
2075 * return.
2076 */
2077 if (found == nentqueued)
2078 return (runningcode);
2079
2080 ki->kaio_flags |= KAIO_WAKEUP;
2081 error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);
2082
2083 if (error == EINTR)
2084 return (EINTR);
2085 else if (error == EWOULDBLOCK)
2086 return (EAGAIN);
2087 }
2088 }
2089
2090 return (runningcode);
2091}
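/*
 * Illustrative userland sketch (not part of the kernel): submitting a small
 * batch with LIO_WAIT, which makes the syscall above sleep until every
 * queued element has completed.  The descriptor, buffers and length are
 * assumed to be supplied by the caller.
 *
 *	#include <aio.h>
 *	#include <string.h>
 *
 *	static int
 *	batch_read(int fd, char *buf0, char *buf1, size_t len)
 *	{
 *		struct aiocb cb0, cb1;
 *		struct aiocb *list[2];
 *
 *		memset(&cb0, 0, sizeof(cb0));
 *		memset(&cb1, 0, sizeof(cb1));
 *		cb0.aio_fildes = cb1.aio_fildes = fd;
 *		cb0.aio_lio_opcode = cb1.aio_lio_opcode = LIO_READ;
 *		cb0.aio_buf = buf0;
 *		cb0.aio_nbytes = len;
 *		cb0.aio_offset = 0;
 *		cb1.aio_buf = buf1;
 *		cb1.aio_nbytes = len;
 *		cb1.aio_offset = len;
 *		list[0] = &cb0;
 *		list[1] = &cb1;
 *		return (lio_listio(LIO_WAIT, list, 2, NULL));
 *	}
 */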
2092
2093/*
2094 * This is a weird hack so that we can post a signal. It is safe to do so from
2095 * a timeout routine, but *not* from an interrupt routine.
2096 */
2097static void
2098process_signal(void *aioj)
2099{
2100 struct aiocblist *aiocbe = aioj;
2101 struct aio_liojob *lj = aiocbe->lio;
2102 struct aiocb *cb = &aiocbe->uaiocb;
2103
2104 if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
2105 (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
2106 PROC_LOCK(lj->lioj_ki->kaio_p);
2107 psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
2108 PROC_UNLOCK(lj->lioj_ki->kaio_p);
2109 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2110 }
2111
2112 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
2113 PROC_LOCK(aiocbe->userproc);
2114 psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
2115 PROC_UNLOCK(aiocbe->userproc);
2116 }
2117}
2118
2119/*
2120 * Interrupt handler for physio, performs the necessary process wakeups, and
2121 * signals.
2122 */
2123static void
2124aio_physwakeup(struct buf *bp)
2125{
2126 struct aiocblist *aiocbe;
2127 struct proc *p;
2128 struct kaioinfo *ki;
2129 struct aio_liojob *lj;
2130
2131 wakeup(bp);
2132
2133 aiocbe = (struct aiocblist *)bp->b_caller1;
2134 if (aiocbe) {
2135 p = aiocbe->userproc;
2136
2137 aiocbe->jobstate = JOBST_JOBBFINISHED;
2138 aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
2139 aiocbe->uaiocb._aiocb_private.error = 0;
2140 aiocbe->jobflags |= AIOCBLIST_DONE;
2141
2142 if (bp->b_ioflags & BIO_ERROR)
2143 aiocbe->uaiocb._aiocb_private.error = bp->b_error;
2144
2145 lj = aiocbe->lio;
2146 if (lj) {
2147 lj->lioj_buffer_finished_count++;
2148
2149 /*
2150 * wakeup/signal if all of the interrupt jobs are done.
2151 */
2152 if (lj->lioj_buffer_finished_count ==
2153 lj->lioj_buffer_count) {
2154 /*
2155 * Post a signal if it is called for.
2156 */
2157 if ((lj->lioj_flags &
2158 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
2159 LIOJ_SIGNAL) {
2160 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2161 aiocbe->timeouthandle =
2162 timeout(process_signal,
2163 aiocbe, 0);
2164 }
2165 }
2166 }
2167
2168 ki = p->p_aioinfo;
2169 if (ki) {
2170 ki->kaio_buffer_finished_count++;
2171 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
2172 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
2173 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
2174
2175 KNOTE(&aiocbe->klist, 0);
2176 /* Do the wakeup. */
2177 if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
2178 ki->kaio_flags &= ~KAIO_WAKEUP;
2179 wakeup(p);
2180 }
2181 }
2182
2183 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
2184 aiocbe->timeouthandle =
2185 timeout(process_signal, aiocbe, 0);
2186 }
2187}
2188
2189/* syscall - wait for the next completion of an aio request */
2190int
2191aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2192{
2193 struct proc *p = td->td_proc;
2194 struct timeval atv;
2195 struct timespec ts;
2196 struct kaioinfo *ki;
2197 struct aiocblist *cb = NULL;
2198 int error, s, timo;
2199
2200 suword(uap->aiocbp, (int)NULL);
2201
2202 timo = 0;
2203 if (uap->timeout) {
2204 /* Get timespec struct. */
2205 error = copyin(uap->timeout, &ts, sizeof(ts));
2206 if (error)
2207 return (error);
2208
2209 if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
2210 return (EINVAL);
2211
2212 TIMESPEC_TO_TIMEVAL(&atv, &ts);
2213 if (itimerfix(&atv))
2214 return (EINVAL);
2215 timo = tvtohz(&atv);
2216 }
2217
2218 ki = p->p_aioinfo;
2219 if (ki == NULL)
2220 return (EAGAIN);
2221
2222 for (;;) {
2223 if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
2224 suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
2225 td->td_retval[0] = cb->uaiocb._aiocb_private.status;
2226 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
2227 p->p_stats->p_ru.ru_oublock +=
2228 cb->outputcharge;
2229 cb->outputcharge = 0;
2230 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
2231 p->p_stats->p_ru.ru_inblock += cb->inputcharge;
2232 cb->inputcharge = 0;
2233 }
2234 aio_free_entry(cb);
2235 return (cb->uaiocb._aiocb_private.error);
2236 }
2237
2238 s = splbio();
2239 if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0 ) {
2240 splx(s);
2241 suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
2242 td->td_retval[0] = cb->uaiocb._aiocb_private.status;
2243 aio_free_entry(cb);
2244 return (cb->uaiocb._aiocb_private.error);
2245 }
2246
2247 ki->kaio_flags |= KAIO_WAKEUP;
2248 error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
2249 splx(s);
2250
2251 if (error == ERESTART)
2252 return (EINTR);
2253 else if (error < 0)
2254 return (error);
2255 else if (error == EINTR)
2256 return (EINTR);
2257 else if (error == EWOULDBLOCK)
2258 return (EAGAIN);
2259 }
2260}
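/*
 * Illustrative userland sketch (not part of the kernel): draining
 * completions with the FreeBSD-specific aio_waitcomplete() instead of
 * polling aio_error()/aio_return().  The return value is the completed
 * request's status and *iocbp identifies which request finished; a NULL
 * timeout blocks until something completes.
 *
 *	#include <aio.h>
 *
 *	static ssize_t
 *	next_completion(struct aiocb **iocbp)
 *	{
 *		return (aio_waitcomplete(iocbp, NULL));
 *	}
 */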
2261
2262/* kqueue attach function */
2263static int
2264filt_aioattach(struct knote *kn)
2265{
2266 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2267
2268 /*
2269 * The aiocbe pointer must be validated before using it, so
2270 * registration is restricted to the kernel; the user cannot
2271 * set EV_FLAG1.
2272 */
2273 if ((kn->kn_flags & EV_FLAG1) == 0)
2274 return (EPERM);
2275 kn->kn_flags &= ~EV_FLAG1;
2276
2277 SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);
2278
2279 return (0);
2280}
2281
2282/* kqueue detach function */
2283static void
2284filt_aiodetach(struct knote *kn)
2285{
2286 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2287
2288 SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
2289}
2290
2291/* kqueue filter function */
2292/*ARGSUSED*/
2293static int
2294filt_aio(struct knote *kn, long hint)
2295{
2296 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2297
2298 kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
2299 if (aiocbe->jobstate != JOBST_JOBFINISHED &&
2300 aiocbe->jobstate != JOBST_JOBBFINISHED)
2301 return (0);
2302 kn->kn_flags |= EV_EOF;
2303 return (1);
2304}
1105 bp->b_iodone = aio_physwakeup;
1106 bp->b_saveaddr = bp->b_data;
1107 bp->b_data = (void *)(uintptr_t)cb->aio_buf;
1108 bp->b_offset = cb->aio_offset;
1109 bp->b_iooffset = cb->aio_offset;
1110 bp->b_blkno = btodb(cb->aio_offset);
1111 bp->b_iocmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
1112
1113 /*
1114 * Bring buffer into kernel space.
1115 */
1116 if (vmapbuf(bp) < 0) {
1117 error = EFAULT;
1118 goto doerror;
1119 }
1120
1121 s = splbio();
1122 aiocbe->bp = bp;
1123 bp->b_caller1 = (void *)aiocbe;
1124 TAILQ_INSERT_TAIL(&aio_bufjobs, aiocbe, list);
1125 TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
1126 aiocbe->jobstate = JOBST_JOBQBUF;
1127 cb->_aiocb_private.status = cb->aio_nbytes;
1128 num_buf_aio++;
1129 bp->b_error = 0;
1130
1131 splx(s);
1132
1133 /* Perform transfer. */
1134 DEV_STRATEGY(bp);
1135
1136 notify = 0;
1137 s = splbio();
1138
1139 /*
1140 * If we had an error invoking the request, or an error in processing
1141 * the request before we have returned, we process it as an error in
1142 * transfer. Note that such an I/O error is not indicated immediately,
1143 * but is returned using the aio_error mechanism. In this case,
1144 * aio_suspend will return immediately.
1145 */
1146 if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
1147 struct aiocb *job = aiocbe->uuaiocb;
1148
1149 aiocbe->uaiocb._aiocb_private.status = 0;
1150 suword(&job->_aiocb_private.status, 0);
1151 aiocbe->uaiocb._aiocb_private.error = bp->b_error;
1152 suword(&job->_aiocb_private.error, bp->b_error);
1153
1154 ki->kaio_buffer_finished_count++;
1155
1156 if (aiocbe->jobstate != JOBST_JOBBFINISHED) {
1157 aiocbe->jobstate = JOBST_JOBBFINISHED;
1158 aiocbe->jobflags |= AIOCBLIST_DONE;
1159 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
1160 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
1161 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
1162 notify = 1;
1163 }
1164 }
1165 splx(s);
1166 if (notify)
1167 KNOTE(&aiocbe->klist, 0);
1168 return (0);
1169
1170doerror:
1171 ki->kaio_buffer_count--;
1172 if (lj)
1173 lj->lioj_buffer_count--;
1174 aiocbe->bp = NULL;
1175 relpbuf(bp, NULL);
1176 return (error);
1177}
1178
1179/*
1180 * This waits/tests physio completion.
1181 */
1182static int
1183aio_fphysio(struct aiocblist *iocb)
1184{
1185 int s;
1186 struct buf *bp;
1187 int error;
1188
1189 bp = iocb->bp;
1190
1191 s = splbio();
1192 while ((bp->b_flags & B_DONE) == 0) {
1193 if (tsleep(bp, PRIBIO, "physstr", aiod_timeout)) {
1194 if ((bp->b_flags & B_DONE) == 0) {
1195 splx(s);
1196 return (EINPROGRESS);
1197 } else
1198 break;
1199 }
1200 }
1201 splx(s);
1202
1203 /* Release mapping into kernel space. */
1204 vunmapbuf(bp);
1205 iocb->bp = 0;
1206
1207 error = 0;
1208
1209 /* Check for an error. */
1210 if (bp->b_ioflags & BIO_ERROR)
1211 error = bp->b_error;
1212
1213 relpbuf(bp, NULL);
1214 return (error);
1215}
1216
1217/*
1218 * Wake up aio requests that may be serviceable now.
1219 */
1220static void
1221aio_swake_cb(struct socket *so, struct sockbuf *sb)
1222{
1223 struct aiocblist *cb,*cbn;
1224 struct proc *p;
1225 struct kaioinfo *ki = NULL;
1226 int opcode, wakecount = 0;
1227 struct aiothreadlist *aiop;
1228
1229 if (sb == &so->so_snd) {
1230 opcode = LIO_WRITE;
1231 so->so_snd.sb_flags &= ~SB_AIO;
1232 } else {
1233 opcode = LIO_READ;
1234 so->so_rcv.sb_flags &= ~SB_AIO;
1235 }
1236
1237 for (cb = TAILQ_FIRST(&so->so_aiojobq); cb; cb = cbn) {
1238 cbn = TAILQ_NEXT(cb, list);
1239 if (opcode == cb->uaiocb.aio_lio_opcode) {
1240 p = cb->userproc;
1241 ki = p->p_aioinfo;
1242 TAILQ_REMOVE(&so->so_aiojobq, cb, list);
1243 TAILQ_REMOVE(&ki->kaio_sockqueue, cb, plist);
1244 TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
1245 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, cb, plist);
1246 wakecount++;
1247 if (cb->jobstate != JOBST_JOBQGLOBAL)
1248 panic("invalid queue value");
1249 }
1250 }
1251
1252 while (wakecount--) {
1253 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != 0) {
1254 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1255 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
1256 aiop->aiothreadflags &= ~AIOP_FREE;
1257 wakeup(aiop->aiothread);
1258 }
1259 }
1260}
1261
1262/*
1263 * Queue a new AIO request. Choosing either the threaded or direct physio VCHR
1264 * technique is done in this code.
1265 */
1266static int
1267_aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int type)
1268{
1269 struct proc *p = td->td_proc;
1270 struct filedesc *fdp;
1271 struct file *fp;
1272 unsigned int fd;
1273 struct socket *so;
1274 int s;
1275 int error;
1276 int opcode, user_opcode;
1277 struct aiocblist *aiocbe;
1278 struct aiothreadlist *aiop;
1279 struct kaioinfo *ki;
1280 struct kevent kev;
1281 struct kqueue *kq;
1282 struct file *kq_fp;
1283
1284 aiocbe = uma_zalloc(aiocb_zone, M_WAITOK);
1285 aiocbe->inputcharge = 0;
1286 aiocbe->outputcharge = 0;
1287 callout_handle_init(&aiocbe->timeouthandle);
1288 SLIST_INIT(&aiocbe->klist);
1289
1290 suword(&job->_aiocb_private.status, -1);
1291 suword(&job->_aiocb_private.error, 0);
1292 suword(&job->_aiocb_private.kernelinfo, -1);
1293
1294 error = copyin(job, &aiocbe->uaiocb, sizeof(aiocbe->uaiocb));
1295 if (error) {
1296 suword(&job->_aiocb_private.error, error);
1297 uma_zfree(aiocb_zone, aiocbe);
1298 return (error);
1299 }
1300 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL &&
1301 !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
1302 uma_zfree(aiocb_zone, aiocbe);
1303 return (EINVAL);
1304 }
1305
1306 /* Save userspace address of the job info. */
1307 aiocbe->uuaiocb = job;
1308
1309 /* Get the opcode. */
1310 user_opcode = aiocbe->uaiocb.aio_lio_opcode;
1311 if (type != LIO_NOP)
1312 aiocbe->uaiocb.aio_lio_opcode = type;
1313 opcode = aiocbe->uaiocb.aio_lio_opcode;
1314
1315 /* Get the fd info for process. */
1316 fdp = p->p_fd;
1317
1318 /*
1319 * Range check file descriptor.
1320 */
1321 FILEDESC_LOCK(fdp);
1322 fd = aiocbe->uaiocb.aio_fildes;
1323 if (fd >= fdp->fd_nfiles) {
1324 FILEDESC_UNLOCK(fdp);
1325 uma_zfree(aiocb_zone, aiocbe);
1326 if (type == 0)
1327 suword(&job->_aiocb_private.error, EBADF);
1328 return (EBADF);
1329 }
1330
1331 fp = aiocbe->fd_file = fdp->fd_ofiles[fd];
1332 if ((fp == NULL) ||
1333 ((opcode == LIO_WRITE) && ((fp->f_flag & FWRITE) == 0)) ||
1334 ((opcode == LIO_READ) && ((fp->f_flag & FREAD) == 0))) {
1335 FILEDESC_UNLOCK(fdp);
1336 uma_zfree(aiocb_zone, aiocbe);
1337 if (type == 0)
1338 suword(&job->_aiocb_private.error, EBADF);
1339 return (EBADF);
1340 }
1341 fhold(fp);
1342 FILEDESC_UNLOCK(fdp);
1343
1344 if (aiocbe->uaiocb.aio_offset == -1LL) {
1345 error = EINVAL;
1346 goto aqueue_fail;
1347 }
1348 error = suword(&job->_aiocb_private.kernelinfo, jobrefid);
1349 if (error) {
1350 error = EINVAL;
1351 goto aqueue_fail;
1352 }
1353 aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jobrefid;
1354 if (jobrefid == LONG_MAX)
1355 jobrefid = 1;
1356 else
1357 jobrefid++;
1358
1359 if (opcode == LIO_NOP) {
1360 fdrop(fp, td);
1361 uma_zfree(aiocb_zone, aiocbe);
1362 if (type == 0) {
1363 suword(&job->_aiocb_private.error, 0);
1364 suword(&job->_aiocb_private.status, 0);
1365 suword(&job->_aiocb_private.kernelinfo, 0);
1366 }
1367 return (0);
1368 }
1369 if ((opcode != LIO_READ) && (opcode != LIO_WRITE)) {
1370 if (type == 0)
1371 suword(&job->_aiocb_private.status, 0);
1372 error = EINVAL;
1373 goto aqueue_fail;
1374 }
1375
1376 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_KEVENT) {
1377 kev.ident = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
1378 kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sigval_ptr;
1379 }
1380 else {
1381 /*
1382 * This method for requesting kevent-based notification won't
1383 * work on the alpha, since we're passing in a pointer
1384 * via aio_lio_opcode, which is an int. Use the SIGEV_KEVENT-
1385 * based method instead.
1386 */
1387 if (user_opcode == LIO_NOP || user_opcode == LIO_READ ||
1388 user_opcode == LIO_WRITE)
1389 goto no_kqueue;
1390
1391 error = copyin((struct kevent *)(uintptr_t)user_opcode,
1392 &kev, sizeof(kev));
1393 if (error)
1394 goto aqueue_fail;
1395 }
1396 if ((u_int)kev.ident >= fdp->fd_nfiles ||
1397 (kq_fp = fdp->fd_ofiles[kev.ident]) == NULL ||
1398 (kq_fp->f_type != DTYPE_KQUEUE)) {
1399 error = EBADF;
1400 goto aqueue_fail;
1401 }
1402 kq = kq_fp->f_data;
1403 kev.ident = (uintptr_t)aiocbe->uuaiocb;
1404 kev.filter = EVFILT_AIO;
1405 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
1406 kev.data = (intptr_t)aiocbe;
1407 error = kqueue_register(kq, &kev, td);
1408aqueue_fail:
1409 if (error) {
1410 fdrop(fp, td);
1411 uma_zfree(aiocb_zone, aiocbe);
1412 if (type == 0)
1413 suword(&job->_aiocb_private.error, error);
1414 goto done;
1415 }
1416no_kqueue:
1417
1418 suword(&job->_aiocb_private.error, EINPROGRESS);
1419 aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
1420 aiocbe->userproc = p;
1421 aiocbe->cred = crhold(td->td_ucred);
1422 aiocbe->jobflags = 0;
1423 aiocbe->lio = lj;
1424 ki = p->p_aioinfo;
1425
1426 if (fp->f_type == DTYPE_SOCKET) {
1427 /*
1428 * Alternate queueing for socket ops: Reach down into the
1429 * descriptor to get the socket data. Then check to see if the
1430 * socket is ready to be read or written (based on the requested
1431 * operation).
1432 *
1433 * If it is not ready for io, then queue the aiocbe on the
1434 * socket, and set the flags so we get a call when sbnotify()
1435 * happens.
1436 */
1437 so = fp->f_data;
1438 s = splnet();
1439 if (((opcode == LIO_READ) && (!soreadable(so))) || ((opcode ==
1440 LIO_WRITE) && (!sowriteable(so)))) {
1441 TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
1442 TAILQ_INSERT_TAIL(&ki->kaio_sockqueue, aiocbe, plist);
1443 if (opcode == LIO_READ)
1444 so->so_rcv.sb_flags |= SB_AIO;
1445 else
1446 so->so_snd.sb_flags |= SB_AIO;
1447 aiocbe->jobstate = JOBST_JOBQGLOBAL; /* XXX */
1448 ki->kaio_queue_count++;
1449 num_queue_count++;
1450 splx(s);
1451 error = 0;
1452 goto done;
1453 }
1454 splx(s);
1455 }
1456
1457 if ((error = aio_qphysio(p, aiocbe)) == 0)
1458 goto done;
1459 if (error > 0) {
1460 suword(&job->_aiocb_private.status, 0);
1461 aiocbe->uaiocb._aiocb_private.error = error;
1462 suword(&job->_aiocb_private.error, error);
1463 goto done;
1464 }
1465
1466 /* No buffer for daemon I/O. */
1467 aiocbe->bp = NULL;
1468
1469 ki->kaio_queue_count++;
1470 if (lj)
1471 lj->lioj_queue_count++;
1472 s = splnet();
1473 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
1474 TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
1475 splx(s);
1476 aiocbe->jobstate = JOBST_JOBQGLOBAL;
1477
1478 num_queue_count++;
1479 error = 0;
1480
1481 /*
1482 * If we don't have a free AIO process, and we are below our quota, then
1483 * start one. Otherwise, depend on the subsequent I/O completions to
1484 * pick-up this job. If we don't sucessfully create the new process
1485 * (thread) due to resource issues, we return an error for now (EAGAIN),
1486 * which is likely not the correct thing to do.
1487 */
1488 s = splnet();
1489retryproc:
1490 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
1491 TAILQ_REMOVE(&aio_freeproc, aiop, list);
1492 TAILQ_INSERT_TAIL(&aio_activeproc, aiop, list);
1493 aiop->aiothreadflags &= ~AIOP_FREE;
1494 wakeup(aiop->aiothread);
1495 } else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
1496 ((ki->kaio_active_count + num_aio_resv_start) <
1497 ki->kaio_maxactive_count)) {
1498 num_aio_resv_start++;
1499 if ((error = aio_newproc()) == 0) {
1500 num_aio_resv_start--;
1501 goto retryproc;
1502 }
1503 num_aio_resv_start--;
1504 }
1505 splx(s);
1506done:
1507 return (error);
1508}
1509
1510/*
1511 * This routine queues an AIO request, checking for quotas.
1512 */
1513static int
1514aio_aqueue(struct thread *td, struct aiocb *job, int type)
1515{
1516 struct proc *p = td->td_proc;
1517 struct kaioinfo *ki;
1518
1519 if (p->p_aioinfo == NULL)
1520 aio_init_aioinfo(p);
1521
1522 if (num_queue_count >= max_queue_count)
1523 return (EAGAIN);
1524
1525 ki = p->p_aioinfo;
1526 if (ki->kaio_queue_count >= ki->kaio_qallowed_count)
1527 return (EAGAIN);
1528
1529 return _aio_aqueue(td, job, NULL, type);
1530}
1531
1532/*
1533 * Support the aio_return system call, as a side-effect, kernel resources are
1534 * released.
1535 */
1536int
1537aio_return(struct thread *td, struct aio_return_args *uap)
1538{
1539 struct proc *p = td->td_proc;
1540 int s;
1541 long jobref;
1542 struct aiocblist *cb, *ncb;
1543 struct aiocb *ujob;
1544 struct kaioinfo *ki;
1545
1546 ujob = uap->aiocbp;
1547 jobref = fuword(&ujob->_aiocb_private.kernelinfo);
1548 if (jobref == -1 || jobref == 0)
1549 return (EINVAL);
1550
1551 ki = p->p_aioinfo;
1552 if (ki == NULL)
1553 return (EINVAL);
1554 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
1555 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo) ==
1556 jobref) {
1557 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
1558 p->p_stats->p_ru.ru_oublock +=
1559 cb->outputcharge;
1560 cb->outputcharge = 0;
1561 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
1562 p->p_stats->p_ru.ru_inblock += cb->inputcharge;
1563 cb->inputcharge = 0;
1564 }
1565 goto done;
1566 }
1567 }
1568 s = splbio();
1569 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = ncb) {
1570 ncb = TAILQ_NEXT(cb, plist);
1571 if (((intptr_t) cb->uaiocb._aiocb_private.kernelinfo)
1572 == jobref) {
1573 break;
1574 }
1575 }
1576 splx(s);
1577 done:
1578 if (cb != NULL) {
1579 if (ujob == cb->uuaiocb) {
1580 td->td_retval[0] =
1581 cb->uaiocb._aiocb_private.status;
1582 } else
1583 td->td_retval[0] = EFAULT;
1584 aio_free_entry(cb);
1585 return (0);
1586 }
1587 return (EINVAL);
1588}
1589
1590/*
1591 * Allow a process to wakeup when any of the I/O requests are completed.
1592 */
1593int
1594aio_suspend(struct thread *td, struct aio_suspend_args *uap)
1595{
1596 struct proc *p = td->td_proc;
1597 struct timeval atv;
1598 struct timespec ts;
1599 struct aiocb *const *cbptr, *cbp;
1600 struct kaioinfo *ki;
1601 struct aiocblist *cb;
1602 int i;
1603 int njoblist;
1604 int error, s, timo;
1605 long *ijoblist;
1606 struct aiocb **ujoblist;
1607
1608 if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX)
1609 return (EINVAL);
1610
1611 timo = 0;
1612 if (uap->timeout) {
1613 /* Get timespec struct. */
1614 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
1615 return (error);
1616
1617 if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
1618 return (EINVAL);
1619
1620 TIMESPEC_TO_TIMEVAL(&atv, &ts);
1621 if (itimerfix(&atv))
1622 return (EINVAL);
1623 timo = tvtohz(&atv);
1624 }
1625
1626 ki = p->p_aioinfo;
1627 if (ki == NULL)
1628 return (EAGAIN);
1629
1630 njoblist = 0;
1631 ijoblist = uma_zalloc(aiol_zone, M_WAITOK);
1632 ujoblist = uma_zalloc(aiol_zone, M_WAITOK);
1633 cbptr = uap->aiocbp;
1634
1635 for (i = 0; i < uap->nent; i++) {
1636 cbp = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
1637 if (cbp == 0)
1638 continue;
1639 ujoblist[njoblist] = cbp;
1640 ijoblist[njoblist] = fuword(&cbp->_aiocb_private.kernelinfo);
1641 njoblist++;
1642 }
1643
1644 if (njoblist == 0) {
1645 uma_zfree(aiol_zone, ijoblist);
1646 uma_zfree(aiol_zone, ujoblist);
1647 return (0);
1648 }
1649
1650 error = 0;
1651 for (;;) {
1652 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
1653 for (i = 0; i < njoblist; i++) {
1654 if (((intptr_t)
1655 cb->uaiocb._aiocb_private.kernelinfo) ==
1656 ijoblist[i]) {
1657 if (ujoblist[i] != cb->uuaiocb)
1658 error = EINVAL;
1659 uma_zfree(aiol_zone, ijoblist);
1660 uma_zfree(aiol_zone, ujoblist);
1661 return (error);
1662 }
1663 }
1664 }
1665
1666 s = splbio();
1667 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb =
1668 TAILQ_NEXT(cb, plist)) {
1669 for (i = 0; i < njoblist; i++) {
1670 if (((intptr_t)
1671 cb->uaiocb._aiocb_private.kernelinfo) ==
1672 ijoblist[i]) {
1673 splx(s);
1674 if (ujoblist[i] != cb->uuaiocb)
1675 error = EINVAL;
1676 uma_zfree(aiol_zone, ijoblist);
1677 uma_zfree(aiol_zone, ujoblist);
1678 return (error);
1679 }
1680 }
1681 }
1682
1683 ki->kaio_flags |= KAIO_WAKEUP;
1684 error = tsleep(p, PRIBIO | PCATCH, "aiospn", timo);
1685 splx(s);
1686
1687 if (error == ERESTART || error == EINTR) {
1688 uma_zfree(aiol_zone, ijoblist);
1689 uma_zfree(aiol_zone, ujoblist);
1690 return (EINTR);
1691 } else if (error == EWOULDBLOCK) {
1692 uma_zfree(aiol_zone, ijoblist);
1693 uma_zfree(aiol_zone, ujoblist);
1694 return (EAGAIN);
1695 }
1696 }
1697
1698/* NOTREACHED */
1699 return (EINVAL);
1700}
1701
1702/*
1703 * aio_cancel cancels any non-physio aio operations not currently in
1704 * progress.
1705 */
1706int
1707aio_cancel(struct thread *td, struct aio_cancel_args *uap)
1708{
1709 struct proc *p = td->td_proc;
1710 struct kaioinfo *ki;
1711 struct aiocblist *cbe, *cbn;
1712 struct file *fp;
1713 struct filedesc *fdp;
1714 struct socket *so;
1715 struct proc *po;
1716 int s,error;
1717 int cancelled=0;
1718 int notcancelled=0;
1719 struct vnode *vp;
1720
1721 fdp = p->p_fd;
1722 if ((u_int)uap->fd >= fdp->fd_nfiles ||
1723 (fp = fdp->fd_ofiles[uap->fd]) == NULL)
1724 return (EBADF);
1725
1726 if (fp->f_type == DTYPE_VNODE) {
1727 vp = fp->f_vnode;
1728
1729 if (vn_isdisk(vp,&error)) {
1730 td->td_retval[0] = AIO_NOTCANCELED;
1731 return (0);
1732 }
1733 } else if (fp->f_type == DTYPE_SOCKET) {
1734 so = fp->f_data;
1735
1736 s = splnet();
1737
1738 for (cbe = TAILQ_FIRST(&so->so_aiojobq); cbe; cbe = cbn) {
1739 cbn = TAILQ_NEXT(cbe, list);
1740 if ((uap->aiocbp == NULL) ||
1741			    (uap->aiocbp == cbe->uuaiocb)) {
1742 po = cbe->userproc;
1743 ki = po->p_aioinfo;
1744 TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
1745 TAILQ_REMOVE(&ki->kaio_sockqueue, cbe, plist);
1746 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe, plist);
1747 if (ki->kaio_flags & KAIO_WAKEUP) {
1748 wakeup(po);
1749 }
1750 cbe->jobstate = JOBST_JOBFINISHED;
1751				cbe->uaiocb._aiocb_private.status = -1;
1752				cbe->uaiocb._aiocb_private.error = ECANCELED;
1753 cancelled++;
1754/* XXX cancelled, knote? */
1755 if (cbe->uaiocb.aio_sigevent.sigev_notify ==
1756 SIGEV_SIGNAL) {
1757 PROC_LOCK(cbe->userproc);
1758 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
1759 PROC_UNLOCK(cbe->userproc);
1760 }
1761 if (uap->aiocbp)
1762 break;
1763 }
1764 }
1765 splx(s);
1766
1767 if ((cancelled) && (uap->aiocbp)) {
1768 td->td_retval[0] = AIO_CANCELED;
1769 return (0);
1770 }
1771 }
1772	ki = p->p_aioinfo;
1773 if (ki == NULL)
1774 goto done;
1775 s = splnet();
1776
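	/*
	 * Scan this process's queued jobs for a match on the descriptor and,
	 * optionally, on the specific aiocb.  Jobs still on the global queue
	 * (not yet picked up by an aio daemon) can be cancelled; anything
	 * already running is counted as not cancellable.
	 */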
1777 for (cbe = TAILQ_FIRST(&ki->kaio_jobqueue); cbe; cbe = cbn) {
1778 cbn = TAILQ_NEXT(cbe, plist);
1779
1780 if ((uap->fd == cbe->uaiocb.aio_fildes) &&
1781		    ((uap->aiocbp == NULL) ||
1782 (uap->aiocbp == cbe->uuaiocb))) {
1783
1784 if (cbe->jobstate == JOBST_JOBQGLOBAL) {
1785 TAILQ_REMOVE(&aio_jobs, cbe, list);
1786 TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
1787 TAILQ_INSERT_TAIL(&ki->kaio_jobdone, cbe,
1788 plist);
1789 cancelled++;
1790 ki->kaio_queue_finished_count++;
1791 cbe->jobstate = JOBST_JOBFINISHED;
1792 cbe->uaiocb._aiocb_private.status = -1;
1793 cbe->uaiocb._aiocb_private.error = ECANCELED;
1794/* XXX cancelled, knote? */
1795 if (cbe->uaiocb.aio_sigevent.sigev_notify ==
1796 SIGEV_SIGNAL) {
1797 PROC_LOCK(cbe->userproc);
1798 psignal(cbe->userproc, cbe->uaiocb.aio_sigevent.sigev_signo);
1799 PROC_UNLOCK(cbe->userproc);
1800 }
1801 } else {
1802 notcancelled++;
1803 }
1804 }
1805 }
1806 splx(s);
1807done:
1808 if (notcancelled) {
1809 td->td_retval[0] = AIO_NOTCANCELED;
1810 return (0);
1811 }
1812 if (cancelled) {
1813 td->td_retval[0] = AIO_CANCELED;
1814 return (0);
1815 }
1816 td->td_retval[0] = AIO_ALLDONE;
1817
1818 return (0);
1819}
1820
1821/*
1822 * aio_error is implemented at the kernel level for compatibility purposes
1823 * only.  For a user-mode async implementation, it would be best to do it in
1824 * a userland subroutine.
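 *
 * The request is looked up by the kernel job id stored in the aiocb's
 * _aiocb_private.kernelinfo field: finished jobs report their saved error,
 * jobs still sitting on one of the queues report EINPROGRESS, and an
 * unknown job id yields EINVAL.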
1825 */
1826int
1827aio_error(struct thread *td, struct aio_error_args *uap)
1828{
1829 struct proc *p = td->td_proc;
1830 int s;
1831 struct aiocblist *cb;
1832 struct kaioinfo *ki;
1833 long jobref;
1834
1835 ki = p->p_aioinfo;
1836 if (ki == NULL)
1837 return (EINVAL);
1838
1839 jobref = fuword(&uap->aiocbp->_aiocb_private.kernelinfo);
1840 if ((jobref == -1) || (jobref == 0))
1841 return (EINVAL);
1842
1843 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
1844 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1845 jobref) {
1846 td->td_retval[0] = cb->uaiocb._aiocb_private.error;
1847 return (0);
1848 }
1849 }
1850
1851 s = splnet();
1852
1853 for (cb = TAILQ_FIRST(&ki->kaio_jobqueue); cb; cb = TAILQ_NEXT(cb,
1854 plist)) {
1855 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1856 jobref) {
1857 td->td_retval[0] = EINPROGRESS;
1858 splx(s);
1859 return (0);
1860 }
1861 }
1862
1863 for (cb = TAILQ_FIRST(&ki->kaio_sockqueue); cb; cb = TAILQ_NEXT(cb,
1864 plist)) {
1865 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1866 jobref) {
1867 td->td_retval[0] = EINPROGRESS;
1868 splx(s);
1869 return (0);
1870 }
1871 }
1872 splx(s);
1873
1874 s = splbio();
1875 for (cb = TAILQ_FIRST(&ki->kaio_bufdone); cb; cb = TAILQ_NEXT(cb,
1876 plist)) {
1877 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1878 jobref) {
1879 td->td_retval[0] = cb->uaiocb._aiocb_private.error;
1880 splx(s);
1881 return (0);
1882 }
1883 }
1884
1885 for (cb = TAILQ_FIRST(&ki->kaio_bufqueue); cb; cb = TAILQ_NEXT(cb,
1886 plist)) {
1887 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo) ==
1888 jobref) {
1889 td->td_retval[0] = EINPROGRESS;
1890 splx(s);
1891 return (0);
1892 }
1893 }
1894 splx(s);
1895
1896#if (0)
1897 /*
1898 * Hack for lio.
1899 */
1900 status = fuword(&uap->aiocbp->_aiocb_private.status);
1901 if (status == -1)
1902 return fuword(&uap->aiocbp->_aiocb_private.error);
1903#endif
1904 return (EINVAL);
1905}
1906
1907/* syscall - asynchronous read from a file (REALTIME) */
1908int
1909aio_read(struct thread *td, struct aio_read_args *uap)
1910{
1911
1912 return aio_aqueue(td, uap->aiocbp, LIO_READ);
1913}
1914
1915/* syscall - asynchronous write to a file (REALTIME) */
1916int
1917aio_write(struct thread *td, struct aio_write_args *uap)
1918{
1919
1920 return aio_aqueue(td, uap->aiocbp, LIO_WRITE);
1921}
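
/*
 * Illustrative userland use of the interface implemented above (a minimal
 * sketch, not part of the kernel; the file name and buffer size are
 * arbitrary).  aio_error() reports EINPROGRESS until the request completes,
 * at which point aio_return() yields the transfer count:
 *
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct aiocb cb;
 *		const struct aiocb *list[1];
 *		char buf[512];
 *		int fd;
 *
 *		if ((fd = open("/etc/motd", O_RDONLY)) == -1)
 *			err(1, "open");
 *
 *		memset(&cb, 0, sizeof(cb));
 *		cb.aio_fildes = fd;
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = sizeof(buf);
 *		cb.aio_offset = 0;
 *
 *		if (aio_read(&cb) == -1)
 *			err(1, "aio_read");
 *
 *		list[0] = &cb;
 *		while (aio_error(&cb) == EINPROGRESS)
 *			(void)aio_suspend(list, 1, NULL);
 *
 *		printf("read %zd bytes\n", aio_return(&cb));
 *		close(fd);
 *		return (0);
 *	}
 */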
1922
1923/* syscall - list directed I/O (REALTIME) */
1924int
1925lio_listio(struct thread *td, struct lio_listio_args *uap)
1926{
1927 struct proc *p = td->td_proc;
1928 int nent, nentqueued;
1929 struct aiocb *iocb, * const *cbptr;
1930 struct aiocblist *cb;
1931 struct kaioinfo *ki;
1932 struct aio_liojob *lj;
1933 int error, runningcode;
1934 int nerror;
1935 int i;
1936 int s;
1937
1938 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT))
1939 return (EINVAL);
1940
1941 nent = uap->nent;
1942 if (nent < 0 || nent > AIO_LISTIO_MAX)
1943 return (EINVAL);
1944
1945 if (p->p_aioinfo == NULL)
1946 aio_init_aioinfo(p);
1947
1948 if ((nent + num_queue_count) > max_queue_count)
1949 return (EAGAIN);
1950
1951 ki = p->p_aioinfo;
1952 if ((nent + ki->kaio_queue_count) > ki->kaio_qallowed_count)
1953 return (EAGAIN);
1954
1955 lj = uma_zalloc(aiolio_zone, M_WAITOK);
1956 if (!lj)
1957 return (EAGAIN);
1958
1959 lj->lioj_flags = 0;
1960 lj->lioj_buffer_count = 0;
1961 lj->lioj_buffer_finished_count = 0;
1962 lj->lioj_queue_count = 0;
1963 lj->lioj_queue_finished_count = 0;
1964 lj->lioj_ki = ki;
1965
1966 /*
1967 * Setup signal.
1968 */
1969 if (uap->sig && (uap->mode == LIO_NOWAIT)) {
1970 error = copyin(uap->sig, &lj->lioj_signal,
1971 sizeof(lj->lioj_signal));
1972 if (error) {
1973 uma_zfree(aiolio_zone, lj);
1974 return (error);
1975 }
1976 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) {
1977 uma_zfree(aiolio_zone, lj);
1978 return (EINVAL);
1979 }
1980 lj->lioj_flags |= LIOJ_SIGNAL;
1981 }
1982 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list);
1983 /*
1984 * Get pointers to the list of I/O requests.
1985 */
1986 nerror = 0;
1987 nentqueued = 0;
1988 cbptr = uap->acb_list;
1989 for (i = 0; i < uap->nent; i++) {
1990 iocb = (struct aiocb *)(intptr_t)fuword(&cbptr[i]);
1991 if (((intptr_t)iocb != -1) && ((intptr_t)iocb != 0)) {
1992 error = _aio_aqueue(td, iocb, lj, 0);
1993 if (error == 0)
1994 nentqueued++;
1995 else
1996 nerror++;
1997 }
1998 }
1999
2000 /*
2001 * If we haven't queued any, then just return error.
2002 */
2003 if (nentqueued == 0)
2004 return (0);
2005
2006 /*
2007 * Calculate the appropriate error return.
2008 */
2009 runningcode = 0;
2010 if (nerror)
2011 runningcode = EIO;
2012
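	/*
	 * For LIO_WAIT, repeatedly walk the caller's list, counting requests
	 * that have shown up on the done queues (and crediting block I/O
	 * statistics for completed reads and writes), and sleep with
	 * KAIO_WAKEUP set until every successfully queued request has
	 * completed.
	 */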
2013 if (uap->mode == LIO_WAIT) {
2014 int command, found, jobref;
2015
2016 for (;;) {
2017 found = 0;
2018 for (i = 0; i < uap->nent; i++) {
2019 /*
2020 * Fetch address of the control buf pointer in
2021 * user space.
2022 */
2023 iocb = (struct aiocb *)
2024 (intptr_t)fuword(&cbptr[i]);
2025 if (((intptr_t)iocb == -1) || ((intptr_t)iocb
2026 == 0))
2027 continue;
2028
2029 /*
2030 * Fetch the associated command from user space.
2031 */
2032 command = fuword(&iocb->aio_lio_opcode);
2033 if (command == LIO_NOP) {
2034 found++;
2035 continue;
2036 }
2037
2038 jobref =
2039 fuword(&iocb->_aiocb_private.kernelinfo);
2040
2041 TAILQ_FOREACH(cb, &ki->kaio_jobdone, plist) {
2042 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
2043 == jobref) {
2044 if (cb->uaiocb.aio_lio_opcode
2045 == LIO_WRITE) {
2046 p->p_stats->p_ru.ru_oublock
2047 +=
2048 cb->outputcharge;
2049 cb->outputcharge = 0;
2050 } else if (cb->uaiocb.aio_lio_opcode
2051 == LIO_READ) {
2052 p->p_stats->p_ru.ru_inblock
2053 += cb->inputcharge;
2054 cb->inputcharge = 0;
2055 }
2056 found++;
2057 break;
2058 }
2059 }
2060
2061 s = splbio();
2062 TAILQ_FOREACH(cb, &ki->kaio_bufdone, plist) {
2063 if (((intptr_t)cb->uaiocb._aiocb_private.kernelinfo)
2064 == jobref) {
2065 found++;
2066 break;
2067 }
2068 }
2069 splx(s);
2070 }
2071
2072 /*
2073 * If all I/Os have been disposed of, then we can
2074 * return.
2075 */
2076 if (found == nentqueued)
2077 return (runningcode);
2078
2079 ki->kaio_flags |= KAIO_WAKEUP;
2080 error = tsleep(p, PRIBIO | PCATCH, "aiospn", 0);
2081
2082 if (error == EINTR)
2083 return (EINTR);
2084 else if (error == EWOULDBLOCK)
2085 return (EAGAIN);
2086 }
2087 }
2088
2089 return (runningcode);
2090}
2091
2092/*
2093 * This is a weird hack so that we can post a signal. It is safe to do so from
2094 * a timeout routine, but *not* from an interrupt routine.
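 *
 * It is scheduled via timeout(9) (e.g. from aio_physwakeup() below) and
 * posts the lio completion signal once all requests in the lio job have
 * finished, followed by the per-request sigevent signal if SIGEV_SIGNAL
 * was requested.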
2095 */
2096static void
2097process_signal(void *aioj)
2098{
2099 struct aiocblist *aiocbe = aioj;
2100 struct aio_liojob *lj = aiocbe->lio;
2101 struct aiocb *cb = &aiocbe->uaiocb;
2102
2103 if ((lj) && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL) &&
2104 (lj->lioj_queue_count == lj->lioj_queue_finished_count)) {
2105 PROC_LOCK(lj->lioj_ki->kaio_p);
2106 psignal(lj->lioj_ki->kaio_p, lj->lioj_signal.sigev_signo);
2107 PROC_UNLOCK(lj->lioj_ki->kaio_p);
2108 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2109 }
2110
2111 if (cb->aio_sigevent.sigev_notify == SIGEV_SIGNAL) {
2112 PROC_LOCK(aiocbe->userproc);
2113 psignal(aiocbe->userproc, cb->aio_sigevent.sigev_signo);
2114 PROC_UNLOCK(aiocbe->userproc);
2115 }
2116}
2117
2118/*
2119 * Interrupt handler for physio; performs the necessary process wakeups and
2120 * signal delivery.
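 *
 * Called when a physio buf completes: the job is marked JOBST_JOBBFINISHED,
 * its residual count and error are recorded, it is moved to the process's
 * kaio_bufdone queue, knotes are activated and any sleepers are woken.
 * Signal delivery is deferred to process_signal() via timeout(9), since
 * signals cannot be posted from interrupt context.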
2121 */
2122static void
2123aio_physwakeup(struct buf *bp)
2124{
2125 struct aiocblist *aiocbe;
2126 struct proc *p;
2127 struct kaioinfo *ki;
2128 struct aio_liojob *lj;
2129
2130 wakeup(bp);
2131
2132 aiocbe = (struct aiocblist *)bp->b_caller1;
2133 if (aiocbe) {
2134 p = aiocbe->userproc;
2135
2136 aiocbe->jobstate = JOBST_JOBBFINISHED;
2137 aiocbe->uaiocb._aiocb_private.status -= bp->b_resid;
2138 aiocbe->uaiocb._aiocb_private.error = 0;
2139 aiocbe->jobflags |= AIOCBLIST_DONE;
2140
2141 if (bp->b_ioflags & BIO_ERROR)
2142 aiocbe->uaiocb._aiocb_private.error = bp->b_error;
2143
2144 lj = aiocbe->lio;
2145 if (lj) {
2146 lj->lioj_buffer_finished_count++;
2147
2148 /*
2149 * wakeup/signal if all of the interrupt jobs are done.
2150 */
2151 if (lj->lioj_buffer_finished_count ==
2152 lj->lioj_buffer_count) {
2153 /*
2154 * Post a signal if it is called for.
2155 */
2156 if ((lj->lioj_flags &
2157 (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) ==
2158 LIOJ_SIGNAL) {
2159 lj->lioj_flags |= LIOJ_SIGNAL_POSTED;
2160 aiocbe->timeouthandle =
2161 timeout(process_signal,
2162 aiocbe, 0);
2163 }
2164 }
2165 }
2166
2167 ki = p->p_aioinfo;
2168 if (ki) {
2169 ki->kaio_buffer_finished_count++;
2170 TAILQ_REMOVE(&aio_bufjobs, aiocbe, list);
2171 TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
2172 TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
2173
2174 KNOTE(&aiocbe->klist, 0);
2175 /* Do the wakeup. */
2176 if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
2177 ki->kaio_flags &= ~KAIO_WAKEUP;
2178 wakeup(p);
2179 }
2180 }
2181
2182 if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL)
2183 aiocbe->timeouthandle =
2184 timeout(process_signal, aiocbe, 0);
2185 }
2186}
2187
2188/* syscall - wait for the next completion of an aio request */
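/*
 * The first entry found on the process's kaio_jobdone or kaio_bufdone queue
 * is reported: its user aiocb pointer is copied out through uap->aiocbp, its
 * transfer count is placed in td->td_retval[0], and its error becomes the
 * syscall's return value.  If nothing has completed yet, the process sleeps,
 * bounded by the optional timeout.
 */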
2189int
2190aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap)
2191{
2192 struct proc *p = td->td_proc;
2193 struct timeval atv;
2194 struct timespec ts;
2195 struct kaioinfo *ki;
2196 struct aiocblist *cb = NULL;
2197 int error, s, timo;
2198
2199	suword(uap->aiocbp, (long)NULL);
2200
2201 timo = 0;
2202 if (uap->timeout) {
2203 /* Get timespec struct. */
2204 error = copyin(uap->timeout, &ts, sizeof(ts));
2205 if (error)
2206 return (error);
2207
2208 if ((ts.tv_nsec < 0) || (ts.tv_nsec >= 1000000000))
2209 return (EINVAL);
2210
2211 TIMESPEC_TO_TIMEVAL(&atv, &ts);
2212 if (itimerfix(&atv))
2213 return (EINVAL);
2214 timo = tvtohz(&atv);
2215 }
2216
2217 ki = p->p_aioinfo;
2218 if (ki == NULL)
2219 return (EAGAIN);
2220
2221 for (;;) {
2222 if ((cb = TAILQ_FIRST(&ki->kaio_jobdone)) != 0) {
2223 suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
2224 td->td_retval[0] = cb->uaiocb._aiocb_private.status;
2225 if (cb->uaiocb.aio_lio_opcode == LIO_WRITE) {
2226 p->p_stats->p_ru.ru_oublock +=
2227 cb->outputcharge;
2228 cb->outputcharge = 0;
2229 } else if (cb->uaiocb.aio_lio_opcode == LIO_READ) {
2230 p->p_stats->p_ru.ru_inblock += cb->inputcharge;
2231 cb->inputcharge = 0;
2232 }
			error = cb->uaiocb._aiocb_private.error;	/* fetch before free */
2233			aio_free_entry(cb);
2234			return (error);
2235 }
2236
2237 s = splbio();
2238		if ((cb = TAILQ_FIRST(&ki->kaio_bufdone)) != 0) {
2239 splx(s);
2240 suword(uap->aiocbp, (uintptr_t)cb->uuaiocb);
2241 td->td_retval[0] = cb->uaiocb._aiocb_private.status;
			error = cb->uaiocb._aiocb_private.error;	/* fetch before free */
2242			aio_free_entry(cb);
2243			return (error);
2244 }
2245
2246 ki->kaio_flags |= KAIO_WAKEUP;
2247 error = tsleep(p, PRIBIO | PCATCH, "aiowc", timo);
2248 splx(s);
2249
2250 if (error == ERESTART)
2251 return (EINTR);
2252 else if (error < 0)
2253 return (error);
2254 else if (error == EINTR)
2255 return (EINTR);
2256 else if (error == EWOULDBLOCK)
2257 return (EAGAIN);
2258 }
2259}
2260
2261/* kqueue attach function */
2262static int
2263filt_aioattach(struct knote *kn)
2264{
2265 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2266
2267 /*
2268 * The aiocbe pointer must be validated before using it, so
2269 * registration is restricted to the kernel; the user cannot
2270 * set EV_FLAG1.
2271 */
2272 if ((kn->kn_flags & EV_FLAG1) == 0)
2273 return (EPERM);
2274 kn->kn_flags &= ~EV_FLAG1;
2275
2276 SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);
2277
2278 return (0);
2279}
2280
2281/* kqueue detach function */
2282static void
2283filt_aiodetach(struct knote *kn)
2284{
2285 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2286
2287 SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
2288}
2289
2290/* kqueue filter function */
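/*
 * Report the job's saved error in kn_data; the event fires (with EV_EOF set)
 * only once the job has reached one of the finished states.
 */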
2291/*ARGSUSED*/
2292static int
2293filt_aio(struct knote *kn, long hint)
2294{
2295 struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
2296
2297 kn->kn_data = aiocbe->uaiocb._aiocb_private.error;
2298 if (aiocbe->jobstate != JOBST_JOBFINISHED &&
2299 aiocbe->jobstate != JOBST_JOBBFINISHED)
2300 return (0);
2301 kn->kn_flags |= EV_EOF;
2302 return (1);
2303}