/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
struct aio_batch_entry {
	struct hlist_node list;
	struct address_space *mapping;
};
mempool_t *abe_pool;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = create_workqueue("aio");
	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
	BUG_ON(!abe_pool);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
				  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km).
 * Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)

static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	unsigned nr_events = ctx->max_reqs;

	kmem_cache_free(kioctx_cachep, ctx);

	if (nr_events) {
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - nr_events > aio_nr);
		aio_nr -= nr_events;
		spin_unlock(&aio_nr_lock);
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work(&ctx->wq);
	cancel_work_sync(&ctx->wq.work);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

#define get_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
		__put_ioctx(kioctx);					\
} while (0)

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int did_sync = 0;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if ((unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	do {
		spin_lock_bh(&aio_nr_lock);
		if (aio_nr + nr_events > aio_max_nr ||
		    aio_nr + nr_events < aio_nr)
			ctx->max_reqs = 0;
		else
			aio_nr += ctx->max_reqs;
		spin_unlock_bh(&aio_nr_lock);
		if (ctx->max_reqs || did_sync)
			break;

		/* wait for rcu callbacks to have completed before giving up */
		synchronize_rcu();
		did_sync = 1;
		ctx->max_reqs = nr_events;
	} while (1);

	if (ctx->max_reqs == 0)
		goto out_cleanup;

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
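 *
 *	Note that each iocb's cancel method is invoked with ctx_lock
 *	dropped and with an extra ki_users reference held on the iocb.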
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	spin_lock_irq(&ctx->ctx_lock);
	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * Ensure we don't leave the ctx on the aio_wq
		 */
		cancel_work_sync(&ctx->wq.work);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as
	 * requests will be stuck on fput_head only if the aio_fput_routine is
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput(s) */
		if (req->ki_filp != NULL)
			fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	/*
	 * Try to optimize the aio and eventfd file* puts, by avoiding to
	 * schedule work in case it is not final fput() time. In normal cases,
	 * we would not be holding the last reference to the file*, so
	 * this function will be executed w/out any aio kthread wakeup.
	 */
	if (unlikely(!fput_atomic(req->ki_filp))) {
		get_ioctx(ctx);
		spin_lock(&fput_lock);
		list_add(&req->ki_list, &fput_head);
		spin_unlock(&fput_lock);
		queue_work(aio_wq, &fput_work);
	} else {
		req->ki_filp = NULL;
		really_put_req(ctx, req);
	}
	return 1;
}

/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;
	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id && !ctx->dead) {
			get_ioctx(ctx);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}

/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 *	Calls the iocb retry method (already setup for the
 *	iocb on initial submission) for operation specific
 *	handling, but takes care of most of common retry
 *	execution details for a given iocb. The retry method
 *	needs to be non-blocking as far as possible, to avoid
 *	holding up other iocbs waiting to be serviced by the
 *	retry kernel thread.
 *
 *	The trickier parts in this code have to do with
 *	ensuring that only one retry instance is in progress
 *	for a given iocb at any time. Providing that guarantee
 *	simplifies the coding of individual aio operations as
 *	it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}

/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
				  ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}


/*
 * aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static inline void aio_run_iocbs(struct kioctx *ctx)
{
	int requeue;

	spin_lock_irq(&ctx->ctx_lock);

	requeue = __aio_run_iocbs(ctx);
	spin_unlock_irq(&ctx->ctx_lock);
	if (requeue)
		aio_queue_work(ctx);
}

/*
 * just like aio_run_iocbs, but keeps running them until
 * the list stays empty
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already, don't use queue_delayed_work,
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}

/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
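 *
 *	For async iocbs the completion event is written into the ring
 *	buffer mapped into the submitter's address space; sync iocbs
 *	complete by storing the result in the iocb and waking the task
 *	sleeping in wait_on_sync_kiocb().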
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx *ctx = iocb->ki_ctx;
	struct aio_ring_info *info;
	struct aio_ring *ring;
	struct io_event *event;
	unsigned long flags;
	unsigned long tail;
	int ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

	tail = info->tail;
	event = aio_ring_event(info, tail, KM_IRQ0);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event, KM_IRQ0);
	kunmap_atomic(ring, KM_IRQ1);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);

static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp, KM_USER1);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring, KM_USER0);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts.
		 */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	aio_cancel_all(ioctx);
	wait_for_all_aios(ioctx);

	/*
	 * Wake up any waiters. The setting of ctx->dead must be seen
	 * by other CPUs at this point. Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up(&ioctx->wait);
	put_ioctx(ioctx);	/* once for the lookup */
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
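 *
 *	For illustration only, a minimal userspace call might look like
 *	this (raw syscall(2) with a hypothetical nr_events of 128; glibc
 *	does not provide wrappers for these syscalls):
 *
 *		aio_context_t ctx = 0;
 *		if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *			perror("io_setup");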
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (!ret)
			return 0;

		get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
		io_destroy(ioctx);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
	    (iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as its a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error.
	 */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = kiocb->ki_left;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
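 *
 *	Each opcode is paired here with its retry method: the PREAD/PWRITE
 *	and PREADV/PWRITEV cases use aio_rw_vect_retry, while FSYNC/FDSYNC
 *	use aio_fsync/aio_fdsync.  -EINVAL is returned when the file's
 *	f_op does not provide the needed aio method.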
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_single_vector(kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = security_file_permission(file, MAY_READ);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = security_file_permission(file, MAY_WRITE);
		if (unlikely(ret))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static void aio_batch_add(struct address_space *mapping,
			  struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos;
	unsigned bucket;

	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
		if (abe->mapping == mapping)
			return;
	}

	abe = mempool_alloc(abe_pool, GFP_KERNEL);
	BUG_ON(!igrab(mapping->host));
	abe->mapping = mapping;
	hlist_add_head(&abe->list, &batch_hash[bucket]);
	return;
}

static void aio_batch_free(struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos, *n;
	int i;

	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
			blk_run_address_space(abe->mapping);
			iput(abe->mapping->host);
			hlist_del(&abe->list);
			mempool_free(abe, abe_pool);
		}
	}
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct hlist_head *batch_hash,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx);		/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	if (req->ki_opcode == IOCB_CMD_PREAD ||
	    req->ki_opcode == IOCB_CMD_PREADV ||
	    req->ki_opcode == IOCB_CMD_PWRITE ||
	    req->ki_opcode == IOCB_CMD_PWRITEV)
		aio_batch_add(file->f_mapping, batch_hash);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i;
	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
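	 *
	 * (As the loop below is written, it stops at the first failure and
	 * returns the number of iocbs already queued, or the error code if
	 * none were queued at all.)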
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
		if (ret)
			break;
	}
	aio_batch_free(batch_hash);

	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
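 *
 *	For illustration only, a cancellation attempt from userspace might
 *	look like this (raw syscall, hypothetical ctx and iocb variables):
 *
 *		struct io_event res;
 *		if (syscall(__NR_io_cancel, ctx, &iocb, &res) < 0)
 *			perror("io_cancel");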
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}