/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>

#include <typedefs.h>
#include <bcmdefs.h>

/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

static long estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
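/*
 * Editorial worked example, not from the original source, of the slack
 * rule implemented above.  For a normal task (divfactor == 1000, 0.1%):
 *
 *	timeout   2s  -> slack = 2s / 1000 = 2 ms
 *	timeout 600s  -> 600 ms, capped at MAX_SLACK = 100 ms
 *
 * For a "nice" task (divfactor == 200, 0.5%):
 *
 *	timeout   2s  -> slack = 2s / 200 = 10 ms
 *
 * The early return in __estimate_accuracy() fires once
 * tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor), i.e. 100 seconds at
 * divfactor == 1000, so the multiplication below it cannot overflow;
 * such long timeouts would hit the 100 ms cap anyway.
 */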
struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
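/*
 * Illustrative sketch, not part of the original file: roughly how a
 * driver's ->poll method feeds entries into the machinery above.  The
 * names my_dev, my_poll and my_readq are hypothetical:
 *
 *	static unsigned int my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->my_readq, wait);
 *		if (dev->data_ready)
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 * poll_wait() invokes pt->qproc, i.e. __pollwait() below, which grabs an
 * entry via poll_get_entry() and queues it on my_readq with pollwake()
 * as the wakeup callback.  The first N_INLINE_POLL_ENTRIES entries live
 * inside the poll_wqueues itself; only heavily multiplexed calls pay for
 * extra pages via __get_free_page().
 */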
static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	entry->key = p->key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
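/*
 * Editorial sketch, not from the original source: do_select() and
 * do_poll() below both drive the helpers above in the same canonical
 * shape:
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		scan the fds, passing &table.pt so that the first pass
 *		registers a waiter on every fd;
 *		if (found events, timed out or signal_pending(current))
 *			break;
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   to, slack))
 *			timed_out = 1;
 *	}
 *	poll_freewait(&table);
 *
 * The pwq->triggered handshake (smp_wmb() in __pollwake() paired with
 * set_mb() above) is what keeps a wakeup that races with the rescan
 * from being lost.
 */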
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note: we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle the last incomplete long-word first */
	set = ~(~0UL << (n & (__NFDBITS-1)));
	n /= __NFDBITS;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds->fds_bits+n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * __NFDBITS;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	if (wait) {
		wait->key = POLLEX_SET;
		if (in & bit)
			wait->key |= POLLIN_SET;
		if (out & bit)
			wait->key |= POLLOUT_SET;
	}
}
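/*
 * Illustrative note, not in the original source: the key set here is what
 * pollwake() filters against.  For an fd watched only for reading,
 * wait->key becomes POLLEX_SET | POLLIN_SET, so a wakeup whose key
 * carries only POLLOUT_SET bits is dropped by pollwake() without waking
 * the task.  A NULL key (a plain wake_up()) always passes the filter.
 */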
int BCMFASTPATH_HOST do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += __NFDBITS;
				continue;
			}

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(file, wait);
					}
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)

int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;
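	/*
	 * Editorial illustration, not from the original source: for
	 * n == 128 on a 64-bit machine FDS_BYTES(n) == 16, so the single
	 * allocation is sliced into six equal windows:
	 *
	 *	bits +  0	in	(caller's read set)
	 *	bits + 16	out	(caller's write set)
	 *	bits + 32	ex	(caller's except set)
	 *	bits + 48	res_in	(results, copied back below)
	 *	bits + 64	res_out
	 *	bits + 80	res_ex
	 *
	 * SELECT_STACK_ALLOC keeps the common small-n case entirely on
	 * the stack; only size > sizeof(stack_fds)/6 falls back to the
	 * kmalloc() above.
	 */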
	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

#ifdef HAVE_SET_RESTORE_SIGMASK
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
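/*
 * Hedged illustration, not part of the original file: the sixth argument
 * points at a two-field structure that userspace wrappers typically build
 * along these lines (the name sigset_argpack is hypothetical):
 *
 *	struct sigset_argpack {
 *		const sigset_t *ss;	read by the first __get_user()
 *		size_t ss_len;		read by the second __get_user()
 *	};
 *
 * pselect6() below fetches the two fields at their natural offsets
 * (0 and sizeof(void *)) instead of copying a struct, after a single
 * access_ok() check spanning both.
 */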
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
#endif /* HAVE_SET_RESTORE_SIGMASK */

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll) {
				if (pwait)
					pwait->key = pollfd->events |
							POLLERR | POLLHUP;
				mask = file->f_op->poll(file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}
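/*
 * Editorial example, not from the original source, of what do_pollfd()
 * yields for a few representative inputs:
 *
 *	{ .fd = 3, .events = POLLIN }, fd 3 open and readable
 *		-> revents = POLLIN (POLLERR/POLLHUP may be added even
 *		   though they were not asked for)
 *	{ .fd = 9, .events = POLLIN }, fd 9 not open
 *		-> revents = POLLNVAL
 *	{ .fd = -1, .events = POLLIN }, negative fd
 *		-> revents = 0, the entry is silently skipped
 *
 * A non-zero return also makes do_poll() below stop registering further
 * waiters (pt = NULL), mirroring the wait = NULL trick in do_select().
 */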
static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill the poll_table, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table to them on the next loop iteration.
		 */
		pt = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
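/*
 * Editorial note, not in the original source: because end_time is stored
 * as an absolute expiry (see poll_select_set_timeout()), the restart path
 * never oversleeps.  A sketch of the sequence:
 *
 *	t = 0s	poll(fds, n, 5000)	end_time = now + 5s
 *	t = 2s	handled signal arrives	do_sys_poll() returns -EINTR;
 *					sys_poll() stashes the absolute
 *					end_time in the restart block
 *	later	sys_restart_syscall()	do_restart_poll() resumes with
 *					the same end_time, i.e. roughly
 *					3s of waiting remain
 */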
SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		long, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

#ifdef HAVE_SET_RESTORE_SIGMASK
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */