#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static struct kmem_cache *filelock_cache __read_mostly;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}

void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_nspid = NULL;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo)
{
	struct file_lock *lock = (struct file_lock *) foo;

	locks_init_lock(lock);
}

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_copy_lock)
			fl->fl_lmops->fl_copy_lock(new, fl);
		new->fl_lmops = fl->fl_lmops;
	}
}
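
/*
 * Illustrative sketch (not part of this file): a filesystem that keeps
 * per-lock private state would point fl->fl_ops at callbacks like the
 * hypothetical ones below, so that locks_copy_private() and
 * locks_release_private() above manage that state for it:
 *
 *	static void myfs_fl_copy_lock(struct file_lock *new,
 *				      struct file_lock *fl)
 *	{
 *		take_a_reference(fl);		// hypothetical helper
 *	}
 *
 *	static void myfs_fl_release_private(struct file_lock *fl)
 *	{
 *		drop_the_reference(fl);		// hypothetical helper
 *	}
 *
 *	static const struct file_lock_operations myfs_lock_ops = {
 *		.fl_copy_lock		= myfs_fl_copy_lock,
 *		.fl_release_private	= myfs_fl_release_private,
 *	};
 */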

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}
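
/*
 * Userspace view of the above (sketch, not part of this file): flock(2)
 * commands map through flock_translate_cmd() onto the F_RDLCK/F_WRLCK/
 * F_UNLCK types used internally:
 *
 *	int fd = open("/tmp/somefile", O_RDONLY);  // hypothetical path
 *
 *	if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
 *		// EWOULDBLOCK: another filp holds LOCK_EX
 *	}
 *	flock(fd, LOCK_UN);
 */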

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif
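
/*
 * Worked example for the l_len < 0 case handled above (POSIX-2001
 * semantics; sketch only): lock the 100 bytes *preceding* the current
 * file position.  flock_to_posix_lock() converts this to the absolute
 * range [f_pos - 100, f_pos - 1]:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_CUR,
 *		.l_start  = 0,
 *		.l_len    = -100,
 *	};
 *	fcntl(fd, F_SETLK, &fl);
 */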

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
	return fl->fl_file == try->fl_file;
}

static const struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_mylease = lease_mylease_callback,
	.fl_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

/* Same as above, but takes the kernel lock for callers that do not
 * already hold it.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
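
/*
 * Summary of the conflict rules implemented below, for two overlapping
 * locks with different owners (F_RDLCK = shared, F_WRLCK = exclusive):
 *
 *			existing F_RDLCK	existing F_WRLCK
 *	want F_RDLCK	no conflict		conflict
 *	want F_WRLCK	conflict		conflict
 */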

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return 0;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return 0;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_kernel();
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 *
 * To handle those cases, we just bail out after a few iterations.
 */

#define MAX_DEADLK_ITERATIONS 10
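
/*
 * Worked example of the detection below (sketch): task A holds [0,9]
 * and is blocked waiting for [10,19], which task B holds.  If B now
 * requests [0,9] with F_SETLKW, posix_locks_deadlock() follows the
 * blocked_list chain from B's conflicting blocker (A's pending request)
 * back to B itself, and the request fails with -EDEADLK rather than
 * sleeping forever.
 */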

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	if (request->fl_type != F_UNLCK) {
		error = -ENOMEM;
		new_fl = locks_alloc_lock();
		if (new_fl == NULL)
			goto out;
		error = 0;
	}

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		cond_resched();

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_kernel();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to the next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing. If new lock(s) need to be inserted
	 * all modifications are done below this point, so it is still
	 * safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
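
/*
 * Userspace sketch of the blocking path: an F_SETLKW request like the
 * one below keeps retrying while FILE_LOCK_DEFERRED is returned, just
 * as posix_lock_file_wait() above does for in-kernel callers:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// 0 means "to EOF"
 *	};
 *	if (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EINTR) {
 *		// a signal interrupted the wait
 *	}
 */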

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode:      the file to check
 * @filp:       how the file was opened (if it was)
 * @offset:     start of area to check
 * @count:      length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}
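
/*
 * Timing note for the lease machinery above and below: a conflicting
 * open() signals the lease holder and allows it up to lease_break_time
 * seconds (45 by default, see the top of this file) to release the
 * lease before time_out_leases() forces the break via lease_modify().
 * Both knobs are runtime-tunable sysctls, e.g.:
 *
 *	echo 10 > /proc/sys/fs/lease-break-time
 *	echo 0  > /proc/sys/fs/leases-enable
 */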

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (want_write) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 !new_fl->fl_next, break_time);
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->fl_break function is required
 * by break_lease().
 *
 * Called with kernel lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct file_lock *new_fl = NULL;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->fl_break);

	lease = *flp;

	if (arg != F_UNLCK) {
		error = -ENOMEM;
		new_fl = locks_alloc_lock();
		if (new_fl == NULL)
			goto out;

		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((atomic_read(&dentry->d_count) > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (lease->fl_lmops->fl_mylease(fl, lease))
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		*flp = *my_before;
		error = lease->fl_lmops->fl_change(my_before, arg);
		goto out;
	}

	error = 0;
	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_copy_lock(new_fl, lease);
	locks_insert_lock(before, new_fl);

	*flp = new_fl;
	return 0;

out:
	if (new_fl != NULL)
		locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(generic_setlease);

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The (*lease)->fl_lmops->fl_break operation must be set; if not,
 * break_lease will oops!
 *
 * This will call the filesystem's setlease file method, if
 * defined. Note that there is no getlease method; instead, the
 * filesystem setlease method should call back to setlease() to
 * add a lease to the inode's lease list, where fcntl_getlease() can
 * find it. Since fcntl_getlease() only reports whether the current
 * task holds a lease, a cluster filesystem need only do this for
 * leases held by processes on this node.
 *
 * There is also no break_lease method; filesystems that
 * handle their own leases should break leases themselves from the
 * filesystem's open, create, and (on truncate) setattr methods.
 *
 * Warning: the only current setlease methods exist only to disable
 * leases in certain cases. More vfs changes may be required to
 * allow a full filesystem lease implementation.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_kernel();
	if (filp->f_op && filp->f_op->setlease)
		error = filp->f_op->setlease(filp, arg, lease);
	else
		error = generic_setlease(filp, arg, lease);
	unlock_kernel();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
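
/*
 * Userspace sketch of taking a lease through fcntl_setlease() below;
 * the holder is told about a break via a signal, so F_SETSIG is
 * usually configured as well (plain SIGIO is the default otherwise):
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1) {
 *		// e.g. EAGAIN: somebody has the file open for writing
 *	}
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);	// relinquish the lease
 */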

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock fl, *flp = &fl;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error;

	locks_init_lock(&fl);
	error = lease_init(filp, arg, &fl);
	if (error)
		return error;

	lock_kernel();

	error = vfs_setlease(filp, arg, &flp);
	if (error || arg == F_UNLCK)
		goto out_unlock;

	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
	if (error < 0) {
		/* remove lease just inserted by setlease */
		flp->fl_type = F_UNLCK | F_INPROGRESS;
		flp->fl_break_time = jiffies - 10;
		time_out_leases(inode);
		goto out_unlock;
	}

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
	unlock_kernel();
	return error;
}

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
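
/*
 * Userspace sketch of the test path entered below: F_GETLK fills the
 * struct in place.  l_type comes back as F_UNLCK if the whole range is
 * free; otherwise it describes the first conflicting lock, including
 * the holder's pid:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK) {
 *		// no conflict -- but this is only a snapshot
 *	}
 */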

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * fl_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock, the file system should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result.
 * If the request timed out, the callback routine will return a
 * nonzero return code and the file system should release the lock. The file
 * system is also responsible for keeping a corresponding posix lock when it
 * grants a lock, so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	/*
	 * we need that spin_lock here - it prevents reordering between
	 * update of inode->i_flock and check for it done in close().
	 * rcu_read_lock() wouldn't do.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		  struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
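
/*
 * Caveat demonstrated by locks_remove_posix() below (standard POSIX
 * behaviour, sketch only): POSIX locks are owned by the process, not
 * the descriptor, so closing *any* descriptor referring to the file
 * drops all of the process's POSIX locks on it:
 *
 *	int fd1 = open("/tmp/f", O_RDWR);	// hypothetical path
 *	int fd2 = open("/tmp/f", O_RDWR);	// same file
 *	fcntl(fd1, F_SETLK, &fl);		// lock taken via fd1
 *	close(fd2);				// ...and lost again here
 */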

/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_kernel();
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* What? */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_kernel();
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp:   how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
	int status = 0;

	lock_kernel();
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	unlock_kernel();
	return status;
}

EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
"MANDATORY" : "ADVISORY "); 1970 } else if (IS_FLOCK(fl)) { 1971 if (fl->fl_type & LOCK_MAND) { 1972 seq_printf(f, "FLOCK MSNFS "); 1973 } else { 1974 seq_printf(f, "FLOCK ADVISORY "); 1975 } 1976 } else if (IS_LEASE(fl)) { 1977 seq_printf(f, "LEASE "); 1978 if (fl->fl_type & F_INPROGRESS) 1979 seq_printf(f, "BREAKING "); 1980 else if (fl->fl_file) 1981 seq_printf(f, "ACTIVE "); 1982 else 1983 seq_printf(f, "BREAKER "); 1984 } else { 1985 seq_printf(f, "UNKNOWN UNKNOWN "); 1986 } 1987 if (fl->fl_type & LOCK_MAND) { 1988 seq_printf(f, "%s ", 1989 (fl->fl_type & LOCK_READ) 1990 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " 1991 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); 1992 } else { 1993 seq_printf(f, "%s ", 1994 (fl->fl_type & F_INPROGRESS) 1995 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " 1996 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); 1997 } 1998 if (inode) { 1999#ifdef WE_CAN_BREAK_LSLK_NOW 2000 seq_printf(f, "%d %s:%ld ", fl_pid, 2001 inode->i_sb->s_id, inode->i_ino); 2002#else 2003 /* userspace relies on this representation of dev_t ;-( */ 2004 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, 2005 MAJOR(inode->i_sb->s_dev), 2006 MINOR(inode->i_sb->s_dev), inode->i_ino); 2007#endif 2008 } else { 2009 seq_printf(f, "%d <none>:0 ", fl_pid); 2010 } 2011 if (IS_POSIX(fl)) { 2012 if (fl->fl_end == OFFSET_MAX) 2013 seq_printf(f, "%Ld EOF\n", fl->fl_start); 2014 else 2015 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); 2016 } else { 2017 seq_printf(f, "0 EOF\n"); 2018 } 2019} 2020 2021static int locks_show(struct seq_file *f, void *v) 2022{ 2023 struct file_lock *fl, *bfl; 2024 2025 fl = list_entry(v, struct file_lock, fl_link); 2026 2027 lock_get_status(f, fl, (long)f->private, ""); 2028 2029 list_for_each_entry(bfl, &fl->fl_block, fl_block) 2030 lock_get_status(f, bfl, (long)f->private, " ->"); 2031 2032 f->private++; 2033 return 0; 2034} 2035 2036static void *locks_start(struct seq_file *f, loff_t *pos) 2037{ 2038 lock_kernel(); 2039 f->private = (void *)1; 2040 return seq_list_start(&file_lock_list, *pos); 2041} 2042 2043static void *locks_next(struct seq_file *f, void *v, loff_t *pos) 2044{ 2045 return seq_list_next(v, &file_lock_list, pos); 2046} 2047 2048static void locks_stop(struct seq_file *f, void *v) 2049{ 2050 unlock_kernel(); 2051} 2052 2053static const struct seq_operations locks_seq_operations = { 2054 .start = locks_start, 2055 .next = locks_next, 2056 .stop = locks_stop, 2057 .show = locks_show, 2058}; 2059 2060static int locks_open(struct inode *inode, struct file *filp) 2061{ 2062 return seq_open(filp, &locks_seq_operations); 2063} 2064 2065static const struct file_operations proc_locks_operations = { 2066 .open = locks_open, 2067 .read = seq_read, 2068 .llseek = seq_lseek, 2069 .release = seq_release, 2070}; 2071 2072static int __init proc_locks_init(void) 2073{ 2074 proc_create("locks", 0, NULL, &proc_locks_operations); 2075 return 0; 2076} 2077module_init(proc_locks_init); 2078#endif 2079 2080/** 2081 * lock_may_read - checks that the region is free of locks 2082 * @inode: the inode that is being read 2083 * @start: the first byte to read 2084 * @len: the number of bytes to read 2085 * 2086 * Emulates Windows locking requirements. Whole-file 2087 * mandatory locks (share modes) can prohibit a read and 2088 * byte-range POSIX locks can prohibit a read if they overlap. 2089 * 2090 * N.B. this function is only ever called 2091 * from knfsd and ownership of locks is never checked. 

/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
					   sizeof(struct file_lock), 0,
					   SLAB_PANIC, init_once);
	return 0;
}

core_initcall(filelock_init);