#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static struct kmem_cache *filelock_cache __read_mostly;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}

static void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_release_private)
			fl->fl_lmops->fl_release_private(fl);
		fl->fl_lmops = NULL;
	}
}

/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}

void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialises the fields of the file lock which are invariant for
 * free file_locks.
 */
static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	locks_init_lock(lock);
}

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->fl_copy_lock)
			fl->fl_lmops->fl_copy_lock(new, fl);
		new->fl_lmops = fl->fl_lmops;
	}
}
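/*
 * Illustrative sketch (not part of the original file): struct file_lock
 * objects either come from the slab cache, where init_once() has already
 * run locks_init_lock() on them, or live on the caller's stack, in which
 * case locks_init_lock() must be called by hand -- compare
 * locks_mandatory_area() and locks_remove_posix() further down.
 *
 *	struct file_lock *fl = locks_alloc_lock();
 *	if (fl == NULL)
 *		return -ENOMEM;
 *	...
 *	locks_free_lock(fl);
 */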
/*
 * Initialize a new lock from an existing file_lock structure.
 */
static void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return (0);
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
	return fl->fl_file == try->fl_file;
}

static struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_mylease = lease_mylease_callback,
	.fl_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		goto out;

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		fl = NULL;
	}
out:
	*flp = fl;
	return error;
}
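/*
 * Worked example for the flock -> file_lock conversion in
 * flock_to_posix_lock()/flock64_to_posix_lock() above (illustrative only):
 * with l_whence = SEEK_SET, l_start = 100 and l_len = -10, the POSIX-2001
 * rules give the byte range [90, 99], i.e. fl_start = 90 and fl_end = 99.
 * With l_len = 0 the lock runs to end of file and fl_end is OFFSET_MAX.
 */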
/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}

static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_ops && fl->fl_ops->fl_insert)
		fl->fl_ops->fl_insert(fl);
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_ops && fl->fl_ops->fl_remove)
		fl->fl_ops->fl_remove(fl);

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX-specific
 * checking before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK-specific
 * checking before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
{
	int result = 0;
	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(fl_wait, &wait);
	if (timeout == 0)
		schedule();
	else
		result = schedule_timeout(timeout);
	if (signal_pending(current))
		result = -ERESTARTSYS;
	remove_wait_queue(fl_wait, &wait);
	__set_current_state(TASK_RUNNING);
	return result;
}

static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
{
	int result;
	locks_insert_block(blocker, waiter);
	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
	__locks_delete_block(waiter);
	return result;
}

int
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(cfl, fl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		unlock_kernel();
		return 1;
	} else
		fl->fl_type = F_UNLCK;
	unlock_kernel();
	return 0;
}

EXPORT_SYMBOL(posix_test_lock);
/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	struct list_head *tmp;

next_task:
	if (posix_same_owner(caller_fl, block_fl))
		return 1;
	list_for_each(tmp, &blocked_list) {
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		if (posix_same_owner(fl, block_fl)) {
			fl = fl->fl_next;
			block_fl = fl;
			goto next_task;
		}
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * at the head of the list, but that's secret knowledge known only to
 * flock_lock_file and posix_lock_file.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	error = -ENOMEM;
	new_fl = locks_alloc_lock();
	if (new_fl == NULL)
		goto out;
	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		cond_resched();

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (request->fl_flags & FL_SLEEP)
			locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(&inode->i_flock, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_kernel();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
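/*
 * Worked example for posix_locks_deadlock() above (illustrative only):
 * task A holds lock L_A and is already blocked waiting on lock L_B,
 * which is held by task B.  If B now requests a range that conflicts
 * with L_A, the search starting from block_fl = L_A finds A on the
 * blocked_list waiting for a lock whose owner is B -- the same owner as
 * the new request -- so the function returns 1 and __posix_lock_file()
 * below fails the request with -EDEADLK instead of sleeping forever.
 */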
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			struct file_lock *fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = -EAGAIN;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing. If new lock(s) need to be inserted
	 * all modifications are done below this point, so it is still
	 * safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
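/*
 * Worked example for the merge/split logic in __posix_lock_file() above
 * (illustrative only): if an owner holds a write lock on bytes 0..99 and
 * then requests F_UNLCK on 40..59, the request punches a hole in the
 * middle; the existing lock is trimmed to the upper piece (60..99) and a
 * second structure, taken from new_fl2, is inserted for the lower piece
 * (0..39).  locks_wake_up_blocks() is called on both pieces so blocked
 * waiters can retry against the new layout.
 */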
/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != -EAGAIN)
			break;
		if (!(fl.fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}
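/*
 * Note on the locks_mandatory_*() helpers above (illustrative): a file is
 * subject to mandatory locking only when its filesystem is mounted with
 * the "mand" option (IS_MANDLOCK()) and the file's setgid bit is set while
 * the group-execute bit is clear, i.e.
 * (i_mode & (S_ISGID | S_IXGRP)) == S_ISGID -- the same mode test used by
 * the F_SETLK paths below.  locks_mandatory_area() then treats a read like
 * a shared-lock request and a write like an exclusive-lock request over
 * the range [offset, offset + count - 1].
 */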
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already
 * is a lease on this file.  Leases are broken on a call to open()
 * or truncate().  This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	int alloc_err;
	unsigned long break_time;
	int i_have_this_lease = 0;

	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
			&new_fl);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (mode & FMODE_WRITE) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
		error = alloc_err;
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	error = locks_block_on_timeout(flock, new_fl, break_time);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!alloc_err)
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);
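/*
 * Timeline sketch for __break_lease() above (illustrative only): when a
 * conflicting open() or truncate() arrives, every lease on the inode is
 * marked F_INPROGRESS and its fl_break callback fires -- for the default
 * lease_manager_ops that means kill_fasync(), so the lease holder gets
 * SIGIO (or the signal chosen with F_SETSIG).  The holder then has
 * roughly lease_break_time seconds (45 by default, see the variable at
 * the top of this file) to release or downgrade the lease before
 * time_out_leases() forces the break and the blocked opener may proceed.
 */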
/**
 * lease_get_mtime
 * @inode: the inode
 * @time:  pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}

/**
 * __setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->fl_break function is required
 * by break_lease().
 *
 * Called with kernel lock held.
 */
static int __setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	time_out_leases(inode);

	error = -EINVAL;
	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
		goto out;

	lease = *flp;

	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((atomic_read(&dentry->d_count) > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (lease->fl_lmops->fl_mylease(fl, lease))
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		*flp = *my_before;
		error = lease->fl_lmops->fl_change(my_before, arg);
		goto out;
	}

	error = 0;
	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	error = -ENOMEM;
	fl = locks_alloc_lock();
	if (fl == NULL)
		goto out;

	locks_copy_lock(fl, lease);

	locks_insert_lock(before, fl);

	*flp = fl;
	error = 0;
out:
	return error;
}
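/*
 * Userspace view of the lease machinery (illustrative sketch; the
 * in-kernel entry points are setlease() and fcntl_setlease() below):
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// choose the break signal
 *	fcntl(fd, F_SETLEASE, F_WRLCK);		// take an exclusive lease
 *	...
 *	// on notification, give the lease up within lease-break-time
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */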
/**
 * setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The fl_lmops fl_break function is required by break_lease.
 */
int setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	lock_kernel();
	error = __setlease(filp, arg, lease);
	unlock_kernel();

	return error;
}

EXPORT_SYMBOL(setlease);

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock fl, *flp = &fl;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	locks_init_lock(&fl);
	error = lease_init(filp, arg, &fl);
	if (error)
		return error;

	lock_kernel();

	error = __setlease(filp, arg, &flp);
	if (error || arg == F_UNLCK)
		goto out_unlock;

	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
	if (error < 0) {
		/* remove lease just inserted by __setlease */
		flp->fl_type = F_UNLCK | F_INPROGRESS;
		flp->fl_break_time = jiffies - 10;
		time_out_leases(inode);
		goto out_unlock;
	}

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
	unlock_kernel();
	return error;
}

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 *	%LOCK_SH -- a shared lock.
 *
 *	%LOCK_EX -- an exclusive lock.
 *
 *	%LOCK_UN -- remove an existing lock.
 *
 *	%LOCK_MAND -- a `mandatory' flock.  This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, cmd);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold the result
 *
 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
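/*
 * Userspace view of sys_flock() above (illustrative sketch):
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		;	// someone else holds a conflicting flock lock
 *	...
 *	flock(fd, LOCK_UN);
 *
 * LOCK_NB maps to the !can_sleep path above; without it the call sleeps
 * (via flock_lock_file_wait(), unless the filesystem supplies its own
 * ->flock method) until the lock can be granted.
 */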
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * fl_grant is set.  Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock.  When ->lock() does return asynchronously,
 * it must return -EINPROGRESS, and call ->fl_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock the file system should return
 * -EINPROGRESS, then try to get the lock and call the callback routine with
 * the result.  If the request timed out the callback routine will return a
 * nonzero return code and the file system should release the lock.  The file
 * system is also responsible for keeping a corresponding posix lock when it
 * grants a lock so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->fl_grant() before returning to the caller with a -EINPROGRESS
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	for (;;) {
		error = vfs_lock_file(filp, cmd, file_lock, NULL);
		if (error != -EAGAIN || cmd == F_SETLK)
			break;
		error = wait_event_interruptible(file_lock->fl_wait,
						 !file_lock->fl_next);
		if (!error)
			continue;

		locks_delete_block(file_lock);
		break;
	}

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	for (;;) {
		error = vfs_lock_file(filp, cmd, file_lock, NULL);
		if (error != -EAGAIN || cmd == F_SETLK64)
			break;
		error = wait_event_interruptible(file_lock->fl_wait,
						 !file_lock->fl_next);
		if (!error)
			continue;

		locks_delete_block(file_lock);
		break;
	}

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_kernel();
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* What? */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_kernel();
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp:   how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
	int status = 0;

	lock_kernel();
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	unlock_kernel();
	return status;
}

EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);

static void lock_get_status(char *out, struct file_lock *fl, int id, char *pfx)
{
	struct inode *inode = NULL;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	out += sprintf(out, "%d:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		out += sprintf(out, "%6s %s ",
			       (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			       (inode == NULL) ? "*NOINODE*" :
			       (IS_MANDLOCK(inode) &&
				(inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
			       "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			out += sprintf(out, "FLOCK MSNFS ");
		} else {
			out += sprintf(out, "FLOCK ADVISORY ");
		}
	} else if (IS_LEASE(fl)) {
		out += sprintf(out, "LEASE ");
		if (fl->fl_type & F_INPROGRESS)
			out += sprintf(out, "BREAKING ");
		else if (fl->fl_file)
			out += sprintf(out, "ACTIVE ");
		else
			out += sprintf(out, "BREAKER ");
	} else {
		out += sprintf(out, "UNKNOWN UNKNOWN ");
	}
	if (fl->fl_type & LOCK_MAND) {
		out += sprintf(out, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		out += sprintf(out, "%s ",
			       (fl->fl_type & F_INPROGRESS)
			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
			       inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t ;-( */
		out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
			       MAJOR(inode->i_sb->s_dev),
			       MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			out += sprintf(out, "%Ld EOF\n", fl->fl_start);
		else
			out += sprintf(out, "%Ld %Ld\n", fl->fl_start,
				       fl->fl_end);
	} else {
		out += sprintf(out, "0 EOF\n");
	}
}

static void move_lock_status(char **p, off_t *pos, off_t offset)
{
	int len;
	len = strlen(*p);
	if (*pos >= offset) {
		/* the complete line is valid */
		*p += len;
		*pos += len;
		return;
	}
	if (*pos + len > offset) {
		/* use the second part of the line */
		int i = offset - *pos;
		memmove(*p, *p + i, len - i);
		*p += len - i;
		*pos += len;
		return;
	}
	/* discard the complete line */
	*pos += len;
}

/**
 * get_locks_status - reports lock usage in /proc/locks
 * @buffer: address in userspace to write into
 * @start: ?
 * @offset: how far we are through the buffer
 * @length: how much to read
 */
int get_locks_status(char *buffer, char **start, off_t offset, int length)
{
	struct list_head *tmp;
	char *q = buffer;
	off_t pos = 0;
	int i = 0;

	lock_kernel();
	list_for_each(tmp, &file_lock_list) {
		struct list_head *btmp;
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		lock_get_status(q, fl, ++i, "");
		move_lock_status(&q, &pos, offset);

		if (pos >= offset + length)
			goto done;

		list_for_each(btmp, &fl->fl_block) {
			struct file_lock *bfl = list_entry(btmp,
					struct file_lock, fl_block);
			lock_get_status(q, bfl, i, " ->");
			move_lock_status(&q, &pos, offset);

			if (pos >= offset + length)
				goto done;
		}
	}
done:
	unlock_kernel();
	*start = buffer;
	if (q - buffer < length)
		return (q - buffer);
	return length;
}
/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements.  Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements.  Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC,
			init_once, NULL);
	return 0;
}

core_initcall(filelock_init);
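/*
 * For reference (illustrative only): a line in /proc/locks, as assembled
 * by lock_get_status() and emitted through get_locks_status() above, has
 * the shape "id: class mand/advisory access pid major:minor:inode start end",
 * e.g.
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 */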