kern_lockf.c revision 20676
1/* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Scooter Morris at Genentech Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $Id: kern_lockf.c,v 1.6 1996/09/03 14:21:52 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;	/* debug bitmask: 1 = lock ops, 2 = overlap scans */
#endif

#define NOLOCKF (struct lockf *)0	/* nil struct lockf pointer */
#define SELF	0x1			/* lf_findoverlap: consider only our own locks */
#define OTHERS	0x2			/* lf_findoverlap: consider only others' locks */
static void	 lf_addblock __P((struct lockf *, struct lockf *));
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support.
 *
 * Entry point for the VOP_ADVLOCK operation.  Converts the caller's
 * struct flock into an absolute byte range [start, end] (end == -1 is
 * the "to end of file" sentinel used throughout this file), allocates
 * a struct lockf describing the request, and dispatches on a_op.
 *
 * head points at the head of the vnode's lock list; size is the
 * current file size, used to resolve SEEK_END-relative offsets.
 * Returns 0 or an errno value.
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	/* l_len == 0 means "lock to end of file"; record as end == -1. */
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;		/* owner: proc (F_POSIX) or file (F_FLOCK) */
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.  lf_setlock consumes `lock';
	 * for the other operations we free it here.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 *
 * Sleeps (when F_WAIT is set) until no conflicting lock remains,
 * doing POSIX deadlock detection while blocked, then merges the new
 * lock into the per-file list, upgrading/downgrading or splitting
 * any overlapping locks this owner already holds.
 * Consumes `lock': it is either linked into the list or freed.
 * Returns 0, EAGAIN, EDEADLK, or a tsleep error.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				/*
				 * A blocked lockf sleeps on its own address
				 * (see the tsleep below), so when p_wmesg
				 * matches, p_wchan is a struct lockf.
				 */
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					/* Cycle back to ourselves: deadlock. */
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * Interrupted by a signal (PCATCH):
			 * Delete ourselves from the waiting to lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				/* Unlink ourselves from the chain. */
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			/*
			 * If we did not find ourselves on the list, but
			 * are still linked onto a lock list, then something
			 * is very wrong.
			 */
			if (block == NOLOCKF && lock->lf_next != NOLOCKF)
				panic("lf_setlock: lost lock");
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				/* Same type: existing lock already covers us. */
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				/* Insert new lock, shrink overlap from the front. */
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				/* Inherit the overlap's blocked-waiter list. */
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			/* Keep scanning: the lock may swallow more overlaps. */
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			/* The lock's tail may still overlap later entries. */
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 *
 * `unlock' describes the range to clear (lf_type must be F_UNLCK);
 * it is NOT freed here -- the caller retains ownership.
 * Always returns 0.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				/* Common start: just trim the front. */
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			/*
			 * lf_split links `unlock' between the two pieces;
			 * splice it back out since it is not a real lock.
			 */
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			/* The cleared range may cover further locks. */
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 *
 * Implements F_GETLK: on conflict, *fl is filled in with the first
 * blocking lock (l_pid is -1 for flock-style owners); otherwise
 * fl->l_type is set to F_UNLCK.  Always returns 0.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		/* Internal end == -1 maps back to l_len == 0 (to EOF). */
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
/*
 * Scan the vnode's lock list for a lock owned by someone else that
 * overlaps `lock' and conflicts with it (at least one side wanting
 * F_WRLCK).  Returns the blocking lock, or NOLOCKF if none.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * lf is the list position to start the scan from; `type' selects
 * whose locks are examined (SELF: same lf_id as `lock'; OTHERS:
 * different lf_id).  On a hit, *overlap is the overlapping lock and
 * *prev is the link that points at it, so the caller can unlink or
 * insert in place.  The return value is the overlap case (0-5, see
 * the comment in the body); end == -1 means "to end of file"
 * throughout.
 *
 * NOTE: this returns only the FIRST overlapping lock. There
 * may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip locks whose ownership doesn't match the scan type. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			/*
			 * An owner's locks appear sorted by start offset,
			 * so once we are past `end' a SELF scan can stop.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		/* The five cases above are exhaustive for a true overlap. */
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
/*
 * Append `lock' to the singly-linked (via lf_block) list of locks
 * blocked on `blocklist'.  lf_wakelock walks and clears this list.
 */
static void
lf_addblock(blocklist, lock)
	struct lockf *blocklist;
	struct lockf *lock;
{
	register struct lockf *lf;

	if (lock == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", lock);
		lf_print("to blocked list of", blocklist);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = blocklist->lf_block) == NOLOCKF) {
		/* Empty list: the new lock becomes the head. */
		blocklist->lf_block = lock;
		return;
	}
	/* Walk to the tail and append there. */
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = lock;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 *
 * lock1 is the existing lock on the list; lock2 is the new lock
 * whose range is contained within lock1.  lock2 is linked into the
 * list in place of the carved-out region; when neither endpoint is
 * shared, a third lock is allocated for lock1's tail.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		/* Common start: lock2 takes the front, lock1 the back. */
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		/* Common end: lock1 keeps the front, lock2 takes the back. */
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;	/* blocked list stays with lock1 */
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in: lock1, lock2, splitlock.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist.
 *
 * Detach the entire list of locks blocked on `listhead' and wake
 * each one; the waiters sleep on their own lockf address in
 * lf_setlock and will retry their requests from scratch.
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		/* Clear links before waking so the waiter restarts cleanly. */
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 *
 * Debug helper (LOCKF_DEBUG only): dumps one lock's owner, file,
 * type and range to the console, prefixed with `tag'.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

/*
 * Debug helper (LOCKF_DEBUG only): dumps the whole lock list of the
 * inode that `lock' belongs to, prefixed with `tag'.
 */
void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */