kern_lockf.c revision 30354
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $Id: kern_lockf.c,v 1.15 1997/10/11 18:31:23 phk Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>
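
/*
 * Note on structure (descriptive, not part of the original revision):
 * each file with active advisory locks has a list of struct lockf hanging
 * off its vnode, chained through lf_next and kept sorted by lf_start.
 * A request that cannot be granted sleeps on its own lockf structure and
 * sits on the blocking lock's lf_blkhd tail queue until lf_wakelock()
 * releases it.
 */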

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>


static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
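
/*
 * Worked examples (illustrative) of the flock conversion above, which
 * maps l_whence/l_start/l_len onto the inclusive byte range [start, end]:
 *
 *	l_whence = SEEK_SET, l_start = 100, l_len = 50	-> [100, 149]
 *	l_whence = SEEK_END, l_start = -10, l_len = 10,
 *	    file size = 1000				-> [990, 999]
 *	l_len = 0 ("lock to end of file")		-> end = -1
 */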

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
					lf_block);
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (ltmp = overlap->lf_blkhd.tqh_first) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
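
/*
 * Illustration of the deadlock walk in lf_setlock() above, assuming two
 * processes holding POSIX-style locks:
 *
 *	P1 holds [0, 49]  and is sleeping on a request for [50, 99]
 *	P2 holds [50, 99] and now requests [0, 49]
 *
 * P2's request blocks on P1's held lock, so the walk follows
 * block->lf_id to P1, sees P1 asleep on a "lockf" wait channel, follows
 * that channel to P1's pending request, and takes its lf_next to the
 * blocking lock, whose owner is P2 -- the original requester -- so
 * EDEADLK is returned instead of sleeping.  maxlockdepth bounds the
 * length of this walk.
 */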

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
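
/*
 * Example (illustrative): unlocking the middle of a held range takes the
 * "overlap contains lock" case above.  With an existing lock on [0, 99],
 * clearing [40, 59] calls lf_split(), leaving two locks [0, 39] and
 * [60, 99], and wakes any requests queued on the original lock so they
 * can retry.
 */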

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
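
/*
 * Lock compatibility as tested in lf_getblock() above: an overlapping
 * lock held by another process blocks us unless both locks are shared.
 *
 *	held \ wanted	F_RDLCK		F_WRLCK
 *	F_RDLCK		granted		blocks
 *	F_WRLCK		blocks		blocks
 */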

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
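
/*
 * Pictorially, the six cases distinguished by lf_findoverlap() above
 * (lf is the existing lock being examined, lock is the request):
 *
 *	case 0:	lf   -----			(no overlap)
 *			       lock -----
 *	case 1:	lf   -----			(lf == lock)
 *		lock -----
 *	case 2:	lf   ---------			(lf contains lock)
 *		lock   -----
 *	case 3:	lf     -----			(lock contains lf)
 *		lock ---------
 *	case 4:	lf   -----			(lf starts before lock)
 *		lock   -----
 *	case 5:	lf     -----			(lf ends after lock)
 *		lock -----
 */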

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (wakelock = listhead->lf_blkhd.tqh_first) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		     blk = blk->lf_block.tqe_next) {
			printf("\n\t\tlock request 0x%lx for ", blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
					((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%x", blk->lf_id);
			printf(", %s, start %d, end %d",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
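
/*
 * Illustrative userland counterpart (a sketch, not part of this file):
 * the path into lf_advlock() begins with an fcntl(2) call such as the
 * one below, which write-locks the first 100 bytes of a file and blocks
 * until the range is free (F_SETLKW reaches the kernel as F_SETLK with
 * F_WAIT set in a_flags).
 *
 *	#include <fcntl.h>
 *
 *	int
 *	lock_first_100(int fd)
 *	{
 *		struct flock fl;
 *
 *		fl.l_type = F_WRLCK;
 *		fl.l_whence = SEEK_SET;
 *		fl.l_start = 0;
 *		fl.l_len = 100;
 *		return (fcntl(fd, F_SETLKW, &fl));
 *	}
 */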