/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
35 * 36 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 37 * $FreeBSD: head/sys/kern/kern_lockf.c 75631 2001-04-17 20:45:23Z alfred $ 38 */ 39 40#include "opt_debug_lockf.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/kernel.h> 45#include <sys/lock.h> 46#include <sys/proc.h> 47#include <sys/unistd.h> 48#include <sys/vnode.h> 49#include <sys/malloc.h> 50#include <sys/fcntl.h> 51 52#include <sys/lockf.h> 53 54/* 55 * This variable controls the maximum number of processes that will 56 * be checked in doing deadlock detection. 57 */ 58static int maxlockdepth = MAXDEPTH; 59 60#ifdef LOCKF_DEBUG 61#include <sys/kernel.h> 62#include <sys/sysctl.h> 63 64#include <ufs/ufs/quota.h> 65#include <ufs/ufs/inode.h> 66 67 68static int lockf_debug = 0; 69SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 70#endif 71 72MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 73 74#define NOLOCKF (struct lockf *)0 75#define SELF 0x1 76#define OTHERS 0x2 77static int lf_clearlock __P((struct lockf *)); 78static int lf_findoverlap __P((struct lockf *, 79 struct lockf *, int, struct lockf ***, struct lockf **)); 80static struct lockf * 81 lf_getblock __P((struct lockf *)); 82static int lf_getlock __P((struct lockf *, struct flock *)); 83static int lf_setlock __P((struct lockf *)); 84static void lf_split __P((struct lockf *, struct lockf *)); 85static void lf_wakelock __P((struct lockf *)); 86 87/* 88 * Advisory record locking support 89 */ 90int 91lf_advlock(ap, head, size) 92 struct vop_advlock_args /* { 93 struct vnode *a_vp; 94 caddr_t a_id; 95 int a_op; 96 struct flock *a_fl; 97 int a_flags; 98 } */ *ap; 99 struct lockf **head; 100 u_quad_t size; 101{ 102 register struct flock *fl = ap->a_fl; 103 register struct lockf *lock; 104 off_t start, end; 105 int error; 106 107 /* 108 * Convert the flock structure into a start and end. 
109 */ 110 switch (fl->l_whence) { 111 112 case SEEK_SET: 113 case SEEK_CUR: 114 /* 115 * Caller is responsible for adding any necessary offset 116 * when SEEK_CUR is used. 117 */ 118 start = fl->l_start; 119 break; 120 121 case SEEK_END: 122 start = size + fl->l_start; 123 break; 124 125 default: 126 return (EINVAL); 127 } 128 if (start < 0) 129 return (EINVAL); 130 if (fl->l_len == 0) 131 end = -1; 132 else { 133 end = start + fl->l_len - 1; 134 if (end < start) 135 return (EINVAL); 136 } 137 /* 138 * Avoid the common case of unlocking when inode has no locks. 139 */ 140 if (*head == (struct lockf *)0) { 141 if (ap->a_op != F_SETLK) { 142 fl->l_type = F_UNLCK; 143 return (0); 144 } 145 } 146 /* 147 * Create the lockf structure 148 */ 149 MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK); 150 lock->lf_start = start; 151 lock->lf_end = end; 152 lock->lf_id = ap->a_id; 153/* lock->lf_inode = ip; */ /* XXX JH */ 154 lock->lf_type = fl->l_type; 155 lock->lf_head = head; 156 lock->lf_next = (struct lockf *)0; 157 TAILQ_INIT(&lock->lf_blkhd); 158 lock->lf_flags = ap->a_flags; 159 /* 160 * Do the requested operation. 161 */ 162 switch(ap->a_op) { 163 case F_SETLK: 164 return (lf_setlock(lock)); 165 166 case F_UNLCK: 167 error = lf_clearlock(lock); 168 FREE(lock, M_LOCKF); 169 return (error); 170 171 case F_GETLK: 172 error = lf_getlock(lock, fl); 173 FREE(lock, M_LOCKF); 174 return (error); 175 176 default: 177 free(lock, M_LOCKF); 178 return (EINVAL); 179 } 180 /* NOTREACHED */ 181} 182 183/* 184 * Set a byte-range lock. 
185 */ 186static int 187lf_setlock(lock) 188 register struct lockf *lock; 189{ 190 register struct lockf *block; 191 struct lockf **head = lock->lf_head; 192 struct lockf **prev, *overlap, *ltmp; 193 static char lockstr[] = "lockf"; 194 int ovcase, priority, needtolink, error; 195 196#ifdef LOCKF_DEBUG 197 if (lockf_debug & 1) 198 lf_print("lf_setlock", lock); 199#endif /* LOCKF_DEBUG */ 200 201 /* 202 * Set the priority 203 */ 204 priority = PLOCK; 205 if (lock->lf_type == F_WRLCK) 206 priority += 4; 207 priority |= PCATCH; 208 /* 209 * Scan lock list for this file looking for locks that would block us. 210 */ 211 while ((block = lf_getblock(lock))) { 212 /* 213 * Free the structure and return if nonblocking. 214 */ 215 if ((lock->lf_flags & F_WAIT) == 0) { 216 FREE(lock, M_LOCKF); 217 return (EAGAIN); 218 } 219 /* 220 * We are blocked. Since flock style locks cover 221 * the whole file, there is no chance for deadlock. 222 * For byte-range locks we must check for deadlock. 223 * 224 * Deadlock detection is done by looking through the 225 * wait channels to see if there are any cycles that 226 * involve us. MAXDEPTH is set just to make sure we 227 * do not go off into neverland. 
228 */ 229 if ((lock->lf_flags & F_POSIX) && 230 (block->lf_flags & F_POSIX)) { 231 register struct proc *wproc; 232 register struct lockf *waitblock; 233 int i = 0; 234 235 /* The block is waiting on something */ 236 wproc = (struct proc *)block->lf_id; 237 mtx_lock_spin(&sched_lock); 238 while (wproc->p_wchan && 239 (wproc->p_wmesg == lockstr) && 240 (i++ < maxlockdepth)) { 241 waitblock = (struct lockf *)wproc->p_wchan; 242 /* Get the owner of the blocking lock */ 243 waitblock = waitblock->lf_next; 244 if ((waitblock->lf_flags & F_POSIX) == 0) 245 break; 246 wproc = (struct proc *)waitblock->lf_id; 247 if (wproc == (struct proc *)lock->lf_id) { 248 mtx_unlock_spin(&sched_lock); 249 free(lock, M_LOCKF); 250 return (EDEADLK); 251 } 252 } 253 mtx_unlock_spin(&sched_lock); 254 } 255 /* 256 * For flock type locks, we must first remove 257 * any shared locks that we hold before we sleep 258 * waiting for an exclusive lock. 259 */ 260 if ((lock->lf_flags & F_FLOCK) && 261 lock->lf_type == F_WRLCK) { 262 lock->lf_type = F_UNLCK; 263 (void) lf_clearlock(lock); 264 lock->lf_type = F_WRLCK; 265 } 266 /* 267 * Add our lock to the blocked list and sleep until we're free. 268 * Remember who blocked us (for deadlock detection). 269 */ 270 lock->lf_next = block; 271 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block); 272#ifdef LOCKF_DEBUG 273 if (lockf_debug & 1) { 274 lf_print("lf_setlock: blocking on", block); 275 lf_printlist("lf_setlock", block); 276 } 277#endif /* LOCKF_DEBUG */ 278 error = tsleep((caddr_t)lock, priority, lockstr, 0); 279 /* 280 * We may have been awakened by a signal and/or by a 281 * debugger continuing us (in which cases we must remove 282 * ourselves from the blocked list) and/or by another 283 * process releasing a lock (in which case we have 284 * already been removed from the blocked list and our 285 * lf_next field set to NOLOCKF). 
286 */ 287 if (lock->lf_next) { 288 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block); 289 lock->lf_next = NOLOCKF; 290 } 291 if (error) { 292 free(lock, M_LOCKF); 293 return (error); 294 } 295 } 296 /* 297 * No blocks!! Add the lock. Note that we will 298 * downgrade or upgrade any overlapping locks this 299 * process already owns. 300 * 301 * Skip over locks owned by other processes. 302 * Handle any locks that overlap and are owned by ourselves. 303 */ 304 prev = head; 305 block = *head; 306 needtolink = 1; 307 for (;;) { 308 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap); 309 if (ovcase) 310 block = overlap->lf_next; 311 /* 312 * Six cases: 313 * 0) no overlap 314 * 1) overlap == lock 315 * 2) overlap contains lock 316 * 3) lock contains overlap 317 * 4) overlap starts before lock 318 * 5) overlap ends after lock 319 */ 320 switch (ovcase) { 321 case 0: /* no overlap */ 322 if (needtolink) { 323 *prev = lock; 324 lock->lf_next = overlap; 325 } 326 break; 327 328 case 1: /* overlap == lock */ 329 /* 330 * If downgrading lock, others may be 331 * able to acquire it. 332 */ 333 if (lock->lf_type == F_RDLCK && 334 overlap->lf_type == F_WRLCK) 335 lf_wakelock(overlap); 336 overlap->lf_type = lock->lf_type; 337 FREE(lock, M_LOCKF); 338 lock = overlap; /* for debug output below */ 339 break; 340 341 case 2: /* overlap contains lock */ 342 /* 343 * Check for common starting point and different types. 344 */ 345 if (overlap->lf_type == lock->lf_type) { 346 free(lock, M_LOCKF); 347 lock = overlap; /* for debug output below */ 348 break; 349 } 350 if (overlap->lf_start == lock->lf_start) { 351 *prev = lock; 352 lock->lf_next = overlap; 353 overlap->lf_start = lock->lf_end + 1; 354 } else 355 lf_split(overlap, lock); 356 lf_wakelock(overlap); 357 break; 358 359 case 3: /* lock contains overlap */ 360 /* 361 * If downgrading lock, others may be able to 362 * acquire it, otherwise take the list. 
363 */ 364 if (lock->lf_type == F_RDLCK && 365 overlap->lf_type == F_WRLCK) { 366 lf_wakelock(overlap); 367 } else { 368 while (!TAILQ_EMPTY(&overlap->lf_blkhd)) { 369 ltmp = TAILQ_FIRST(&overlap->lf_blkhd); 370 TAILQ_REMOVE(&overlap->lf_blkhd, ltmp, 371 lf_block); 372 TAILQ_INSERT_TAIL(&lock->lf_blkhd, 373 ltmp, lf_block); 374 ltmp->lf_next = lock; 375 } 376 } 377 /* 378 * Add the new lock if necessary and delete the overlap. 379 */ 380 if (needtolink) { 381 *prev = lock; 382 lock->lf_next = overlap->lf_next; 383 prev = &lock->lf_next; 384 needtolink = 0; 385 } else 386 *prev = overlap->lf_next; 387 free(overlap, M_LOCKF); 388 continue; 389 390 case 4: /* overlap starts before lock */ 391 /* 392 * Add lock after overlap on the list. 393 */ 394 lock->lf_next = overlap->lf_next; 395 overlap->lf_next = lock; 396 overlap->lf_end = lock->lf_start - 1; 397 prev = &lock->lf_next; 398 lf_wakelock(overlap); 399 needtolink = 0; 400 continue; 401 402 case 5: /* overlap ends after lock */ 403 /* 404 * Add the new lock before overlap. 405 */ 406 if (needtolink) { 407 *prev = lock; 408 lock->lf_next = overlap; 409 } 410 overlap->lf_start = lock->lf_end + 1; 411 lf_wakelock(overlap); 412 break; 413 } 414 break; 415 } 416#ifdef LOCKF_DEBUG 417 if (lockf_debug & 1) { 418 lf_print("lf_setlock: got the lock", lock); 419 lf_printlist("lf_setlock", lock); 420 } 421#endif /* LOCKF_DEBUG */ 422 return (0); 423} 424 425/* 426 * Remove a byte-range lock on an inode. 427 * 428 * Generally, find the lock (or an overlap to that lock) 429 * and remove it (or shrink it), then wakeup anyone we can. 
430 */ 431static int 432lf_clearlock(unlock) 433 register struct lockf *unlock; 434{ 435 struct lockf **head = unlock->lf_head; 436 register struct lockf *lf = *head; 437 struct lockf *overlap, **prev; 438 int ovcase; 439 440 if (lf == NOLOCKF) 441 return (0); 442#ifdef LOCKF_DEBUG 443 if (unlock->lf_type != F_UNLCK) 444 panic("lf_clearlock: bad type"); 445 if (lockf_debug & 1) 446 lf_print("lf_clearlock", unlock); 447#endif /* LOCKF_DEBUG */ 448 prev = head; 449 while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) { 450 /* 451 * Wakeup the list of locks to be retried. 452 */ 453 lf_wakelock(overlap); 454 455 switch (ovcase) { 456 457 case 1: /* overlap == lock */ 458 *prev = overlap->lf_next; 459 FREE(overlap, M_LOCKF); 460 break; 461 462 case 2: /* overlap contains lock: split it */ 463 if (overlap->lf_start == unlock->lf_start) { 464 overlap->lf_start = unlock->lf_end + 1; 465 break; 466 } 467 lf_split(overlap, unlock); 468 overlap->lf_next = unlock->lf_next; 469 break; 470 471 case 3: /* lock contains overlap */ 472 *prev = overlap->lf_next; 473 lf = overlap->lf_next; 474 free(overlap, M_LOCKF); 475 continue; 476 477 case 4: /* overlap starts before lock */ 478 overlap->lf_end = unlock->lf_start - 1; 479 prev = &overlap->lf_next; 480 lf = overlap->lf_next; 481 continue; 482 483 case 5: /* overlap ends after lock */ 484 overlap->lf_start = unlock->lf_end + 1; 485 break; 486 } 487 break; 488 } 489#ifdef LOCKF_DEBUG 490 if (lockf_debug & 1) 491 lf_printlist("lf_clearlock", unlock); 492#endif /* LOCKF_DEBUG */ 493 return (0); 494} 495 496/* 497 * Check whether there is a blocking lock, 498 * and if so return its process identifier. 
499 */ 500static int 501lf_getlock(lock, fl) 502 register struct lockf *lock; 503 register struct flock *fl; 504{ 505 register struct lockf *block; 506 507#ifdef LOCKF_DEBUG 508 if (lockf_debug & 1) 509 lf_print("lf_getlock", lock); 510#endif /* LOCKF_DEBUG */ 511 512 if ((block = lf_getblock(lock))) { 513 fl->l_type = block->lf_type; 514 fl->l_whence = SEEK_SET; 515 fl->l_start = block->lf_start; 516 if (block->lf_end == -1) 517 fl->l_len = 0; 518 else 519 fl->l_len = block->lf_end - block->lf_start + 1; 520 if (block->lf_flags & F_POSIX) 521 fl->l_pid = ((struct proc *)(block->lf_id))->p_pid; 522 else 523 fl->l_pid = -1; 524 } else { 525 fl->l_type = F_UNLCK; 526 } 527 return (0); 528} 529 530/* 531 * Walk the list of locks for an inode and 532 * return the first blocking lock. 533 */ 534static struct lockf * 535lf_getblock(lock) 536 register struct lockf *lock; 537{ 538 struct lockf **prev, *overlap, *lf = *(lock->lf_head); 539 int ovcase; 540 541 prev = lock->lf_head; 542 while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) { 543 /* 544 * We've found an overlap, see if it blocks us 545 */ 546 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) 547 return (overlap); 548 /* 549 * Nope, point to the next one on the list and 550 * see if it blocks us 551 */ 552 lf = overlap->lf_next; 553 } 554 return (NOLOCKF); 555} 556 557/* 558 * Walk the list of locks for an inode to 559 * find an overlapping lock (if any). 560 * 561 * NOTE: this returns only the FIRST overlapping lock. There 562 * may be more than one. 
563 */ 564static int 565lf_findoverlap(lf, lock, type, prev, overlap) 566 register struct lockf *lf; 567 struct lockf *lock; 568 int type; 569 struct lockf ***prev; 570 struct lockf **overlap; 571{ 572 off_t start, end; 573 574 *overlap = lf; 575 if (lf == NOLOCKF) 576 return (0); 577#ifdef LOCKF_DEBUG 578 if (lockf_debug & 2) 579 lf_print("lf_findoverlap: looking for overlap in", lock); 580#endif /* LOCKF_DEBUG */ 581 start = lock->lf_start; 582 end = lock->lf_end; 583 while (lf != NOLOCKF) { 584 if (((type & SELF) && lf->lf_id != lock->lf_id) || 585 ((type & OTHERS) && lf->lf_id == lock->lf_id)) { 586 *prev = &lf->lf_next; 587 *overlap = lf = lf->lf_next; 588 continue; 589 } 590#ifdef LOCKF_DEBUG 591 if (lockf_debug & 2) 592 lf_print("\tchecking", lf); 593#endif /* LOCKF_DEBUG */ 594 /* 595 * OK, check for overlap 596 * 597 * Six cases: 598 * 0) no overlap 599 * 1) overlap == lock 600 * 2) overlap contains lock 601 * 3) lock contains overlap 602 * 4) overlap starts before lock 603 * 5) overlap ends after lock 604 */ 605 if ((lf->lf_end != -1 && start > lf->lf_end) || 606 (end != -1 && lf->lf_start > end)) { 607 /* Case 0 */ 608#ifdef LOCKF_DEBUG 609 if (lockf_debug & 2) 610 printf("no overlap\n"); 611#endif /* LOCKF_DEBUG */ 612 if ((type & SELF) && end != -1 && lf->lf_start > end) 613 return (0); 614 *prev = &lf->lf_next; 615 *overlap = lf = lf->lf_next; 616 continue; 617 } 618 if ((lf->lf_start == start) && (lf->lf_end == end)) { 619 /* Case 1 */ 620#ifdef LOCKF_DEBUG 621 if (lockf_debug & 2) 622 printf("overlap == lock\n"); 623#endif /* LOCKF_DEBUG */ 624 return (1); 625 } 626 if ((lf->lf_start <= start) && 627 (end != -1) && 628 ((lf->lf_end >= end) || (lf->lf_end == -1))) { 629 /* Case 2 */ 630#ifdef LOCKF_DEBUG 631 if (lockf_debug & 2) 632 printf("overlap contains lock\n"); 633#endif /* LOCKF_DEBUG */ 634 return (2); 635 } 636 if (start <= lf->lf_start && 637 (end == -1 || 638 (lf->lf_end != -1 && end >= lf->lf_end))) { 639 /* Case 3 */ 640#ifdef 
LOCKF_DEBUG 641 if (lockf_debug & 2) 642 printf("lock contains overlap\n"); 643#endif /* LOCKF_DEBUG */ 644 return (3); 645 } 646 if ((lf->lf_start < start) && 647 ((lf->lf_end >= start) || (lf->lf_end == -1))) { 648 /* Case 4 */ 649#ifdef LOCKF_DEBUG 650 if (lockf_debug & 2) 651 printf("overlap starts before lock\n"); 652#endif /* LOCKF_DEBUG */ 653 return (4); 654 } 655 if ((lf->lf_start > start) && 656 (end != -1) && 657 ((lf->lf_end > end) || (lf->lf_end == -1))) { 658 /* Case 5 */ 659#ifdef LOCKF_DEBUG 660 if (lockf_debug & 2) 661 printf("overlap ends after lock\n"); 662#endif /* LOCKF_DEBUG */ 663 return (5); 664 } 665 panic("lf_findoverlap: default"); 666 } 667 return (0); 668} 669 670/* 671 * Split a lock and a contained region into 672 * two or three locks as necessary. 673 */ 674static void 675lf_split(lock1, lock2) 676 register struct lockf *lock1; 677 register struct lockf *lock2; 678{ 679 register struct lockf *splitlock; 680 681#ifdef LOCKF_DEBUG 682 if (lockf_debug & 2) { 683 lf_print("lf_split", lock1); 684 lf_print("splitting from", lock2); 685 } 686#endif /* LOCKF_DEBUG */ 687 /* 688 * Check to see if spliting into only two pieces. 
689 */ 690 if (lock1->lf_start == lock2->lf_start) { 691 lock1->lf_start = lock2->lf_end + 1; 692 lock2->lf_next = lock1; 693 return; 694 } 695 if (lock1->lf_end == lock2->lf_end) { 696 lock1->lf_end = lock2->lf_start - 1; 697 lock2->lf_next = lock1->lf_next; 698 lock1->lf_next = lock2; 699 return; 700 } 701 /* 702 * Make a new lock consisting of the last part of 703 * the encompassing lock 704 */ 705 MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK); 706 bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock); 707 splitlock->lf_start = lock2->lf_end + 1; 708 TAILQ_INIT(&splitlock->lf_blkhd); 709 lock1->lf_end = lock2->lf_start - 1; 710 /* 711 * OK, now link it in 712 */ 713 splitlock->lf_next = lock1->lf_next; 714 lock2->lf_next = splitlock; 715 lock1->lf_next = lock2; 716} 717 718/* 719 * Wakeup a blocklist 720 */ 721static void 722lf_wakelock(listhead) 723 struct lockf *listhead; 724{ 725 register struct lockf *wakelock; 726 727 while (!TAILQ_EMPTY(&listhead->lf_blkhd)) { 728 wakelock = TAILQ_FIRST(&listhead->lf_blkhd); 729 TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block); 730 wakelock->lf_next = NOLOCKF; 731#ifdef LOCKF_DEBUG 732 if (lockf_debug & 2) 733 lf_print("lf_wakelock: awakening", wakelock); 734#endif /* LOCKF_DEBUG */ 735 wakeup((caddr_t)wakelock); 736 } 737} 738 739#ifdef LOCKF_DEBUG 740/* 741 * Print out a lock. 742 */ 743void 744lf_print(tag, lock) 745 char *tag; 746 register struct lockf *lock; 747{ 748 749 printf("%s: lock %p for ", tag, (void *)lock); 750 if (lock->lf_flags & F_POSIX) 751 printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid); 752 else 753 printf("id %p", (void *)lock->lf_id); 754 /* XXX no %qd in kernel. Truncate. */ 755 printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld", 756 (u_long)lock->lf_inode->i_number, 757 major(lock->lf_inode->i_dev), 758 minor(lock->lf_inode->i_dev), 759 lock->lf_type == F_RDLCK ? "shared" : 760 lock->lf_type == F_WRLCK ? 
"exclusive" : 761 lock->lf_type == F_UNLCK ? "unlock" : 762 "unknown", (long)lock->lf_start, (long)lock->lf_end); 763 if (!TAILQ_EMPTY(&lock->lf_blkhd)) 764 printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd)); 765 else 766 printf("\n"); 767} 768 769void 770lf_printlist(tag, lock) 771 char *tag; 772 struct lockf *lock; 773{ 774 register struct lockf *lf, *blk; 775 776 printf("%s: Lock list for ino %lu on dev <%d, %d>:\n", 777 tag, (u_long)lock->lf_inode->i_number, 778 major(lock->lf_inode->i_dev), 779 minor(lock->lf_inode->i_dev)); 780 for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) { 781 printf("\tlock %p for ",(void *)lf); 782 if (lf->lf_flags & F_POSIX) 783 printf("proc %ld", 784 (long)((struct proc *)lf->lf_id)->p_pid); 785 else 786 printf("id %p", (void *)lf->lf_id); 787 /* XXX no %qd in kernel. Truncate. */ 788 printf(", %s, start %ld, end %ld", 789 lf->lf_type == F_RDLCK ? "shared" : 790 lf->lf_type == F_WRLCK ? "exclusive" : 791 lf->lf_type == F_UNLCK ? "unlock" : 792 "unknown", (long)lf->lf_start, (long)lf->lf_end); 793 TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) { 794 printf("\n\t\tlock request %p for ", (void *)blk); 795 if (blk->lf_flags & F_POSIX) 796 printf("proc %ld", 797 (long)((struct proc *)blk->lf_id)->p_pid); 798 else 799 printf("id %p", (void *)blk->lf_id); 800 /* XXX no %qd in kernel. Truncate. */ 801 printf(", %s, start %ld, end %ld", 802 blk->lf_type == F_RDLCK ? "shared" : 803 blk->lf_type == F_WRLCK ? "exclusive" : 804 blk->lf_type == F_UNLCK ? "unlock" : 805 "unknown", (long)blk->lf_start, 806 (long)blk->lf_end); 807 if (!TAILQ_EMPTY(&blk->lf_blkhd)) 808 panic("lf_printlist: bad list"); 809 } 810 printf("\n"); 811 } 812} 813#endif /* LOCKF_DEBUG */ 814