kern_lockf.c revision 177841
1/*- 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ 3 * Authors: Doug Rabson <dfr@rabson.org> 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27/*- 28 * Copyright (c) 1982, 1986, 1989, 1993 29 * The Regents of the University of California. All rights reserved. 30 * 31 * This code is derived from software contributed to Berkeley by 32 * Scooter Morris at Genentech Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 4. Neither the name of the University nor the names of its contributors 43 * may be used to endorse or promote products derived from this software 44 * without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 56 * SUCH DAMAGE. 
57 * 58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 59 */ 60 61#include <sys/cdefs.h> 62__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 177841 2008-04-01 16:07:01Z dfr $"); 63 64#include "opt_debug_lockf.h" 65 66#include <sys/param.h> 67#include <sys/systm.h> 68#include <sys/hash.h> 69#include <sys/kernel.h> 70#include <sys/limits.h> 71#include <sys/lock.h> 72#include <sys/mount.h> 73#include <sys/mutex.h> 74#include <sys/proc.h> 75#include <sys/sx.h> 76#include <sys/unistd.h> 77#include <sys/vnode.h> 78#include <sys/malloc.h> 79#include <sys/fcntl.h> 80#include <sys/lockf.h> 81#include <sys/taskqueue.h> 82 83#ifdef LOCKF_DEBUG 84#include <sys/sysctl.h> 85 86#include <ufs/ufs/quota.h> 87#include <ufs/ufs/inode.h> 88 89static int lockf_debug = 0; /* control debug output */ 90SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 91#endif 92 93MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 94 95struct owner_edge; 96struct owner_vertex; 97struct owner_vertex_list; 98struct owner_graph; 99 100#define NOLOCKF (struct lockf_entry *)0 101#define SELF 0x1 102#define OTHERS 0x2 103static void lf_init(void *); 104static int lf_hash_owner(caddr_t, struct flock *, int); 105static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, 106 int); 107static struct lockf_entry * 108 lf_alloc_lock(struct lock_owner *); 109static void lf_free_lock(struct lockf_entry *); 110static int lf_clearlock(struct lockf *, struct lockf_entry *); 111static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); 112static int lf_blocks(struct lockf_entry *, struct lockf_entry *); 113static void lf_free_edge(struct lockf_edge *); 114static struct lockf_edge * 115 lf_alloc_edge(void); 116static void lf_alloc_vertex(struct lockf_entry *); 117static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); 118static void lf_remove_edge(struct lockf_edge *); 119static void lf_remove_outgoing(struct lockf_entry *); 120static void lf_remove_incoming(struct lockf_entry *); 121static int lf_add_outgoing(struct lockf *, struct lockf_entry *); 122static int lf_add_incoming(struct lockf *, struct lockf_entry *); 123static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, 124 int); 125static struct lockf_entry * 126 lf_getblock(struct lockf *, struct lockf_entry *); 127static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); 128static void lf_insert_lock(struct lockf *, struct lockf_entry *); 129static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); 130static void lf_update_dependancies(struct lockf *, struct lockf_entry *, 131 int all, struct lockf_entry_list *); 132static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, 133 struct lockf_entry_list*); 134static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, 135 struct lockf_entry_list*); 136static int lf_setlock(struct lockf *, struct lockf_entry *, 137 struct vnode *, void **cookiep); 138static int lf_cancel(struct lockf *, struct lockf_entry *, void *); 139static void lf_split(struct lockf *, struct lockf_entry *, 140 struct lockf_entry *, struct lockf_entry_list *); 141#ifdef LOCKF_DEBUG 142static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 143 struct owner_vertex_list *path); 144static void graph_check(struct owner_graph *g, int checkorder); 145static void graph_print_vertices(struct owner_vertex_list *set); 146#endif 147static int graph_delta_forward(struct owner_graph *g, 148 struct owner_vertex *x, struct owner_vertex *y, 149 
struct owner_vertex_list *delta); 150static int graph_delta_backward(struct owner_graph *g, 151 struct owner_vertex *x, struct owner_vertex *y, 152 struct owner_vertex_list *delta); 153static int graph_add_indices(int *indices, int n, 154 struct owner_vertex_list *set); 155static int graph_assign_indices(struct owner_graph *g, int *indices, 156 int nextunused, struct owner_vertex_list *set); 157static int graph_add_edge(struct owner_graph *g, 158 struct owner_vertex *x, struct owner_vertex *y); 159static void graph_remove_edge(struct owner_graph *g, 160 struct owner_vertex *x, struct owner_vertex *y); 161static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g, 162 struct lock_owner *lo); 163static void graph_free_vertex(struct owner_graph *g, 164 struct owner_vertex *v); 165static struct owner_graph * graph_init(struct owner_graph *g); 166#ifdef LOCKF_DEBUG 167static void lf_print(char *, struct lockf_entry *); 168static void lf_printlist(char *, struct lockf_entry *); 169static void lf_print_owner(struct lock_owner *); 170#endif 171 172/* 173 * This structure is used to keep track of both local and remote lock 174 * owners. The lf_owner field of the struct lockf_entry points back at 175 * the lock owner structure. Each possible lock owner (local proc for 176 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid> 177 * pair for remote locks) is represented by a unique instance of 178 * struct lock_owner. 179 * 180 * If a lock owner has a lock that blocks some other lock or a lock 181 * that is waiting for some other lock, it also has a vertex in the 182 * owner_graph below. 183 * 184 * Locks: 185 * (s) locked by state->ls_lock 186 * (S) locked by lf_lock_states_lock 187 * (l) locked by lf_lock_owners_lock 188 * (g) locked by lf_owner_graph_lock 189 * (c) const until freeing 190 */ 191#define LOCK_OWNER_HASH_SIZE 256 192 193struct lock_owner { 194 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ 195 int lo_refs; /* (l) Number of locks referring to this */ 196 int lo_flags; /* (c) Flags passwd to lf_advlock */ 197 caddr_t lo_id; /* (c) Id value passed to lf_advlock */ 198 pid_t lo_pid; /* (c) Process Id of the lock owner */ 199 int lo_sysid; /* (c) System Id of the lock owner */ 200 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ 201}; 202 203LIST_HEAD(lock_owner_list, lock_owner); 204 205static struct sx lf_lock_states_lock; 206static struct lockf_list lf_lock_states; /* (S) */ 207static struct sx lf_lock_owners_lock; 208static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */ 209 210/* 211 * Structures for deadlock detection. 212 * 213 * We have two types of directed graph, the first is the set of locks, 214 * both active and pending on a vnode. Within this graph, active locks 215 * are terminal nodes in the graph (i.e. have no out-going 216 * edges). Pending locks have out-going edges to each blocking active 217 * lock that prevents the lock from being granted and also to each 218 * older pending lock that would block them if it was active. The 219 * graph for each vnode is naturally acyclic; new edges are only ever 220 * added to or from new nodes (either new pending locks which only add 221 * out-going edges or new active locks which only add in-coming edges) 222 * therefore they cannot create loops in the lock graph. 223 * 224 * The second graph is a global graph of lock owners. 
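 *
 * For example (an illustrative scenario, not taken from the code
 * itself): if owner A already holds an active write lock on bytes
 * [0..9] of a vnode and owner B then requests a write lock on
 * [5..15], B's request becomes a pending node in that vnode's lock
 * graph with an out-going edge to A's active lock, and the owner
 * graph gains the corresponding edge B -> A.
 *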
Each lock owner 225 * is a vertex in that graph and an edge is added to the graph 226 * whenever an edge is added to a vnode graph, with end points 227 * corresponding to owner of the new pending lock and the owner of the 228 * lock upon which it waits. In order to prevent deadlock, we only add 229 * an edge to this graph if the new edge would not create a cycle. 230 * 231 * The lock owner graph is topologically sorted, i.e. if a node has 232 * any outgoing edges, then it has an order strictly less than any 233 * node to which it has an outgoing edge. We preserve this ordering 234 * (and detect cycles) on edge insertion using Algorithm PK from the 235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237 * No. 1.7) 238 */ 239struct owner_vertex; 240 241struct owner_edge { 242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ 243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ 244 int e_refs; /* (g) number of times added */ 245 struct owner_vertex *e_from; /* (c) out-going from here */ 246 struct owner_vertex *e_to; /* (c) in-coming to here */ 247}; 248LIST_HEAD(owner_edge_list, owner_edge); 249 250struct owner_vertex { 251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ 252 uint32_t v_gen; /* (g) workspace for edge insertion */ 253 int v_order; /* (g) order of vertex in graph */ 254 struct owner_edge_list v_outedges;/* (g) list of out-edges */ 255 struct owner_edge_list v_inedges; /* (g) list of in-edges */ 256 struct lock_owner *v_owner; /* (c) corresponding lock owner */ 257}; 258TAILQ_HEAD(owner_vertex_list, owner_vertex); 259 260struct owner_graph { 261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */ 262 int g_size; /* (g) number of vertices */ 263 int g_space; /* (g) space allocated for vertices */ 264 int *g_indexbuf; /* (g) workspace for loop detection */ 265 uint32_t g_gen; /* (g) increment when re-ordering */ 266}; 267 268static struct sx lf_owner_graph_lock; 269static struct owner_graph lf_owner_graph; 270 271/* 272 * Initialise various structures and locks. 273 */ 274static void 275lf_init(void *dummy) 276{ 277 int i; 278 279 sx_init(&lf_lock_states_lock, "lock states lock"); 280 LIST_INIT(&lf_lock_states); 281 282 sx_init(&lf_lock_owners_lock, "lock owners lock"); 283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 284 LIST_INIT(&lf_lock_owners[i]); 285 286 sx_init(&lf_owner_graph_lock, "owner graph lock"); 287 graph_init(&lf_owner_graph); 288} 289SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); 290 291/* 292 * Generate a hash value for a lock owner. 293 */ 294static int 295lf_hash_owner(caddr_t id, struct flock *fl, int flags) 296{ 297 uint32_t h; 298 299 if (flags & F_REMOTE) { 300 h = HASHSTEP(0, fl->l_pid); 301 h = HASHSTEP(h, fl->l_sysid); 302 } else if (flags & F_FLOCK) { 303 h = ((uintptr_t) id) >> 7; 304 } else { 305 struct proc *p = (struct proc *) id; 306 h = HASHSTEP(0, p->p_pid); 307 h = HASHSTEP(h, 0); 308 } 309 310 return (h % LOCK_OWNER_HASH_SIZE); 311} 312 313/* 314 * Return true if a lock owner matches the details passed to 315 * lf_advlock. 
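 *
 * For example (hypothetical values): a remote request carrying
 * l_pid 1234 and l_sysid 2 matches only a lock_owner with
 * lo_pid == 1234 and lo_sysid == 2, whereas a local POSIX lock is
 * matched purely by the 'id' pointer (the struct proc of the caller)
 * and a local flock() lock by the struct file pointer passed as 'id'.
 *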
316 */ 317static int 318lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, 319 int flags) 320{ 321 if (flags & F_REMOTE) { 322 return lo->lo_pid == fl->l_pid 323 && lo->lo_sysid == fl->l_sysid; 324 } else { 325 return lo->lo_id == id; 326 } 327} 328 329static struct lockf_entry * 330lf_alloc_lock(struct lock_owner *lo) 331{ 332 struct lockf_entry *lf; 333 334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); 335 336#ifdef LOCKF_DEBUG 337 if (lockf_debug & 4) 338 printf("Allocated lock %p\n", lf); 339#endif 340 if (lo) { 341 sx_xlock(&lf_lock_owners_lock); 342 lo->lo_refs++; 343 sx_xunlock(&lf_lock_owners_lock); 344 lf->lf_owner = lo; 345 } 346 347 return (lf); 348} 349 350static void 351lf_free_lock(struct lockf_entry *lock) 352{ 353 /* 354 * Adjust the lock_owner reference count and 355 * reclaim the entry if this is the last lock 356 * for that owner. 357 */ 358 struct lock_owner *lo = lock->lf_owner; 359 if (lo) { 360 KASSERT(LIST_EMPTY(&lock->lf_outedges), 361 ("freeing lock with dependancies")); 362 KASSERT(LIST_EMPTY(&lock->lf_inedges), 363 ("freeing lock with dependants")); 364 sx_xlock(&lf_lock_owners_lock); 365 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); 366 lo->lo_refs--; 367 if (lo->lo_refs == 0) { 368#ifdef LOCKF_DEBUG 369 if (lockf_debug & 1) 370 printf("lf_free_lock: freeing lock owner %p\n", 371 lo); 372#endif 373 if (lo->lo_vertex) { 374 sx_xlock(&lf_owner_graph_lock); 375 graph_free_vertex(&lf_owner_graph, 376 lo->lo_vertex); 377 sx_xunlock(&lf_owner_graph_lock); 378 } 379 LIST_REMOVE(lo, lo_link); 380 free(lo, M_LOCKF); 381#ifdef LOCKF_DEBUG 382 if (lockf_debug & 4) 383 printf("Freed lock owner %p\n", lo); 384#endif 385 } 386 sx_unlock(&lf_lock_owners_lock); 387 } 388 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { 389 vrele(lock->lf_vnode); 390 lock->lf_vnode = NULL; 391 } 392#ifdef LOCKF_DEBUG 393 if (lockf_debug & 4) 394 printf("Freed lock %p\n", lock); 395#endif 396 free(lock, M_LOCKF); 397} 398 399/* 400 * Advisory record locking support 401 */ 402int 403lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, 404 u_quad_t size) 405{ 406 struct lockf *state, *freestate = NULL; 407 struct flock *fl = ap->a_fl; 408 struct lockf_entry *lock; 409 struct vnode *vp = ap->a_vp; 410 caddr_t id = ap->a_id; 411 int flags = ap->a_flags; 412 int hash; 413 struct lock_owner *lo; 414 off_t start, end, oadd; 415 int error; 416 417 /* 418 * Handle the F_UNLKSYS case first - no need to mess about 419 * creating a lock owner for this one. 420 */ 421 if (ap->a_op == F_UNLCKSYS) { 422 lf_clearremotesys(fl->l_sysid); 423 return (0); 424 } 425 426 /* 427 * Convert the flock structure into a start and end. 428 */ 429 switch (fl->l_whence) { 430 431 case SEEK_SET: 432 case SEEK_CUR: 433 /* 434 * Caller is responsible for adding any necessary offset 435 * when SEEK_CUR is used. 
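 *
 * (A SEEK_CUR request is normally rewritten by the caller into an
 * absolute offset before we are reached.)  As a worked example of
 * the conversion below, a hypothetical caller issuing
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 100,
 *	    .l_len = 50, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * locks the byte range [100..149]; l_len = 0 would lock to
 * end-of-file, i.e. [100..OFF_MAX], and l_len = -50 would lock the
 * 50 bytes before l_start, i.e. [50..99].
 *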
436 */ 437 start = fl->l_start; 438 break; 439 440 case SEEK_END: 441 if (size > OFF_MAX || 442 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) 443 return (EOVERFLOW); 444 start = size + fl->l_start; 445 break; 446 447 default: 448 return (EINVAL); 449 } 450 if (start < 0) 451 return (EINVAL); 452 if (fl->l_len < 0) { 453 if (start == 0) 454 return (EINVAL); 455 end = start - 1; 456 start += fl->l_len; 457 if (start < 0) 458 return (EINVAL); 459 } else if (fl->l_len == 0) { 460 end = OFF_MAX; 461 } else { 462 oadd = fl->l_len - 1; 463 if (oadd > OFF_MAX - start) 464 return (EOVERFLOW); 465 end = start + oadd; 466 } 467 /* 468 * Avoid the common case of unlocking when inode has no locks. 469 */ 470 if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) { 471 if (ap->a_op != F_SETLK) { 472 fl->l_type = F_UNLCK; 473 return (0); 474 } 475 } 476 477 /* 478 * Map our arguments to an existing lock owner or create one 479 * if this is the first time we have seen this owner. 480 */ 481 hash = lf_hash_owner(id, fl, flags); 482 sx_xlock(&lf_lock_owners_lock); 483 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link) 484 if (lf_owner_matches(lo, id, fl, flags)) 485 break; 486 if (!lo) { 487 /* 488 * We initialise the lock with a reference 489 * count which matches the new lockf_entry 490 * structure created below. 491 */ 492 lo = malloc(sizeof(struct lock_owner), M_LOCKF, 493 M_WAITOK|M_ZERO); 494#ifdef LOCKF_DEBUG 495 if (lockf_debug & 4) 496 printf("Allocated lock owner %p\n", lo); 497#endif 498 499 lo->lo_refs = 1; 500 lo->lo_flags = flags; 501 lo->lo_id = id; 502 if (flags & F_REMOTE) { 503 lo->lo_pid = fl->l_pid; 504 lo->lo_sysid = fl->l_sysid; 505 } else if (flags & F_FLOCK) { 506 lo->lo_pid = -1; 507 lo->lo_sysid = 0; 508 } else { 509 struct proc *p = (struct proc *) id; 510 lo->lo_pid = p->p_pid; 511 lo->lo_sysid = 0; 512 } 513 lo->lo_vertex = NULL; 514 515#ifdef LOCKF_DEBUG 516 if (lockf_debug & 1) { 517 printf("lf_advlockasync: new lock owner %p ", lo); 518 lf_print_owner(lo); 519 printf("\n"); 520 } 521#endif 522 523 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link); 524 } else { 525 /* 526 * We have seen this lock owner before, increase its 527 * reference count to account for the new lockf_entry 528 * structure we create below. 529 */ 530 lo->lo_refs++; 531 } 532 sx_xunlock(&lf_lock_owners_lock); 533 534 /* 535 * Create the lockf structure. We initialise the lf_owner 536 * field here instead of in lf_alloc_lock() to avoid paying 537 * the lf_lock_owners_lock tax twice. 538 */ 539 lock = lf_alloc_lock(NULL); 540 lock->lf_start = start; 541 lock->lf_end = end; 542 lock->lf_owner = lo; 543 lock->lf_vnode = vp; 544 if (flags & F_REMOTE) { 545 /* 546 * For remote locks, the caller may release its ref to 547 * the vnode at any time - we have to ref it here to 548 * prevent it from being recycled unexpectedly. 549 */ 550 vref(vp); 551 } 552 553 /* 554 * XXX The problem is that VTOI is ufs specific, so it will 555 * break LOCKF_DEBUG for all other FS's other than UFS because 556 * it casts the vnode->data ptr to struct inode *. 557 */ 558/* lock->lf_inode = VTOI(ap->a_vp); */ 559 lock->lf_inode = (struct inode *)0; 560 lock->lf_type = fl->l_type; 561 LIST_INIT(&lock->lf_outedges); 562 LIST_INIT(&lock->lf_inedges); 563 lock->lf_async_task = ap->a_task; 564 lock->lf_flags = ap->a_flags; 565 566 /* 567 * Do the requested operation. 
First find our state structure 568 * and create a new one if necessary - the caller's *statep 569 * variable and the state's ls_threads count is protected by 570 * the vnode interlock. 571 */ 572 VI_LOCK(vp); 573 574 /* 575 * Allocate a state structure if necessary. 576 */ 577 state = *statep; 578 if (state == NULL) { 579 struct lockf *ls; 580 581 VI_UNLOCK(vp); 582 583 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); 584 sx_init(&ls->ls_lock, "ls_lock"); 585 LIST_INIT(&ls->ls_active); 586 LIST_INIT(&ls->ls_pending); 587 ls->ls_threads = 1; 588 589 sx_xlock(&lf_lock_states_lock); 590 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); 591 sx_xunlock(&lf_lock_states_lock); 592 593 /* 594 * Cope if we lost a race with some other thread while 595 * trying to allocate memory. 596 */ 597 VI_LOCK(vp); 598 if ((*statep) == NULL) { 599 state = *statep = ls; 600 VI_UNLOCK(vp); 601 } else { 602 state = *statep; 603 state->ls_threads++; 604 VI_UNLOCK(vp); 605 606 sx_xlock(&lf_lock_states_lock); 607 LIST_REMOVE(ls, ls_link); 608 sx_xunlock(&lf_lock_states_lock); 609 sx_destroy(&ls->ls_lock); 610 free(ls, M_LOCKF); 611 } 612 } else { 613 state->ls_threads++; 614 VI_UNLOCK(vp); 615 } 616 617 sx_xlock(&state->ls_lock); 618 switch(ap->a_op) { 619 case F_SETLK: 620 error = lf_setlock(state, lock, vp, ap->a_cookiep); 621 break; 622 623 case F_UNLCK: 624 error = lf_clearlock(state, lock); 625 lf_free_lock(lock); 626 break; 627 628 case F_GETLK: 629 error = lf_getlock(state, lock, fl); 630 lf_free_lock(lock); 631 break; 632 633 case F_CANCEL: 634 if (ap->a_cookiep) 635 error = lf_cancel(state, lock, *ap->a_cookiep); 636 else 637 error = EINVAL; 638 lf_free_lock(lock); 639 break; 640 641 default: 642 lf_free_lock(lock); 643 error = EINVAL; 644 break; 645 } 646 647#ifdef INVARIANTS 648 /* 649 * Check for some can't happen stuff. In this case, the active 650 * lock list becoming disordered or containing mutually 651 * blocking locks. We also check the pending list for locks 652 * which should be active (i.e. have no out-going edges). 653 */ 654 LIST_FOREACH(lock, &state->ls_active, lf_link) { 655 struct lockf_entry *lf; 656 if (LIST_NEXT(lock, lf_link)) 657 KASSERT((lock->lf_start 658 <= LIST_NEXT(lock, lf_link)->lf_start), 659 ("locks disordered")); 660 LIST_FOREACH(lf, &state->ls_active, lf_link) { 661 if (lock == lf) 662 break; 663 KASSERT(!lf_blocks(lock, lf), 664 ("two conflicting active locks")); 665 if (lock->lf_owner == lf->lf_owner) 666 KASSERT(!lf_overlaps(lock, lf), 667 ("two overlapping locks from same owner")); 668 } 669 } 670 LIST_FOREACH(lock, &state->ls_pending, lf_link) { 671 KASSERT(!LIST_EMPTY(&lock->lf_outedges), 672 ("pending lock which should be active")); 673 } 674#endif 675 sx_xunlock(&state->ls_lock); 676 677 /* 678 * If we have removed the last active lock on the vnode and 679 * this is the last thread that was in-progress, we can free 680 * the state structure. We update the caller's pointer inside 681 * the vnode interlock but call free outside. 682 * 683 * XXX alternatively, keep the state structure around until 684 * the filesystem recycles - requires a callback from the 685 * filesystem. 
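 *
 * (Concretely, the code below only frees the state when ls_active is
 * empty and ls_threads has dropped to zero; if another thread is
 * still inside lf_advlockasync for this vnode, its non-zero
 * ls_threads count prevents the free here and leaves it to a later
 * call once no active locks remain.)
 *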
686 */ 687 VI_LOCK(vp); 688 689 state->ls_threads--; 690 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { 691 KASSERT(LIST_EMPTY(&state->ls_pending), 692 ("freeing state with pending locks")); 693 freestate = state; 694 *statep = NULL; 695 } 696 697 VI_UNLOCK(vp); 698 699 if (freestate) { 700 sx_xlock(&lf_lock_states_lock); 701 LIST_REMOVE(freestate, ls_link); 702 sx_xunlock(&lf_lock_states_lock); 703 sx_destroy(&freestate->ls_lock); 704 free(freestate, M_LOCKF); 705 } 706 return (error); 707} 708 709int 710lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) 711{ 712 struct vop_advlockasync_args a; 713 714 a.a_vp = ap->a_vp; 715 a.a_id = ap->a_id; 716 a.a_op = ap->a_op; 717 a.a_fl = ap->a_fl; 718 a.a_flags = ap->a_flags; 719 a.a_task = NULL; 720 a.a_cookiep = NULL; 721 722 return (lf_advlockasync(&a, statep, size)); 723} 724 725/* 726 * Return non-zero if locks 'x' and 'y' overlap. 727 */ 728static int 729lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) 730{ 731 732 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); 733} 734 735/* 736 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). 737 */ 738static int 739lf_blocks(struct lockf_entry *x, struct lockf_entry *y) 740{ 741 742 return x->lf_owner != y->lf_owner 743 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) 744 && lf_overlaps(x, y); 745} 746 747/* 748 * Allocate a lock edge from the free list 749 */ 750static struct lockf_edge * 751lf_alloc_edge(void) 752{ 753 754 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); 755} 756 757/* 758 * Free a lock edge. 759 */ 760static void 761lf_free_edge(struct lockf_edge *e) 762{ 763 764 free(e, M_LOCKF); 765} 766 767 768/* 769 * Ensure that the lock's owner has a corresponding vertex in the 770 * owner graph. 771 */ 772static void 773lf_alloc_vertex(struct lockf_entry *lock) 774{ 775 struct owner_graph *g = &lf_owner_graph; 776 777 if (!lock->lf_owner->lo_vertex) 778 lock->lf_owner->lo_vertex = 779 graph_alloc_vertex(g, lock->lf_owner); 780} 781 782/* 783 * Attempt to record an edge from lock x to lock y. Return EDEADLK if 784 * the new edge would cause a cycle in the owner graph. 785 */ 786static int 787lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) 788{ 789 struct owner_graph *g = &lf_owner_graph; 790 struct lockf_edge *e; 791 int error; 792 793#ifdef INVARIANTS 794 LIST_FOREACH(e, &x->lf_outedges, le_outlink) 795 KASSERT(e->le_to != y, ("adding lock edge twice")); 796#endif 797 798 /* 799 * Make sure the two owners have entries in the owner graph. 800 */ 801 lf_alloc_vertex(x); 802 lf_alloc_vertex(y); 803 804 error = graph_add_edge(g, x->lf_owner->lo_vertex, 805 y->lf_owner->lo_vertex); 806 if (error) 807 return (error); 808 809 e = lf_alloc_edge(); 810 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); 811 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); 812 e->le_from = x; 813 e->le_to = y; 814 815 return (0); 816} 817 818/* 819 * Remove an edge from the lock graph. 820 */ 821static void 822lf_remove_edge(struct lockf_edge *e) 823{ 824 struct owner_graph *g = &lf_owner_graph; 825 struct lockf_entry *x = e->le_from; 826 struct lockf_entry *y = e->le_to; 827 828 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); 829 LIST_REMOVE(e, le_outlink); 830 LIST_REMOVE(e, le_inlink); 831 e->le_from = NULL; 832 e->le_to = NULL; 833 lf_free_edge(e); 834} 835 836/* 837 * Remove all out-going edges from lock x. 
838 */ 839static void 840lf_remove_outgoing(struct lockf_entry *x) 841{ 842 struct lockf_edge *e; 843 844 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { 845 lf_remove_edge(e); 846 } 847} 848 849/* 850 * Remove all in-coming edges from lock x. 851 */ 852static void 853lf_remove_incoming(struct lockf_entry *x) 854{ 855 struct lockf_edge *e; 856 857 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { 858 lf_remove_edge(e); 859 } 860} 861 862/* 863 * Walk the list of locks for the file and create an out-going edge 864 * from lock to each blocking lock. 865 */ 866static int 867lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) 868{ 869 struct lockf_entry *overlap; 870 int error; 871 872 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 873 /* 874 * We may assume that the active list is sorted by 875 * lf_start. 876 */ 877 if (overlap->lf_start > lock->lf_end) 878 break; 879 if (!lf_blocks(lock, overlap)) 880 continue; 881 882 /* 883 * We've found a blocking lock. Add the corresponding 884 * edge to the graphs and see if it would cause a 885 * deadlock. 886 */ 887 error = lf_add_edge(lock, overlap); 888 889 /* 890 * The only error that lf_add_edge returns is EDEADLK. 891 * Remove any edges we added and return the error. 892 */ 893 if (error) { 894 lf_remove_outgoing(lock); 895 return (error); 896 } 897 } 898 899 /* 900 * We also need to add edges to sleeping locks that block 901 * us. This ensures that lf_wakeup_lock cannot grant two 902 * mutually blocking locks simultaneously and also enforces a 903 * 'first come, first served' fairness model. Note that this 904 * only happens if we are blocked by at least one active lock 905 * due to the call to lf_getblock in lf_setlock below. 906 */ 907 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 908 if (!lf_blocks(lock, overlap)) 909 continue; 910 /* 911 * We've found a blocking lock. Add the corresponding 912 * edge to the graphs and see if it would cause a 913 * deadlock. 914 */ 915 error = lf_add_edge(lock, overlap); 916 917 /* 918 * The only error that lf_add_edge returns is EDEADLK. 919 * Remove any edges we added and return the error. 920 */ 921 if (error) { 922 lf_remove_outgoing(lock); 923 return (error); 924 } 925 } 926 927 return (0); 928} 929 930/* 931 * Walk the list of pending locks for the file and create an in-coming 932 * edge from lock to each blocking lock. 933 */ 934static int 935lf_add_incoming(struct lockf *state, struct lockf_entry *lock) 936{ 937 struct lockf_entry *overlap; 938 int error; 939 940 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 941 if (!lf_blocks(lock, overlap)) 942 continue; 943 944 /* 945 * We've found a blocking lock. Add the corresponding 946 * edge to the graphs and see if it would cause a 947 * deadlock. 948 */ 949 error = lf_add_edge(overlap, lock); 950 951 /* 952 * The only error that lf_add_edge returns is EDEADLK. 953 * Remove any edges we added and return the error. 954 */ 955 if (error) { 956 lf_remove_incoming(lock); 957 return (error); 958 } 959 } 960 return (0); 961} 962 963/* 964 * Insert lock into the active list, keeping list entries ordered by 965 * increasing values of lf_start. 
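 *
 * For example (hypothetical ranges): with an active list of
 * [0..4], [10..19], [30..OFF_MAX], a new lock covering [5..9] is
 * linked in between [0..4] and [10..19].  Keeping the list sorted
 * lets lf_getblock() and lf_add_outgoing() stop scanning as soon as
 * an entry starts beyond the end of the range of interest.
 *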
966 */ 967static void 968lf_insert_lock(struct lockf *state, struct lockf_entry *lock) 969{ 970 struct lockf_entry *lf, *lfprev; 971 972 if (LIST_EMPTY(&state->ls_active)) { 973 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); 974 return; 975 } 976 977 lfprev = NULL; 978 LIST_FOREACH(lf, &state->ls_active, lf_link) { 979 if (lf->lf_start > lock->lf_start) { 980 LIST_INSERT_BEFORE(lf, lock, lf_link); 981 return; 982 } 983 lfprev = lf; 984 } 985 LIST_INSERT_AFTER(lfprev, lock, lf_link); 986} 987 988/* 989 * Wake up a sleeping lock and remove it from the pending list now 990 * that all its dependancies have been resolved. The caller should 991 * arrange for the lock to be added to the active list, adjusting any 992 * existing locks for the same owner as needed. 993 */ 994static void 995lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) 996{ 997 998 /* 999 * Remove from ls_pending list and wake up the caller 1000 * or start the async notification, as appropriate. 1001 */ 1002 LIST_REMOVE(wakelock, lf_link); 1003#ifdef LOCKF_DEBUG 1004 if (lockf_debug & 1) 1005 lf_print("lf_wakeup_lock: awakening", wakelock); 1006#endif /* LOCKF_DEBUG */ 1007 if (wakelock->lf_async_task) { 1008 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); 1009 } else { 1010 wakeup(wakelock); 1011 } 1012} 1013 1014/* 1015 * Re-check all dependant locks and remove edges to locks that we no 1016 * longer block. If 'all' is non-zero, the lock has been removed and 1017 * we must remove all the dependancies, otherwise it has simply been 1018 * reduced but remains active. Any pending locks which have been been 1019 * unblocked are added to 'granted' 1020 */ 1021static void 1022lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, 1023 struct lockf_entry_list *granted) 1024{ 1025 struct lockf_edge *e, *ne; 1026 struct lockf_entry *deplock; 1027 1028 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { 1029 deplock = e->le_from; 1030 if (all || !lf_blocks(lock, deplock)) { 1031 sx_xlock(&lf_owner_graph_lock); 1032 lf_remove_edge(e); 1033 sx_xunlock(&lf_owner_graph_lock); 1034 if (LIST_EMPTY(&deplock->lf_outedges)) { 1035 lf_wakeup_lock(state, deplock); 1036 LIST_INSERT_HEAD(granted, deplock, lf_link); 1037 } 1038 } 1039 } 1040} 1041 1042/* 1043 * Set the start of an existing active lock, updating dependancies and 1044 * adding any newly woken locks to 'granted'. 1045 */ 1046static void 1047lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, 1048 struct lockf_entry_list *granted) 1049{ 1050 1051 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); 1052 lock->lf_start = new_start; 1053 LIST_REMOVE(lock, lf_link); 1054 lf_insert_lock(state, lock); 1055 lf_update_dependancies(state, lock, FALSE, granted); 1056} 1057 1058/* 1059 * Set the end of an existing active lock, updating dependancies and 1060 * adding any newly woken locks to 'granted'. 1061 */ 1062static void 1063lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, 1064 struct lockf_entry_list *granted) 1065{ 1066 1067 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); 1068 lock->lf_end = new_end; 1069 lf_update_dependancies(state, lock, FALSE, granted); 1070} 1071 1072/* 1073 * Add a lock to the active list, updating or removing any current 1074 * locks owned by the same owner and processing any pending locks that 1075 * become unblocked as a result. This code is also used for unlock 1076 * since the logic for updating existing locks is identical. 
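 *
 * For instance (hypothetical ranges), if an owner downgrades its
 * exclusive lock on [0..9] to a shared lock, any pending shared
 * locks that only conflicted with the old exclusive lock lose their
 * last out-going edges in lf_update_dependancies() and are handed
 * back on the 'granted' list.
 *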
1077 * 1078 * As a result of processing the new lock, we may unblock existing 1079 * pending locks as a result of downgrading/unlocking. We simply 1080 * activate the newly granted locks by looping. 1081 * 1082 * Since the new lock already has its dependancies set up, we always 1083 * add it to the list (unless its an unlock request). This may 1084 * fragment the lock list in some pathological cases but its probably 1085 * not a real problem. 1086 */ 1087static void 1088lf_activate_lock(struct lockf *state, struct lockf_entry *lock) 1089{ 1090 struct lockf_entry *overlap, *lf; 1091 struct lockf_entry_list granted; 1092 int ovcase; 1093 1094 LIST_INIT(&granted); 1095 LIST_INSERT_HEAD(&granted, lock, lf_link); 1096 1097 while (!LIST_EMPTY(&granted)) { 1098 lock = LIST_FIRST(&granted); 1099 LIST_REMOVE(lock, lf_link); 1100 1101 /* 1102 * Skip over locks owned by other processes. Handle 1103 * any locks that overlap and are owned by ourselves. 1104 */ 1105 overlap = LIST_FIRST(&state->ls_active); 1106 for (;;) { 1107 ovcase = lf_findoverlap(&overlap, lock, SELF); 1108 1109#ifdef LOCKF_DEBUG 1110 if (ovcase && (lockf_debug & 2)) { 1111 printf("lf_setlock: overlap %d", ovcase); 1112 lf_print("", overlap); 1113 } 1114#endif 1115 /* 1116 * Six cases: 1117 * 0) no overlap 1118 * 1) overlap == lock 1119 * 2) overlap contains lock 1120 * 3) lock contains overlap 1121 * 4) overlap starts before lock 1122 * 5) overlap ends after lock 1123 */ 1124 switch (ovcase) { 1125 case 0: /* no overlap */ 1126 break; 1127 1128 case 1: /* overlap == lock */ 1129 /* 1130 * We have already setup the 1131 * dependants for the new lock, taking 1132 * into account a possible downgrade 1133 * or unlock. Remove the old lock. 1134 */ 1135 LIST_REMOVE(overlap, lf_link); 1136 lf_update_dependancies(state, overlap, TRUE, 1137 &granted); 1138 lf_free_lock(overlap); 1139 break; 1140 1141 case 2: /* overlap contains lock */ 1142 /* 1143 * Just split the existing lock. 1144 */ 1145 lf_split(state, overlap, lock, &granted); 1146 break; 1147 1148 case 3: /* lock contains overlap */ 1149 /* 1150 * Delete the overlap and advance to 1151 * the next entry in the list. 1152 */ 1153 lf = LIST_NEXT(overlap, lf_link); 1154 LIST_REMOVE(overlap, lf_link); 1155 lf_update_dependancies(state, overlap, TRUE, 1156 &granted); 1157 lf_free_lock(overlap); 1158 overlap = lf; 1159 continue; 1160 1161 case 4: /* overlap starts before lock */ 1162 /* 1163 * Just update the overlap end and 1164 * move on. 1165 */ 1166 lf_set_end(state, overlap, lock->lf_start - 1, 1167 &granted); 1168 overlap = LIST_NEXT(overlap, lf_link); 1169 continue; 1170 1171 case 5: /* overlap ends after lock */ 1172 /* 1173 * Change the start of overlap and 1174 * re-insert. 1175 */ 1176 lf_set_start(state, overlap, lock->lf_end + 1, 1177 &granted); 1178 break; 1179 } 1180 break; 1181 } 1182#ifdef LOCKF_DEBUG 1183 if (lockf_debug & 1) { 1184 if (lock->lf_type != F_UNLCK) 1185 lf_print("lf_activate_lock: activated", lock); 1186 else 1187 lf_print("lf_activate_lock: unlocked", lock); 1188 lf_printlist("lf_activate_lock", lock); 1189 } 1190#endif /* LOCKF_DEBUG */ 1191 if (lock->lf_type != F_UNLCK) 1192 lf_insert_lock(state, lock); 1193 } 1194} 1195 1196/* 1197 * Cancel a pending lock request, either as a result of a signal or a 1198 * cancel request for an async lock. 
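 *
 * (For the async case the request is identified by the cookie that
 * lf_setlock() stored via *cookiep when it returned EINPROGRESS; the
 * caller later passes that cookie back with F_CANCEL and lf_cancel()
 * finds the matching entry on ls_pending before calling here.)
 *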
1199 */ 1200static void 1201lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1202{ 1203 struct lockf_entry_list granted; 1204 1205 /* 1206 * Note it is theoretically possible that cancelling this lock 1207 * may allow some other pending lock to become 1208 * active. Consider this case: 1209 * 1210 * Owner Action Result Dependancies 1211 * 1212 * A: lock [0..0] succeeds 1213 * B: lock [2..2] succeeds 1214 * C: lock [1..2] blocked C->B 1215 * D: lock [0..1] blocked C->B,D->A,D->C 1216 * A: unlock [0..0] C->B,D->C 1217 * C: cancel [1..2] 1218 */ 1219 1220 LIST_REMOVE(lock, lf_link); 1221 1222 /* 1223 * Removing out-going edges is simple. 1224 */ 1225 sx_xlock(&lf_owner_graph_lock); 1226 lf_remove_outgoing(lock); 1227 sx_xunlock(&lf_owner_graph_lock); 1228 1229 /* 1230 * Removing in-coming edges may allow some other lock to 1231 * become active - we use lf_update_dependancies to figure 1232 * this out. 1233 */ 1234 LIST_INIT(&granted); 1235 lf_update_dependancies(state, lock, TRUE, &granted); 1236 lf_free_lock(lock); 1237 1238 /* 1239 * Feed any newly active locks to lf_activate_lock. 1240 */ 1241 while (!LIST_EMPTY(&granted)) { 1242 lock = LIST_FIRST(&granted); 1243 LIST_REMOVE(lock, lf_link); 1244 lf_activate_lock(state, lock); 1245 } 1246} 1247 1248/* 1249 * Set a byte-range lock. 1250 */ 1251static int 1252lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1253 void **cookiep) 1254{ 1255 struct lockf_entry *block; 1256 static char lockstr[] = "lockf"; 1257 int priority, error; 1258 1259#ifdef LOCKF_DEBUG 1260 if (lockf_debug & 1) 1261 lf_print("lf_setlock", lock); 1262#endif /* LOCKF_DEBUG */ 1263 1264 /* 1265 * Set the priority 1266 */ 1267 priority = PLOCK; 1268 if (lock->lf_type == F_WRLCK) 1269 priority += 4; 1270 priority |= PCATCH; 1271 /* 1272 * Scan lock list for this file looking for locks that would block us. 1273 */ 1274 while ((block = lf_getblock(state, lock))) { 1275 /* 1276 * Free the structure and return if nonblocking. 1277 */ 1278 if ((lock->lf_flags & F_WAIT) == 0 1279 && lock->lf_async_task == NULL) { 1280 lf_free_lock(lock); 1281 error = EAGAIN; 1282 goto out; 1283 } 1284 1285 /* 1286 * We are blocked. Create edges to each blocking lock, 1287 * checking for deadlock using the owner graph. For 1288 * simplicity, we run deadlock detection for all 1289 * locks, posix and otherwise. 1290 */ 1291 sx_xlock(&lf_owner_graph_lock); 1292 error = lf_add_outgoing(state, lock); 1293 sx_xunlock(&lf_owner_graph_lock); 1294 1295 if (error) { 1296#ifdef LOCKF_DEBUG 1297 if (lockf_debug & 1) 1298 lf_print("lf_setlock: deadlock", lock); 1299#endif 1300 lf_free_lock(lock); 1301 goto out; 1302 } 1303 1304 /* 1305 * For flock type locks, we must first remove 1306 * any shared locks that we hold before we sleep 1307 * waiting for an exclusive lock. 1308 */ 1309 if ((lock->lf_flags & F_FLOCK) && 1310 lock->lf_type == F_WRLCK) { 1311 lock->lf_type = F_UNLCK; 1312 lf_activate_lock(state, lock); 1313 lock->lf_type = F_WRLCK; 1314 } 1315 /* 1316 * We have added edges to everything that blocks 1317 * us. Sleep until they all go away. 
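 *
 * (That is, the lock sits on ls_pending until lf_update_dependancies()
 * has removed its last out-going edge, at which point lf_wakeup_lock()
 * either wakes the sleeper below or enqueues the async task.)
 *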
1318 */ 1319 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 1320#ifdef LOCKF_DEBUG 1321 if (lockf_debug & 1) { 1322 struct lockf_edge *e; 1323 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1324 lf_print("lf_setlock: blocking on", e->le_to); 1325 lf_printlist("lf_setlock", e->le_to); 1326 } 1327 } 1328#endif /* LOCKF_DEBUG */ 1329 1330 if ((lock->lf_flags & F_WAIT) == 0) { 1331 /* 1332 * The caller requested async notification - 1333 * this callback happens when the blocking 1334 * lock is released, allowing the caller to 1335 * make another attempt to take the lock. 1336 */ 1337 *cookiep = (void *) lock; 1338 error = EINPROGRESS; 1339 goto out; 1340 } 1341 1342 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 1343 /* 1344 * We may have been awakened by a signal and/or by a 1345 * debugger continuing us (in which cases we must 1346 * remove our lock graph edges) and/or by another 1347 * process releasing a lock (in which case our edges 1348 * have already been removed and we have been moved to 1349 * the active list). 1350 * 1351 * Note that it is possible to receive a signal after 1352 * we were successfully woken (and moved to the active 1353 * list) but before we resumed execution. In this 1354 * case, our lf_outedges list will be clear. We 1355 * pretend there was no error. 1356 * 1357 * Note also, if we have been sleeping long enough, we 1358 * may now have incoming edges from some newer lock 1359 * which is waiting behind us in the queue. 1360 */ 1361 if (LIST_EMPTY(&lock->lf_outedges)) { 1362 error = 0; 1363 } else { 1364 lf_cancel_lock(state, lock); 1365 goto out; 1366 } 1367#ifdef LOCKF_DEBUG 1368 if (lockf_debug & 1) { 1369 lf_print("lf_setlock: granted", lock); 1370 } 1371#endif 1372 goto out; 1373 } 1374 /* 1375 * It looks like we are going to grant the lock. First add 1376 * edges from any currently pending lock that the new lock 1377 * would block. 1378 */ 1379 sx_xlock(&lf_owner_graph_lock); 1380 error = lf_add_incoming(state, lock); 1381 sx_xunlock(&lf_owner_graph_lock); 1382 if (error) { 1383#ifdef LOCKF_DEBUG 1384 if (lockf_debug & 1) 1385 lf_print("lf_setlock: deadlock", lock); 1386#endif 1387 lf_free_lock(lock); 1388 goto out; 1389 } 1390 1391 /* 1392 * No blocks!! Add the lock. Note that we will 1393 * downgrade or upgrade any overlapping locks this 1394 * process already owns. 1395 */ 1396 lf_activate_lock(state, lock); 1397 error = 0; 1398out: 1399 return (error); 1400} 1401 1402/* 1403 * Remove a byte-range lock on an inode. 1404 * 1405 * Generally, find the lock (or an overlap to that lock) 1406 * and remove it (or shrink it), then wakeup anyone we can. 1407 */ 1408static int 1409lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 1410{ 1411 struct lockf_entry *overlap; 1412 1413 overlap = LIST_FIRST(&state->ls_active); 1414 1415 if (overlap == NOLOCKF) 1416 return (0); 1417#ifdef LOCKF_DEBUG 1418 if (unlock->lf_type != F_UNLCK) 1419 panic("lf_clearlock: bad type"); 1420 if (lockf_debug & 1) 1421 lf_print("lf_clearlock", unlock); 1422#endif /* LOCKF_DEBUG */ 1423 1424 lf_activate_lock(state, unlock); 1425 1426 return (0); 1427} 1428 1429/* 1430 * Check whether there is a blocking lock, and if so return its 1431 * details in '*fl'. 
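 *
 * For example (hypothetical values): if the blocking lock covers
 * [100..OFF_MAX], the caller's flock is filled in with l_start = 100
 * and l_len = 0 (meaning "to end of file"), along with the blocker's
 * type, pid and sysid.
 *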
1432 */ 1433static int 1434lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 1435{ 1436 struct lockf_entry *block; 1437 1438#ifdef LOCKF_DEBUG 1439 if (lockf_debug & 1) 1440 lf_print("lf_getlock", lock); 1441#endif /* LOCKF_DEBUG */ 1442 1443 if ((block = lf_getblock(state, lock))) { 1444 fl->l_type = block->lf_type; 1445 fl->l_whence = SEEK_SET; 1446 fl->l_start = block->lf_start; 1447 if (block->lf_end == OFF_MAX) 1448 fl->l_len = 0; 1449 else 1450 fl->l_len = block->lf_end - block->lf_start + 1; 1451 fl->l_pid = block->lf_owner->lo_pid; 1452 fl->l_sysid = block->lf_owner->lo_sysid; 1453 } else { 1454 fl->l_type = F_UNLCK; 1455 } 1456 return (0); 1457} 1458 1459/* 1460 * Cancel an async lock request. 1461 */ 1462static int 1463lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1464{ 1465 struct lockf_entry *reallock; 1466 1467 /* 1468 * We need to match this request with an existing lock 1469 * request. 1470 */ 1471 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1472 if ((void *) reallock == cookie) { 1473 /* 1474 * Double-check that this lock looks right 1475 * (maybe use a rolling ID for the cancel 1476 * cookie instead?) 1477 */ 1478 if (!(reallock->lf_vnode == lock->lf_vnode 1479 && reallock->lf_start == lock->lf_start 1480 && reallock->lf_end == lock->lf_end)) { 1481 return (ENOENT); 1482 } 1483 1484 /* 1485 * Make sure this lock was async and then just 1486 * remove it from its wait lists. 1487 */ 1488 if (!reallock->lf_async_task) { 1489 return (ENOENT); 1490 } 1491 1492 /* 1493 * Note that since any other thread must take 1494 * state->ls_lock before it can possibly 1495 * trigger the async callback, we are safe 1496 * from a race with lf_wakeup_lock, i.e. we 1497 * can free the lock (actually our caller does 1498 * this). 1499 */ 1500 lf_cancel_lock(state, reallock); 1501 return (0); 1502 } 1503 } 1504 1505 /* 1506 * We didn't find a matching lock - not much we can do here. 1507 */ 1508 return (ENOENT); 1509} 1510 1511/* 1512 * Walk the list of locks for an inode and 1513 * return the first blocking lock. 1514 */ 1515static struct lockf_entry * 1516lf_getblock(struct lockf *state, struct lockf_entry *lock) 1517{ 1518 struct lockf_entry *overlap; 1519 1520 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 1521 /* 1522 * We may assume that the active list is sorted by 1523 * lf_start. 1524 */ 1525 if (overlap->lf_start > lock->lf_end) 1526 break; 1527 if (!lf_blocks(lock, overlap)) 1528 continue; 1529 return (overlap); 1530 } 1531 return (NOLOCKF); 1532} 1533 1534/* 1535 * Walk the list of locks for an inode to find an overlapping lock (if 1536 * any) and return a classification of that overlap. 1537 * 1538 * Arguments: 1539 * *overlap The place in the lock list to start looking 1540 * lock The lock which is being tested 1541 * type Pass 'SELF' to test only locks with the same 1542 * owner as lock, or 'OTHER' to test only locks 1543 * with a different owner 1544 * 1545 * Returns one of six values: 1546 * 0) no overlap 1547 * 1) overlap == lock 1548 * 2) overlap contains lock 1549 * 3) lock contains overlap 1550 * 4) overlap starts before lock 1551 * 5) overlap ends after lock 1552 * 1553 * If there is an overlapping lock, '*overlap' is set to point at the 1554 * overlapping lock. 1555 * 1556 * NOTE: this returns only the FIRST overlapping lock. There 1557 * may be more than one. 
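 *
 * As an illustration with hypothetical ranges, for a lock covering
 * [10..19] the cases above classify candidate entries as follows:
 *
 *	[0..5]     case 0 (no overlap)
 *	[10..19]   case 1 (overlap == lock)
 *	[5..25]    case 2 (overlap contains lock)
 *	[12..15]   case 3 (lock contains overlap)
 *	[5..15]    case 4 (overlap starts before lock)
 *	[15..25]   case 5 (overlap ends after lock)
 *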
1558 */ 1559static int 1560lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) 1561{ 1562 struct lockf_entry *lf; 1563 off_t start, end; 1564 int res; 1565 1566 if ((*overlap) == NOLOCKF) { 1567 return (0); 1568 } 1569#ifdef LOCKF_DEBUG 1570 if (lockf_debug & 2) 1571 lf_print("lf_findoverlap: looking for overlap in", lock); 1572#endif /* LOCKF_DEBUG */ 1573 start = lock->lf_start; 1574 end = lock->lf_end; 1575 res = 0; 1576 while (*overlap) { 1577 lf = *overlap; 1578 if (lf->lf_start > end) 1579 break; 1580 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || 1581 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { 1582 *overlap = LIST_NEXT(lf, lf_link); 1583 continue; 1584 } 1585#ifdef LOCKF_DEBUG 1586 if (lockf_debug & 2) 1587 lf_print("\tchecking", lf); 1588#endif /* LOCKF_DEBUG */ 1589 /* 1590 * OK, check for overlap 1591 * 1592 * Six cases: 1593 * 0) no overlap 1594 * 1) overlap == lock 1595 * 2) overlap contains lock 1596 * 3) lock contains overlap 1597 * 4) overlap starts before lock 1598 * 5) overlap ends after lock 1599 */ 1600 if (start > lf->lf_end) { 1601 /* Case 0 */ 1602#ifdef LOCKF_DEBUG 1603 if (lockf_debug & 2) 1604 printf("no overlap\n"); 1605#endif /* LOCKF_DEBUG */ 1606 *overlap = LIST_NEXT(lf, lf_link); 1607 continue; 1608 } 1609 if (lf->lf_start == start && lf->lf_end == end) { 1610 /* Case 1 */ 1611#ifdef LOCKF_DEBUG 1612 if (lockf_debug & 2) 1613 printf("overlap == lock\n"); 1614#endif /* LOCKF_DEBUG */ 1615 res = 1; 1616 break; 1617 } 1618 if (lf->lf_start <= start && lf->lf_end >= end) { 1619 /* Case 2 */ 1620#ifdef LOCKF_DEBUG 1621 if (lockf_debug & 2) 1622 printf("overlap contains lock\n"); 1623#endif /* LOCKF_DEBUG */ 1624 res = 2; 1625 break; 1626 } 1627 if (start <= lf->lf_start && end >= lf->lf_end) { 1628 /* Case 3 */ 1629#ifdef LOCKF_DEBUG 1630 if (lockf_debug & 2) 1631 printf("lock contains overlap\n"); 1632#endif /* LOCKF_DEBUG */ 1633 res = 3; 1634 break; 1635 } 1636 if (lf->lf_start < start && lf->lf_end >= start) { 1637 /* Case 4 */ 1638#ifdef LOCKF_DEBUG 1639 if (lockf_debug & 2) 1640 printf("overlap starts before lock\n"); 1641#endif /* LOCKF_DEBUG */ 1642 res = 4; 1643 break; 1644 } 1645 if (lf->lf_start > start && lf->lf_end > end) { 1646 /* Case 5 */ 1647#ifdef LOCKF_DEBUG 1648 if (lockf_debug & 2) 1649 printf("overlap ends after lock\n"); 1650#endif /* LOCKF_DEBUG */ 1651 res = 5; 1652 break; 1653 } 1654 panic("lf_findoverlap: default"); 1655 } 1656 return (res); 1657} 1658 1659/* 1660 * Split an the existing 'lock1', based on the extent of the lock 1661 * described by 'lock2'. The existing lock should cover 'lock2' 1662 * entirely. 1663 * 1664 * Any pending locks which have been been unblocked are added to 1665 * 'granted' 1666 */ 1667static void 1668lf_split(struct lockf *state, struct lockf_entry *lock1, 1669 struct lockf_entry *lock2, struct lockf_entry_list *granted) 1670{ 1671 struct lockf_entry *splitlock; 1672 1673#ifdef LOCKF_DEBUG 1674 if (lockf_debug & 2) { 1675 lf_print("lf_split", lock1); 1676 lf_print("splitting from", lock2); 1677 } 1678#endif /* LOCKF_DEBUG */ 1679 /* 1680 * Check to see if we don't need to split at all. 1681 */ 1682 if (lock1->lf_start == lock2->lf_start) { 1683 lf_set_start(state, lock1, lock2->lf_end + 1, granted); 1684 return; 1685 } 1686 if (lock1->lf_end == lock2->lf_end) { 1687 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1688 return; 1689 } 1690 /* 1691 * Make a new lock consisting of the last part of 1692 * the encompassing lock. 
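 *
 * (For example, with hypothetical ranges: splitting lock1 [0..99]
 * around lock2 [40..59] shrinks lock1 to [0..39] and creates the
 * splitlock below covering [60..99].)
 *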
1693 */ 1694 splitlock = lf_alloc_lock(lock1->lf_owner); 1695 memcpy(splitlock, lock1, sizeof *splitlock); 1696 if (splitlock->lf_flags & F_REMOTE) 1697 vref(splitlock->lf_vnode); 1698 1699 /* 1700 * This cannot cause a deadlock since any edges we would add 1701 * to splitlock already exist in lock1. We must be sure to add 1702 * necessary dependancies to splitlock before we reduce lock1 1703 * otherwise we may accidentally grant a pending lock that 1704 * was blocked by the tail end of lock1. 1705 */ 1706 splitlock->lf_start = lock2->lf_end + 1; 1707 LIST_INIT(&splitlock->lf_outedges); 1708 LIST_INIT(&splitlock->lf_inedges); 1709 sx_xlock(&lf_owner_graph_lock); 1710 lf_add_incoming(state, splitlock); 1711 sx_xunlock(&lf_owner_graph_lock); 1712 1713 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1714 1715 /* 1716 * OK, now link it in 1717 */ 1718 lf_insert_lock(state, splitlock); 1719} 1720 1721struct clearlock { 1722 STAILQ_ENTRY(clearlock) link; 1723 struct vnode *vp; 1724 struct flock fl; 1725}; 1726STAILQ_HEAD(clearlocklist, clearlock); 1727 1728void 1729lf_clearremotesys(int sysid) 1730{ 1731 struct lockf *ls; 1732 struct lockf_entry *lf; 1733 struct clearlock *cl; 1734 struct clearlocklist locks; 1735 1736 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1737 1738 /* 1739 * In order to keep the locking simple, we iterate over the 1740 * active lock lists to build a list of locks that need 1741 * releasing. We then call VOP_ADVLOCK for each one in turn. 1742 * 1743 * We take an extra reference to the vnode for the duration to 1744 * make sure it doesn't go away before we are finished. 1745 */ 1746 STAILQ_INIT(&locks); 1747 sx_xlock(&lf_lock_states_lock); 1748 LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1749 sx_xlock(&ls->ls_lock); 1750 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1751 if (lf->lf_owner->lo_sysid != sysid) 1752 continue; 1753 1754 cl = malloc(sizeof(struct clearlock), M_LOCKF, 1755 M_WAITOK); 1756 cl->vp = lf->lf_vnode; 1757 vref(cl->vp); 1758 cl->fl.l_start = lf->lf_start; 1759 if (lf->lf_end == OFF_MAX) 1760 cl->fl.l_len = 0; 1761 else 1762 cl->fl.l_len = 1763 lf->lf_end - lf->lf_start + 1; 1764 cl->fl.l_whence = SEEK_SET; 1765 cl->fl.l_type = F_UNLCK; 1766 cl->fl.l_pid = lf->lf_owner->lo_pid; 1767 cl->fl.l_sysid = sysid; 1768 STAILQ_INSERT_TAIL(&locks, cl, link); 1769 } 1770 sx_xunlock(&ls->ls_lock); 1771 } 1772 sx_xunlock(&lf_lock_states_lock); 1773 1774 while ((cl = STAILQ_FIRST(&locks)) != NULL) { 1775 STAILQ_REMOVE_HEAD(&locks, link); 1776 VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE); 1777 vrele(cl->vp); 1778 free(cl, M_LOCKF); 1779 } 1780} 1781 1782int 1783lf_countlocks(int sysid) 1784{ 1785 int i; 1786 struct lock_owner *lo; 1787 int count; 1788 1789 count = 0; 1790 sx_xlock(&lf_lock_owners_lock); 1791 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 1792 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 1793 if (lo->lo_sysid == sysid) 1794 count += lo->lo_refs; 1795 sx_xunlock(&lf_lock_owners_lock); 1796 1797 return (count); 1798} 1799 1800#ifdef LOCKF_DEBUG 1801 1802/* 1803 * Return non-zero if y is reachable from x using a brute force 1804 * search. If reachable and path is non-null, return the route taken 1805 * in path. 
1806 */ 1807static int 1808graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 1809 struct owner_vertex_list *path) 1810{ 1811 struct owner_edge *e; 1812 1813 if (x == y) { 1814 if (path) 1815 TAILQ_INSERT_HEAD(path, x, v_link); 1816 return 1; 1817 } 1818 1819 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 1820 if (graph_reaches(e->e_to, y, path)) { 1821 if (path) 1822 TAILQ_INSERT_HEAD(path, x, v_link); 1823 return 1; 1824 } 1825 } 1826 return 0; 1827} 1828 1829/* 1830 * Perform consistency checks on the graph. Make sure the values of 1831 * v_order are correct. If checkorder is non-zero, check no vertex can 1832 * reach any other vertex with a smaller order. 1833 */ 1834static void 1835graph_check(struct owner_graph *g, int checkorder) 1836{ 1837 int i, j; 1838 1839 for (i = 0; i < g->g_size; i++) { 1840 if (!g->g_vertices[i]->v_owner) 1841 continue; 1842 KASSERT(g->g_vertices[i]->v_order == i, 1843 ("lock graph vertices disordered")); 1844 if (checkorder) { 1845 for (j = 0; j < i; j++) { 1846 if (!g->g_vertices[j]->v_owner) 1847 continue; 1848 KASSERT(!graph_reaches(g->g_vertices[i], 1849 g->g_vertices[j], NULL), 1850 ("lock graph vertices disordered")); 1851 } 1852 } 1853 } 1854} 1855 1856static void 1857graph_print_vertices(struct owner_vertex_list *set) 1858{ 1859 struct owner_vertex *v; 1860 1861 printf("{ "); 1862 TAILQ_FOREACH(v, set, v_link) { 1863 printf("%d:", v->v_order); 1864 lf_print_owner(v->v_owner); 1865 if (TAILQ_NEXT(v, v_link)) 1866 printf(", "); 1867 } 1868 printf(" }\n"); 1869} 1870 1871#endif 1872 1873/* 1874 * Calculate the sub-set of vertices v from the affected region [y..x] 1875 * where v is reachable from y. Return -1 if a loop was detected 1876 * (i.e. x is reachable from y, otherwise the number of vertices in 1877 * this subset. 1878 */ 1879static int 1880graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, 1881 struct owner_vertex *y, struct owner_vertex_list *delta) 1882{ 1883 uint32_t gen; 1884 struct owner_vertex *v; 1885 struct owner_edge *e; 1886 int n; 1887 1888 /* 1889 * We start with a set containing just y. Then for each vertex 1890 * v in the set so far unprocessed, we add each vertex that v 1891 * has an out-edge to and that is within the affected region 1892 * [y..x]. If we see the vertex x on our travels, stop 1893 * immediately. 1894 */ 1895 TAILQ_INIT(delta); 1896 TAILQ_INSERT_TAIL(delta, y, v_link); 1897 v = y; 1898 n = 1; 1899 gen = g->g_gen; 1900 while (v) { 1901 LIST_FOREACH(e, &v->v_outedges, e_outlink) { 1902 if (e->e_to == x) 1903 return -1; 1904 if (e->e_to->v_order < x->v_order 1905 && e->e_to->v_gen != gen) { 1906 e->e_to->v_gen = gen; 1907 TAILQ_INSERT_TAIL(delta, e->e_to, v_link); 1908 n++; 1909 } 1910 } 1911 v = TAILQ_NEXT(v, v_link); 1912 } 1913 1914 return (n); 1915} 1916 1917/* 1918 * Calculate the sub-set of vertices v from the affected region [y..x] 1919 * where v reaches x. Return the number of vertices in this subset. 1920 */ 1921static int 1922graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 1923 struct owner_vertex *y, struct owner_vertex_list *delta) 1924{ 1925 uint32_t gen; 1926 struct owner_vertex *v; 1927 struct owner_edge *e; 1928 int n; 1929 1930 /* 1931 * We start with a set containing just x. Then for each vertex 1932 * v in the set so far unprocessed, we add each vertex that v 1933 * has an in-edge from and that is within the affected region 1934 * [y..x]. 
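 *
 * For example (hypothetical orders): when adding an edge from
 * x (order 5) to y (order 2), the affected region is the vertices
 * with orders 2..5; deltaB collects x plus any vertex in that region
 * with a path to x, and graph_add_edge() below then renumbers the
 * deltaB vertices ahead of the deltaF vertices so that x ends up
 * ordered before y.
 *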
1935 */ 1936 TAILQ_INIT(delta); 1937 TAILQ_INSERT_TAIL(delta, x, v_link); 1938 v = x; 1939 n = 1; 1940 gen = g->g_gen; 1941 while (v) { 1942 LIST_FOREACH(e, &v->v_inedges, e_inlink) { 1943 if (e->e_from->v_order > y->v_order 1944 && e->e_from->v_gen != gen) { 1945 e->e_from->v_gen = gen; 1946 TAILQ_INSERT_HEAD(delta, e->e_from, v_link); 1947 n++; 1948 } 1949 } 1950 v = TAILQ_PREV(v, owner_vertex_list, v_link); 1951 } 1952 1953 return (n); 1954} 1955 1956static int 1957graph_add_indices(int *indices, int n, struct owner_vertex_list *set) 1958{ 1959 struct owner_vertex *v; 1960 int i, j; 1961 1962 TAILQ_FOREACH(v, set, v_link) { 1963 for (i = n; 1964 i > 0 && indices[i - 1] > v->v_order; i--) 1965 ; 1966 for (j = n - 1; j >= i; j--) 1967 indices[j + 1] = indices[j]; 1968 indices[i] = v->v_order; 1969 n++; 1970 } 1971 1972 return (n); 1973} 1974 1975static int 1976graph_assign_indices(struct owner_graph *g, int *indices, int nextunused, 1977 struct owner_vertex_list *set) 1978{ 1979 struct owner_vertex *v, *vlowest; 1980 1981 while (!TAILQ_EMPTY(set)) { 1982 vlowest = NULL; 1983 TAILQ_FOREACH(v, set, v_link) { 1984 if (!vlowest || v->v_order < vlowest->v_order) 1985 vlowest = v; 1986 } 1987 TAILQ_REMOVE(set, vlowest, v_link); 1988 vlowest->v_order = indices[nextunused]; 1989 g->g_vertices[vlowest->v_order] = vlowest; 1990 nextunused++; 1991 } 1992 1993 return (nextunused); 1994} 1995 1996static int 1997graph_add_edge(struct owner_graph *g, struct owner_vertex *x, 1998 struct owner_vertex *y) 1999{ 2000 struct owner_edge *e; 2001 struct owner_vertex_list deltaF, deltaB; 2002 int nF, nB, n, vi, i; 2003 int *indices; 2004 2005 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2006 2007 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2008 if (e->e_to == y) { 2009 e->e_refs++; 2010 return (0); 2011 } 2012 } 2013 2014#ifdef LOCKF_DEBUG 2015 if (lockf_debug & 8) { 2016 printf("adding edge %d:", x->v_order); 2017 lf_print_owner(x->v_owner); 2018 printf(" -> %d:", y->v_order); 2019 lf_print_owner(y->v_owner); 2020 printf("\n"); 2021 } 2022#endif 2023 if (y->v_order < x->v_order) { 2024 /* 2025 * The new edge violates the order. First find the set 2026 * of affected vertices reachable from y (deltaF) and 2027 * the set of affect vertices affected that reach x 2028 * (deltaB), using the graph generation number to 2029 * detect whether we have visited a given vertex 2030 * already. We re-order the graph so that each vertex 2031 * in deltaB appears before each vertex in deltaF. 2032 * 2033 * If x is a member of deltaF, then the new edge would 2034 * create a cycle. Otherwise, we may assume that 2035 * deltaF and deltaB are disjoint. 2036 */ 2037 g->g_gen++; 2038 if (g->g_gen == 0) { 2039 /* 2040 * Generation wrap. 
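 * Clear every vertex's v_gen so that a tag left over from an
 * earlier pass cannot be mistaken for the current generation.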
			 */
			for (vi = 0; vi < g->g_size; vi++) {
				g->g_vertices[vi]->v_gen = 0;
			}
			g->g_gen++;
		}
		nF = graph_delta_forward(g, x, y, &deltaF);
		if (nF < 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 8) {
				struct owner_vertex_list path;
				printf("deadlock: ");
				TAILQ_INIT(&path);
				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}
#endif
			return (EDEADLK);
		}

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");
			printf("deltaF = ");
			graph_print_vertices(&deltaF);
		}
#endif

		nB = graph_delta_backward(g, x, y, &deltaB);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("deltaB = ");
			graph_print_vertices(&deltaB);
		}
#endif

		/*
		 * We first build a set of vertex indices (vertex
		 * order values) that we may use, then we re-assign
		 * orders first to those vertices in deltaB, then to
		 * deltaF. Note that the contents of deltaF and deltaB
		 * may be partially disordered - we perform an
		 * insertion sort while building our index set. A
		 * standalone sketch of this index shuffle appears at
		 * the end of this file.
		 */
		indices = g->g_indexbuf;
		n = graph_add_indices(indices, 0, &deltaF);
		graph_add_indices(indices, n, &deltaB);

		/*
		 * We must also be sure to maintain the relative
		 * ordering of deltaF and deltaB when re-assigning
		 * vertices. We do this by iteratively removing the
		 * lowest ordered element from the set and assigning
		 * it the next value from our new ordering.
		 */
		i = graph_assign_indices(g, indices, 0, &deltaB);
		graph_assign_indices(g, indices, i, &deltaF);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			struct owner_vertex_list set;
			TAILQ_INIT(&set);
			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);
			printf("new ordering = ");
			graph_print_vertices(&set);
		}
#endif
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}
#endif

	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
	e->e_refs = 1;
	e->e_from = x;
	e->e_to = y;

	return (0);
}

/*
 * Remove an edge x->y from the graph.
 */
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y)
			break;
	}
	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	e->e_refs--;
	if (e->e_refs == 0) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
			printf("\n");
		}
#endif
		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
		free(e, M_LOCKF);
	}
}

/*
 * Allocate a vertex for a lock owner and add it to the owner graph,
 * growing the vertex array and index buffer if necessary.
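 * The caller must hold lf_owner_graph_lock exclusively.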
 */
static struct owner_vertex *
graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
{
	struct owner_vertex *v;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
	if (g->g_size == g->g_space) {
		g->g_vertices = realloc(g->g_vertices,
		    2 * g->g_space * sizeof(struct owner_vertex *),
		    M_LOCKF, M_WAITOK);
		free(g->g_indexbuf, M_LOCKF);
		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
		    M_LOCKF, M_WAITOK);
		g->g_space = 2 * g->g_space;
	}
	v->v_order = g->g_size;
	v->v_gen = g->g_gen;
	g->g_vertices[g->g_size] = v;
	g->g_size++;

	LIST_INIT(&v->v_outedges);
	LIST_INIT(&v->v_inedges);
	v->v_owner = lo;

	return (v);
}

/*
 * Remove a vertex from the graph, closing up the gap it leaves in the
 * vertex array and renumbering the vertices that followed it.
 */
static void
graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
{
	struct owner_vertex *w;
	int i;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));

	/*
	 * Remove from the graph's array and close up the gap,
	 * renumbering the other vertices.
	 */
	for (i = v->v_order + 1; i < g->g_size; i++) {
		w = g->g_vertices[i];
		w->v_order--;
		g->g_vertices[i - 1] = w;
	}
	g->g_size--;

	free(v, M_LOCKF);
}

/*
 * Initialise the owner graph, starting with space for ten vertices.
 */
static struct owner_graph *
graph_init(struct owner_graph *g)
{

	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
	    M_LOCKF, M_WAITOK);
	g->g_size = 0;
	g->g_space = 10;
	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
	g->g_gen = 0;

	return (g);
}

#ifdef LOCKF_DEBUG
/*
 * Print a description of a lock owner.
 */
static void
lf_print_owner(struct lock_owner *lo)
{

	if (lo->lo_flags & F_REMOTE) {
		printf("remote pid %d, system %d",
		    lo->lo_pid, lo->lo_sysid);
	} else if (lo->lo_flags & F_FLOCK) {
		printf("file %p", lo->lo_id);
	} else {
		printf("local pid %d", lo->lo_pid);
	}
}

/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf_entry *lock)
{

	printf("%s: lock %p for ", tag, (void *)lock);
	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>,",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev));
	printf(" %s, start %jd, end ",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ?
"unlock" : "unknown", 2273 (intmax_t)lock->lf_start); 2274 if (lock->lf_end == OFF_MAX) 2275 printf("EOF"); 2276 else 2277 printf("%jd", (intmax_t)lock->lf_end); 2278 if (!LIST_EMPTY(&lock->lf_outedges)) 2279 printf(" block %p\n", 2280 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); 2281 else 2282 printf("\n"); 2283} 2284 2285static void 2286lf_printlist(char *tag, struct lockf_entry *lock) 2287{ 2288 struct lockf_entry *lf, *blk; 2289 struct lockf_edge *e; 2290 2291 if (lock->lf_inode == (struct inode *)0) 2292 return; 2293 2294 printf("%s: Lock list for ino %ju on dev <%s>:\n", 2295 tag, (uintmax_t)lock->lf_inode->i_number, 2296 devtoname(lock->lf_inode->i_dev)); 2297 LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) { 2298 printf("\tlock %p for ",(void *)lf); 2299 lf_print_owner(lock->lf_owner); 2300 printf(", %s, start %jd, end %jd", 2301 lf->lf_type == F_RDLCK ? "shared" : 2302 lf->lf_type == F_WRLCK ? "exclusive" : 2303 lf->lf_type == F_UNLCK ? "unlock" : 2304 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); 2305 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { 2306 blk = e->le_to; 2307 printf("\n\t\tlock request %p for ", (void *)blk); 2308 lf_print_owner(blk->lf_owner); 2309 printf(", %s, start %jd, end %jd", 2310 blk->lf_type == F_RDLCK ? "shared" : 2311 blk->lf_type == F_WRLCK ? "exclusive" : 2312 blk->lf_type == F_UNLCK ? "unlock" : 2313 "unknown", (intmax_t)blk->lf_start, 2314 (intmax_t)blk->lf_end); 2315 if (!LIST_EMPTY(&blk->lf_inedges)) 2316 panic("lf_printlist: bad list"); 2317 } 2318 printf("\n"); 2319 } 2320} 2321#endif /* LOCKF_DEBUG */ 2322