kern_lockf.c revision 192683
1/*- 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ 3 * Authors: Doug Rabson <dfr@rabson.org> 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27/*- 28 * Copyright (c) 1982, 1986, 1989, 1993 29 * The Regents of the University of California. All rights reserved. 30 * 31 * This code is derived from software contributed to Berkeley by 32 * Scooter Morris at Genentech Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 4. Neither the name of the University nor the names of its contributors 43 * may be used to endorse or promote products derived from this software 44 * without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 56 * SUCH DAMAGE. 
57 * 58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 59 */ 60 61#include <sys/cdefs.h> 62__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 192683 2009-05-24 12:33:16Z kib $"); 63 64#include "opt_debug_lockf.h" 65 66#include <sys/param.h> 67#include <sys/systm.h> 68#include <sys/hash.h> 69#include <sys/kernel.h> 70#include <sys/limits.h> 71#include <sys/lock.h> 72#include <sys/mount.h> 73#include <sys/mutex.h> 74#include <sys/proc.h> 75#include <sys/sx.h> 76#include <sys/unistd.h> 77#include <sys/vnode.h> 78#include <sys/malloc.h> 79#include <sys/fcntl.h> 80#include <sys/lockf.h> 81#include <sys/taskqueue.h> 82 83#ifdef LOCKF_DEBUG 84#include <sys/sysctl.h> 85 86#include <ufs/ufs/quota.h> 87#include <ufs/ufs/inode.h> 88 89static int lockf_debug = 0; /* control debug output */ 90SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 91#endif 92 93MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 94 95struct owner_edge; 96struct owner_vertex; 97struct owner_vertex_list; 98struct owner_graph; 99 100#define NOLOCKF (struct lockf_entry *)0 101#define SELF 0x1 102#define OTHERS 0x2 103static void lf_init(void *); 104static int lf_hash_owner(caddr_t, struct flock *, int); 105static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, 106 int); 107static struct lockf_entry * 108 lf_alloc_lock(struct lock_owner *); 109static void lf_free_lock(struct lockf_entry *); 110static int lf_clearlock(struct lockf *, struct lockf_entry *); 111static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); 112static int lf_blocks(struct lockf_entry *, struct lockf_entry *); 113static void lf_free_edge(struct lockf_edge *); 114static struct lockf_edge * 115 lf_alloc_edge(void); 116static void lf_alloc_vertex(struct lockf_entry *); 117static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); 118static void lf_remove_edge(struct lockf_edge *); 119static void lf_remove_outgoing(struct lockf_entry *); 120static void lf_remove_incoming(struct lockf_entry *); 121static int lf_add_outgoing(struct lockf *, struct lockf_entry *); 122static int lf_add_incoming(struct lockf *, struct lockf_entry *); 123static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, 124 int); 125static struct lockf_entry * 126 lf_getblock(struct lockf *, struct lockf_entry *); 127static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); 128static void lf_insert_lock(struct lockf *, struct lockf_entry *); 129static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); 130static void lf_update_dependancies(struct lockf *, struct lockf_entry *, 131 int all, struct lockf_entry_list *); 132static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, 133 struct lockf_entry_list*); 134static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, 135 struct lockf_entry_list*); 136static int lf_setlock(struct lockf *, struct lockf_entry *, 137 struct vnode *, void **cookiep); 138static int lf_cancel(struct lockf *, struct lockf_entry *, void *); 139static void lf_split(struct lockf *, struct lockf_entry *, 140 struct lockf_entry *, struct lockf_entry_list *); 141#ifdef LOCKF_DEBUG 142static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 143 struct owner_vertex_list *path); 144static void graph_check(struct owner_graph *g, int checkorder); 145static void graph_print_vertices(struct owner_vertex_list *set); 146#endif 147static int graph_delta_forward(struct owner_graph *g, 148 struct owner_vertex *x, struct owner_vertex *y, 149 
    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void	lf_print(char *, struct lockf_entry *);
static void	lf_printlist(char *, struct lockf_entry *);
static void	lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
 *
 * Locks:
 * (s)	locked by state->ls_lock
 * (S)	locked by lf_lock_states_lock
 * (l)	locked by lf_lock_owners_lock
 * (g)	locked by lf_owner_graph_lock
 * (c)	const until freeing
 */
#define	LOCK_OWNER_HASH_SIZE	256

struct lock_owner {
	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
	int	lo_refs;	/* (l) Number of locks referring to this */
	int	lo_flags;	/* (c) Flags passed to lf_advlock */
	caddr_t	lo_id;		/* (c) Id value passed to lf_advlock */
	pid_t	lo_pid;		/* (c) Process Id of the lock owner */
	int	lo_sysid;	/* (c) System Id of the lock owner */
	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

static struct sx		lf_lock_states_lock;
static struct lockf_list	lf_lock_states; /* (S) */
static struct sx		lf_lock_owners_lock;
static struct lock_owner_list	lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */

/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph, the first is the set of locks,
 * both active and pending on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it was active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges)
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners.
Each lock owner 225 * is a vertex in that graph and an edge is added to the graph 226 * whenever an edge is added to a vnode graph, with end points 227 * corresponding to owner of the new pending lock and the owner of the 228 * lock upon which it waits. In order to prevent deadlock, we only add 229 * an edge to this graph if the new edge would not create a cycle. 230 * 231 * The lock owner graph is topologically sorted, i.e. if a node has 232 * any outgoing edges, then it has an order strictly less than any 233 * node to which it has an outgoing edge. We preserve this ordering 234 * (and detect cycles) on edge insertion using Algorithm PK from the 235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237 * No. 1.7) 238 */ 239struct owner_vertex; 240 241struct owner_edge { 242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ 243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ 244 int e_refs; /* (g) number of times added */ 245 struct owner_vertex *e_from; /* (c) out-going from here */ 246 struct owner_vertex *e_to; /* (c) in-coming to here */ 247}; 248LIST_HEAD(owner_edge_list, owner_edge); 249 250struct owner_vertex { 251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ 252 uint32_t v_gen; /* (g) workspace for edge insertion */ 253 int v_order; /* (g) order of vertex in graph */ 254 struct owner_edge_list v_outedges;/* (g) list of out-edges */ 255 struct owner_edge_list v_inedges; /* (g) list of in-edges */ 256 struct lock_owner *v_owner; /* (c) corresponding lock owner */ 257}; 258TAILQ_HEAD(owner_vertex_list, owner_vertex); 259 260struct owner_graph { 261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */ 262 int g_size; /* (g) number of vertices */ 263 int g_space; /* (g) space allocated for vertices */ 264 int *g_indexbuf; /* (g) workspace for loop detection */ 265 uint32_t g_gen; /* (g) increment when re-ordering */ 266}; 267 268static struct sx lf_owner_graph_lock; 269static struct owner_graph lf_owner_graph; 270 271/* 272 * Initialise various structures and locks. 273 */ 274static void 275lf_init(void *dummy) 276{ 277 int i; 278 279 sx_init(&lf_lock_states_lock, "lock states lock"); 280 LIST_INIT(&lf_lock_states); 281 282 sx_init(&lf_lock_owners_lock, "lock owners lock"); 283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 284 LIST_INIT(&lf_lock_owners[i]); 285 286 sx_init(&lf_owner_graph_lock, "owner graph lock"); 287 graph_init(&lf_owner_graph); 288} 289SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); 290 291/* 292 * Generate a hash value for a lock owner. 293 */ 294static int 295lf_hash_owner(caddr_t id, struct flock *fl, int flags) 296{ 297 uint32_t h; 298 299 if (flags & F_REMOTE) { 300 h = HASHSTEP(0, fl->l_pid); 301 h = HASHSTEP(h, fl->l_sysid); 302 } else if (flags & F_FLOCK) { 303 h = ((uintptr_t) id) >> 7; 304 } else { 305 struct proc *p = (struct proc *) id; 306 h = HASHSTEP(0, p->p_pid); 307 h = HASHSTEP(h, 0); 308 } 309 310 return (h % LOCK_OWNER_HASH_SIZE); 311} 312 313/* 314 * Return true if a lock owner matches the details passed to 315 * lf_advlock. 
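 *
 * Editor's illustrative note (not part of the original revision): for
 * local POSIX locks the id passed in is the struct proc pointer and
 * for flock-style locks it is the file, so a local match reduces to
 * pointer equality on lo_id, e.g.
 *
 *	lf_owner_matches(lo, (caddr_t)p, fl, 0)  ->  lo->lo_id == (caddr_t)p
 *
 * while a remote (F_REMOTE) match compares the <l_pid, l_sysid> pair
 * carried in the flock structure instead.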
316 */ 317static int 318lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, 319 int flags) 320{ 321 if (flags & F_REMOTE) { 322 return lo->lo_pid == fl->l_pid 323 && lo->lo_sysid == fl->l_sysid; 324 } else { 325 return lo->lo_id == id; 326 } 327} 328 329static struct lockf_entry * 330lf_alloc_lock(struct lock_owner *lo) 331{ 332 struct lockf_entry *lf; 333 334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); 335 336#ifdef LOCKF_DEBUG 337 if (lockf_debug & 4) 338 printf("Allocated lock %p\n", lf); 339#endif 340 if (lo) { 341 sx_xlock(&lf_lock_owners_lock); 342 lo->lo_refs++; 343 sx_xunlock(&lf_lock_owners_lock); 344 lf->lf_owner = lo; 345 } 346 347 return (lf); 348} 349 350static void 351lf_free_lock(struct lockf_entry *lock) 352{ 353 /* 354 * Adjust the lock_owner reference count and 355 * reclaim the entry if this is the last lock 356 * for that owner. 357 */ 358 struct lock_owner *lo = lock->lf_owner; 359 if (lo) { 360 KASSERT(LIST_EMPTY(&lock->lf_outedges), 361 ("freeing lock with dependancies")); 362 KASSERT(LIST_EMPTY(&lock->lf_inedges), 363 ("freeing lock with dependants")); 364 sx_xlock(&lf_lock_owners_lock); 365 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); 366 lo->lo_refs--; 367 if (lo->lo_refs == 0) { 368#ifdef LOCKF_DEBUG 369 if (lockf_debug & 1) 370 printf("lf_free_lock: freeing lock owner %p\n", 371 lo); 372#endif 373 if (lo->lo_vertex) { 374 sx_xlock(&lf_owner_graph_lock); 375 graph_free_vertex(&lf_owner_graph, 376 lo->lo_vertex); 377 sx_xunlock(&lf_owner_graph_lock); 378 } 379 LIST_REMOVE(lo, lo_link); 380 free(lo, M_LOCKF); 381#ifdef LOCKF_DEBUG 382 if (lockf_debug & 4) 383 printf("Freed lock owner %p\n", lo); 384#endif 385 } 386 sx_unlock(&lf_lock_owners_lock); 387 } 388 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { 389 vrele(lock->lf_vnode); 390 lock->lf_vnode = NULL; 391 } 392#ifdef LOCKF_DEBUG 393 if (lockf_debug & 4) 394 printf("Freed lock %p\n", lock); 395#endif 396 free(lock, M_LOCKF); 397} 398 399/* 400 * Advisory record locking support 401 */ 402int 403lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, 404 u_quad_t size) 405{ 406 struct lockf *state, *freestate = NULL; 407 struct flock *fl = ap->a_fl; 408 struct lockf_entry *lock; 409 struct vnode *vp = ap->a_vp; 410 caddr_t id = ap->a_id; 411 int flags = ap->a_flags; 412 int hash; 413 struct lock_owner *lo; 414 off_t start, end, oadd; 415 int error; 416 417 /* 418 * Handle the F_UNLKSYS case first - no need to mess about 419 * creating a lock owner for this one. 420 */ 421 if (ap->a_op == F_UNLCKSYS) { 422 lf_clearremotesys(fl->l_sysid); 423 return (0); 424 } 425 426 /* 427 * Convert the flock structure into a start and end. 428 */ 429 switch (fl->l_whence) { 430 431 case SEEK_SET: 432 case SEEK_CUR: 433 /* 434 * Caller is responsible for adding any necessary offset 435 * when SEEK_CUR is used. 
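	 *
	 * Editor's sketch (not part of the original revision) of the
	 * conversion performed below, assuming SEEK_SET: l_start = 100
	 * with l_len = -10 yields the byte range [90..99], l_len = 10
	 * yields [100..109], and l_len = 0 locks to end of file,
	 * i.e. [100..OFF_MAX].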
436 */ 437 start = fl->l_start; 438 break; 439 440 case SEEK_END: 441 if (size > OFF_MAX || 442 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) 443 return (EOVERFLOW); 444 start = size + fl->l_start; 445 break; 446 447 default: 448 return (EINVAL); 449 } 450 if (start < 0) 451 return (EINVAL); 452 if (fl->l_len < 0) { 453 if (start == 0) 454 return (EINVAL); 455 end = start - 1; 456 start += fl->l_len; 457 if (start < 0) 458 return (EINVAL); 459 } else if (fl->l_len == 0) { 460 end = OFF_MAX; 461 } else { 462 oadd = fl->l_len - 1; 463 if (oadd > OFF_MAX - start) 464 return (EOVERFLOW); 465 end = start + oadd; 466 } 467 /* 468 * Avoid the common case of unlocking when inode has no locks. 469 */ 470 VI_LOCK(vp); 471 if ((*statep) == NULL) { 472 if (ap->a_op != F_SETLK) { 473 fl->l_type = F_UNLCK; 474 VI_UNLOCK(vp); 475 return (0); 476 } 477 } 478 VI_UNLOCK(vp); 479 480 /* 481 * Map our arguments to an existing lock owner or create one 482 * if this is the first time we have seen this owner. 483 */ 484 hash = lf_hash_owner(id, fl, flags); 485 sx_xlock(&lf_lock_owners_lock); 486 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link) 487 if (lf_owner_matches(lo, id, fl, flags)) 488 break; 489 if (!lo) { 490 /* 491 * We initialise the lock with a reference 492 * count which matches the new lockf_entry 493 * structure created below. 494 */ 495 lo = malloc(sizeof(struct lock_owner), M_LOCKF, 496 M_WAITOK|M_ZERO); 497#ifdef LOCKF_DEBUG 498 if (lockf_debug & 4) 499 printf("Allocated lock owner %p\n", lo); 500#endif 501 502 lo->lo_refs = 1; 503 lo->lo_flags = flags; 504 lo->lo_id = id; 505 if (flags & F_REMOTE) { 506 lo->lo_pid = fl->l_pid; 507 lo->lo_sysid = fl->l_sysid; 508 } else if (flags & F_FLOCK) { 509 lo->lo_pid = -1; 510 lo->lo_sysid = 0; 511 } else { 512 struct proc *p = (struct proc *) id; 513 lo->lo_pid = p->p_pid; 514 lo->lo_sysid = 0; 515 } 516 lo->lo_vertex = NULL; 517 518#ifdef LOCKF_DEBUG 519 if (lockf_debug & 1) { 520 printf("lf_advlockasync: new lock owner %p ", lo); 521 lf_print_owner(lo); 522 printf("\n"); 523 } 524#endif 525 526 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link); 527 } else { 528 /* 529 * We have seen this lock owner before, increase its 530 * reference count to account for the new lockf_entry 531 * structure we create below. 532 */ 533 lo->lo_refs++; 534 } 535 sx_xunlock(&lf_lock_owners_lock); 536 537 /* 538 * Create the lockf structure. We initialise the lf_owner 539 * field here instead of in lf_alloc_lock() to avoid paying 540 * the lf_lock_owners_lock tax twice. 541 */ 542 lock = lf_alloc_lock(NULL); 543 lock->lf_start = start; 544 lock->lf_end = end; 545 lock->lf_owner = lo; 546 lock->lf_vnode = vp; 547 if (flags & F_REMOTE) { 548 /* 549 * For remote locks, the caller may release its ref to 550 * the vnode at any time - we have to ref it here to 551 * prevent it from being recycled unexpectedly. 552 */ 553 vref(vp); 554 } 555 556 /* 557 * XXX The problem is that VTOI is ufs specific, so it will 558 * break LOCKF_DEBUG for all other FS's other than UFS because 559 * it casts the vnode->data ptr to struct inode *. 560 */ 561/* lock->lf_inode = VTOI(ap->a_vp); */ 562 lock->lf_inode = (struct inode *)0; 563 lock->lf_type = fl->l_type; 564 LIST_INIT(&lock->lf_outedges); 565 LIST_INIT(&lock->lf_inedges); 566 lock->lf_async_task = ap->a_task; 567 lock->lf_flags = ap->a_flags; 568 569 /* 570 * Do the requested operation. 
First find our state structure 571 * and create a new one if necessary - the caller's *statep 572 * variable and the state's ls_threads count is protected by 573 * the vnode interlock. 574 */ 575 VI_LOCK(vp); 576 if (vp->v_iflag & VI_DOOMED) { 577 VI_UNLOCK(vp); 578 lf_free_lock(lock); 579 return (ENOENT); 580 } 581 582 /* 583 * Allocate a state structure if necessary. 584 */ 585 state = *statep; 586 if (state == NULL) { 587 struct lockf *ls; 588 589 VI_UNLOCK(vp); 590 591 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); 592 sx_init(&ls->ls_lock, "ls_lock"); 593 LIST_INIT(&ls->ls_active); 594 LIST_INIT(&ls->ls_pending); 595 ls->ls_threads = 1; 596 597 sx_xlock(&lf_lock_states_lock); 598 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); 599 sx_xunlock(&lf_lock_states_lock); 600 601 /* 602 * Cope if we lost a race with some other thread while 603 * trying to allocate memory. 604 */ 605 VI_LOCK(vp); 606 if (vp->v_iflag & VI_DOOMED) { 607 VI_UNLOCK(vp); 608 sx_xlock(&lf_lock_states_lock); 609 LIST_REMOVE(ls, ls_link); 610 sx_xunlock(&lf_lock_states_lock); 611 sx_destroy(&ls->ls_lock); 612 free(ls, M_LOCKF); 613 lf_free_lock(lock); 614 return (ENOENT); 615 } 616 if ((*statep) == NULL) { 617 state = *statep = ls; 618 VI_UNLOCK(vp); 619 } else { 620 state = *statep; 621 state->ls_threads++; 622 VI_UNLOCK(vp); 623 624 sx_xlock(&lf_lock_states_lock); 625 LIST_REMOVE(ls, ls_link); 626 sx_xunlock(&lf_lock_states_lock); 627 sx_destroy(&ls->ls_lock); 628 free(ls, M_LOCKF); 629 } 630 } else { 631 state->ls_threads++; 632 VI_UNLOCK(vp); 633 } 634 635 sx_xlock(&state->ls_lock); 636 /* 637 * Recheck the doomed vnode after state->ls_lock is 638 * locked. lf_purgelocks() requires that no new threads add 639 * pending locks when vnode is marked by VI_DOOMED flag. 640 */ 641 VI_LOCK(vp); 642 if (vp->v_iflag & VI_DOOMED) { 643 VI_UNLOCK(vp); 644 lf_free_lock(lock); 645 return (ENOENT); 646 } 647 VI_UNLOCK(vp); 648 649 switch (ap->a_op) { 650 case F_SETLK: 651 error = lf_setlock(state, lock, vp, ap->a_cookiep); 652 break; 653 654 case F_UNLCK: 655 error = lf_clearlock(state, lock); 656 lf_free_lock(lock); 657 break; 658 659 case F_GETLK: 660 error = lf_getlock(state, lock, fl); 661 lf_free_lock(lock); 662 break; 663 664 case F_CANCEL: 665 if (ap->a_cookiep) 666 error = lf_cancel(state, lock, *ap->a_cookiep); 667 else 668 error = EINVAL; 669 lf_free_lock(lock); 670 break; 671 672 default: 673 lf_free_lock(lock); 674 error = EINVAL; 675 break; 676 } 677 678#ifdef INVARIANTS 679 /* 680 * Check for some can't happen stuff. In this case, the active 681 * lock list becoming disordered or containing mutually 682 * blocking locks. We also check the pending list for locks 683 * which should be active (i.e. have no out-going edges). 
684 */ 685 LIST_FOREACH(lock, &state->ls_active, lf_link) { 686 struct lockf_entry *lf; 687 if (LIST_NEXT(lock, lf_link)) 688 KASSERT((lock->lf_start 689 <= LIST_NEXT(lock, lf_link)->lf_start), 690 ("locks disordered")); 691 LIST_FOREACH(lf, &state->ls_active, lf_link) { 692 if (lock == lf) 693 break; 694 KASSERT(!lf_blocks(lock, lf), 695 ("two conflicting active locks")); 696 if (lock->lf_owner == lf->lf_owner) 697 KASSERT(!lf_overlaps(lock, lf), 698 ("two overlapping locks from same owner")); 699 } 700 } 701 LIST_FOREACH(lock, &state->ls_pending, lf_link) { 702 KASSERT(!LIST_EMPTY(&lock->lf_outedges), 703 ("pending lock which should be active")); 704 } 705#endif 706 sx_xunlock(&state->ls_lock); 707 708 /* 709 * If we have removed the last active lock on the vnode and 710 * this is the last thread that was in-progress, we can free 711 * the state structure. We update the caller's pointer inside 712 * the vnode interlock but call free outside. 713 * 714 * XXX alternatively, keep the state structure around until 715 * the filesystem recycles - requires a callback from the 716 * filesystem. 717 */ 718 VI_LOCK(vp); 719 720 state->ls_threads--; 721 wakeup(state); 722 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { 723 KASSERT(LIST_EMPTY(&state->ls_pending), 724 ("freeing state with pending locks")); 725 freestate = state; 726 *statep = NULL; 727 } 728 729 VI_UNLOCK(vp); 730 731 if (freestate) { 732 sx_xlock(&lf_lock_states_lock); 733 LIST_REMOVE(freestate, ls_link); 734 sx_xunlock(&lf_lock_states_lock); 735 sx_destroy(&freestate->ls_lock); 736 free(freestate, M_LOCKF); 737 } 738 return (error); 739} 740 741int 742lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) 743{ 744 struct vop_advlockasync_args a; 745 746 a.a_vp = ap->a_vp; 747 a.a_id = ap->a_id; 748 a.a_op = ap->a_op; 749 a.a_fl = ap->a_fl; 750 a.a_flags = ap->a_flags; 751 a.a_task = NULL; 752 a.a_cookiep = NULL; 753 754 return (lf_advlockasync(&a, statep, size)); 755} 756 757void 758lf_purgelocks(struct vnode *vp, struct lockf **statep) 759{ 760 struct lockf *state; 761 struct lockf_entry *lock, *nlock; 762 763 /* 764 * For this to work correctly, the caller must ensure that no 765 * other threads enter the locking system for this vnode, 766 * e.g. by checking VI_DOOMED. We wake up any threads that are 767 * sleeping waiting for locks on this vnode and then free all 768 * the remaining locks. 769 */ 770 VI_LOCK(vp); 771 KASSERT(vp->v_iflag & VI_DOOMED, 772 ("lf_purgelocks: vp %p has not vgone yet", vp)); 773 state = *statep; 774 if (state) { 775 *statep = NULL; 776 state->ls_threads++; 777 VI_UNLOCK(vp); 778 779 sx_xlock(&state->ls_lock); 780 sx_xlock(&lf_owner_graph_lock); 781 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 782 LIST_REMOVE(lock, lf_link); 783 lf_remove_outgoing(lock); 784 lf_remove_incoming(lock); 785 786 /* 787 * If its an async lock, we can just free it 788 * here, otherwise we let the sleeping thread 789 * free it. 790 */ 791 if (lock->lf_async_task) { 792 lf_free_lock(lock); 793 } else { 794 lock->lf_flags |= F_INTR; 795 wakeup(lock); 796 } 797 } 798 sx_xunlock(&lf_owner_graph_lock); 799 sx_xunlock(&state->ls_lock); 800 801 /* 802 * Wait for all other threads, sleeping and otherwise 803 * to leave. 804 */ 805 VI_LOCK(vp); 806 while (state->ls_threads > 1) 807 msleep(state, VI_MTX(vp), 0, "purgelocks", 0); 808 VI_UNLOCK(vp); 809 810 /* 811 * We can just free all the active locks since they 812 * will have no dependancies (we removed them all 813 * above). 
We don't need to bother locking since we 814 * are the last thread using this state structure. 815 */ 816 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 817 LIST_REMOVE(lock, lf_link); 818 lf_free_lock(lock); 819 } 820 sx_xlock(&lf_lock_states_lock); 821 LIST_REMOVE(state, ls_link); 822 sx_xunlock(&lf_lock_states_lock); 823 sx_destroy(&state->ls_lock); 824 free(state, M_LOCKF); 825 } else { 826 VI_UNLOCK(vp); 827 } 828} 829 830/* 831 * Return non-zero if locks 'x' and 'y' overlap. 832 */ 833static int 834lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) 835{ 836 837 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); 838} 839 840/* 841 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). 842 */ 843static int 844lf_blocks(struct lockf_entry *x, struct lockf_entry *y) 845{ 846 847 return x->lf_owner != y->lf_owner 848 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) 849 && lf_overlaps(x, y); 850} 851 852/* 853 * Allocate a lock edge from the free list 854 */ 855static struct lockf_edge * 856lf_alloc_edge(void) 857{ 858 859 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); 860} 861 862/* 863 * Free a lock edge. 864 */ 865static void 866lf_free_edge(struct lockf_edge *e) 867{ 868 869 free(e, M_LOCKF); 870} 871 872 873/* 874 * Ensure that the lock's owner has a corresponding vertex in the 875 * owner graph. 876 */ 877static void 878lf_alloc_vertex(struct lockf_entry *lock) 879{ 880 struct owner_graph *g = &lf_owner_graph; 881 882 if (!lock->lf_owner->lo_vertex) 883 lock->lf_owner->lo_vertex = 884 graph_alloc_vertex(g, lock->lf_owner); 885} 886 887/* 888 * Attempt to record an edge from lock x to lock y. Return EDEADLK if 889 * the new edge would cause a cycle in the owner graph. 890 */ 891static int 892lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) 893{ 894 struct owner_graph *g = &lf_owner_graph; 895 struct lockf_edge *e; 896 int error; 897 898#ifdef INVARIANTS 899 LIST_FOREACH(e, &x->lf_outedges, le_outlink) 900 KASSERT(e->le_to != y, ("adding lock edge twice")); 901#endif 902 903 /* 904 * Make sure the two owners have entries in the owner graph. 905 */ 906 lf_alloc_vertex(x); 907 lf_alloc_vertex(y); 908 909 error = graph_add_edge(g, x->lf_owner->lo_vertex, 910 y->lf_owner->lo_vertex); 911 if (error) 912 return (error); 913 914 e = lf_alloc_edge(); 915 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); 916 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); 917 e->le_from = x; 918 e->le_to = y; 919 920 return (0); 921} 922 923/* 924 * Remove an edge from the lock graph. 925 */ 926static void 927lf_remove_edge(struct lockf_edge *e) 928{ 929 struct owner_graph *g = &lf_owner_graph; 930 struct lockf_entry *x = e->le_from; 931 struct lockf_entry *y = e->le_to; 932 933 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); 934 LIST_REMOVE(e, le_outlink); 935 LIST_REMOVE(e, le_inlink); 936 e->le_from = NULL; 937 e->le_to = NULL; 938 lf_free_edge(e); 939} 940 941/* 942 * Remove all out-going edges from lock x. 943 */ 944static void 945lf_remove_outgoing(struct lockf_entry *x) 946{ 947 struct lockf_edge *e; 948 949 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { 950 lf_remove_edge(e); 951 } 952} 953 954/* 955 * Remove all in-coming edges from lock x. 
956 */ 957static void 958lf_remove_incoming(struct lockf_entry *x) 959{ 960 struct lockf_edge *e; 961 962 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { 963 lf_remove_edge(e); 964 } 965} 966 967/* 968 * Walk the list of locks for the file and create an out-going edge 969 * from lock to each blocking lock. 970 */ 971static int 972lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) 973{ 974 struct lockf_entry *overlap; 975 int error; 976 977 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 978 /* 979 * We may assume that the active list is sorted by 980 * lf_start. 981 */ 982 if (overlap->lf_start > lock->lf_end) 983 break; 984 if (!lf_blocks(lock, overlap)) 985 continue; 986 987 /* 988 * We've found a blocking lock. Add the corresponding 989 * edge to the graphs and see if it would cause a 990 * deadlock. 991 */ 992 error = lf_add_edge(lock, overlap); 993 994 /* 995 * The only error that lf_add_edge returns is EDEADLK. 996 * Remove any edges we added and return the error. 997 */ 998 if (error) { 999 lf_remove_outgoing(lock); 1000 return (error); 1001 } 1002 } 1003 1004 /* 1005 * We also need to add edges to sleeping locks that block 1006 * us. This ensures that lf_wakeup_lock cannot grant two 1007 * mutually blocking locks simultaneously and also enforces a 1008 * 'first come, first served' fairness model. Note that this 1009 * only happens if we are blocked by at least one active lock 1010 * due to the call to lf_getblock in lf_setlock below. 1011 */ 1012 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 1013 if (!lf_blocks(lock, overlap)) 1014 continue; 1015 /* 1016 * We've found a blocking lock. Add the corresponding 1017 * edge to the graphs and see if it would cause a 1018 * deadlock. 1019 */ 1020 error = lf_add_edge(lock, overlap); 1021 1022 /* 1023 * The only error that lf_add_edge returns is EDEADLK. 1024 * Remove any edges we added and return the error. 1025 */ 1026 if (error) { 1027 lf_remove_outgoing(lock); 1028 return (error); 1029 } 1030 } 1031 1032 return (0); 1033} 1034 1035/* 1036 * Walk the list of pending locks for the file and create an in-coming 1037 * edge from lock to each blocking lock. 1038 */ 1039static int 1040lf_add_incoming(struct lockf *state, struct lockf_entry *lock) 1041{ 1042 struct lockf_entry *overlap; 1043 int error; 1044 1045 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 1046 if (!lf_blocks(lock, overlap)) 1047 continue; 1048 1049 /* 1050 * We've found a blocking lock. Add the corresponding 1051 * edge to the graphs and see if it would cause a 1052 * deadlock. 1053 */ 1054 error = lf_add_edge(overlap, lock); 1055 1056 /* 1057 * The only error that lf_add_edge returns is EDEADLK. 1058 * Remove any edges we added and return the error. 1059 */ 1060 if (error) { 1061 lf_remove_incoming(lock); 1062 return (error); 1063 } 1064 } 1065 return (0); 1066} 1067 1068/* 1069 * Insert lock into the active list, keeping list entries ordered by 1070 * increasing values of lf_start. 
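 *
 * Editor's illustrative note (not part of the original revision): the
 * ordering is by lf_start only, so entries belonging to different
 * owners may still overlap.  For example, with active read locks
 * [0..9], [5..20] and [100..OFF_MAX], a new entry starting at 7 is
 * inserted before the first entry whose lf_start is greater, giving
 * [0..9], [5..20], [7..n], [100..OFF_MAX].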
1071 */ 1072static void 1073lf_insert_lock(struct lockf *state, struct lockf_entry *lock) 1074{ 1075 struct lockf_entry *lf, *lfprev; 1076 1077 if (LIST_EMPTY(&state->ls_active)) { 1078 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); 1079 return; 1080 } 1081 1082 lfprev = NULL; 1083 LIST_FOREACH(lf, &state->ls_active, lf_link) { 1084 if (lf->lf_start > lock->lf_start) { 1085 LIST_INSERT_BEFORE(lf, lock, lf_link); 1086 return; 1087 } 1088 lfprev = lf; 1089 } 1090 LIST_INSERT_AFTER(lfprev, lock, lf_link); 1091} 1092 1093/* 1094 * Wake up a sleeping lock and remove it from the pending list now 1095 * that all its dependancies have been resolved. The caller should 1096 * arrange for the lock to be added to the active list, adjusting any 1097 * existing locks for the same owner as needed. 1098 */ 1099static void 1100lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) 1101{ 1102 1103 /* 1104 * Remove from ls_pending list and wake up the caller 1105 * or start the async notification, as appropriate. 1106 */ 1107 LIST_REMOVE(wakelock, lf_link); 1108#ifdef LOCKF_DEBUG 1109 if (lockf_debug & 1) 1110 lf_print("lf_wakeup_lock: awakening", wakelock); 1111#endif /* LOCKF_DEBUG */ 1112 if (wakelock->lf_async_task) { 1113 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); 1114 } else { 1115 wakeup(wakelock); 1116 } 1117} 1118 1119/* 1120 * Re-check all dependant locks and remove edges to locks that we no 1121 * longer block. If 'all' is non-zero, the lock has been removed and 1122 * we must remove all the dependancies, otherwise it has simply been 1123 * reduced but remains active. Any pending locks which have been been 1124 * unblocked are added to 'granted' 1125 */ 1126static void 1127lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, 1128 struct lockf_entry_list *granted) 1129{ 1130 struct lockf_edge *e, *ne; 1131 struct lockf_entry *deplock; 1132 1133 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { 1134 deplock = e->le_from; 1135 if (all || !lf_blocks(lock, deplock)) { 1136 sx_xlock(&lf_owner_graph_lock); 1137 lf_remove_edge(e); 1138 sx_xunlock(&lf_owner_graph_lock); 1139 if (LIST_EMPTY(&deplock->lf_outedges)) { 1140 lf_wakeup_lock(state, deplock); 1141 LIST_INSERT_HEAD(granted, deplock, lf_link); 1142 } 1143 } 1144 } 1145} 1146 1147/* 1148 * Set the start of an existing active lock, updating dependancies and 1149 * adding any newly woken locks to 'granted'. 1150 */ 1151static void 1152lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, 1153 struct lockf_entry_list *granted) 1154{ 1155 1156 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); 1157 lock->lf_start = new_start; 1158 LIST_REMOVE(lock, lf_link); 1159 lf_insert_lock(state, lock); 1160 lf_update_dependancies(state, lock, FALSE, granted); 1161} 1162 1163/* 1164 * Set the end of an existing active lock, updating dependancies and 1165 * adding any newly woken locks to 'granted'. 1166 */ 1167static void 1168lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, 1169 struct lockf_entry_list *granted) 1170{ 1171 1172 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); 1173 lock->lf_end = new_end; 1174 lf_update_dependancies(state, lock, FALSE, granted); 1175} 1176 1177/* 1178 * Add a lock to the active list, updating or removing any current 1179 * locks owned by the same owner and processing any pending locks that 1180 * become unblocked as a result. 
This code is also used for unlock 1181 * since the logic for updating existing locks is identical. 1182 * 1183 * As a result of processing the new lock, we may unblock existing 1184 * pending locks as a result of downgrading/unlocking. We simply 1185 * activate the newly granted locks by looping. 1186 * 1187 * Since the new lock already has its dependancies set up, we always 1188 * add it to the list (unless its an unlock request). This may 1189 * fragment the lock list in some pathological cases but its probably 1190 * not a real problem. 1191 */ 1192static void 1193lf_activate_lock(struct lockf *state, struct lockf_entry *lock) 1194{ 1195 struct lockf_entry *overlap, *lf; 1196 struct lockf_entry_list granted; 1197 int ovcase; 1198 1199 LIST_INIT(&granted); 1200 LIST_INSERT_HEAD(&granted, lock, lf_link); 1201 1202 while (!LIST_EMPTY(&granted)) { 1203 lock = LIST_FIRST(&granted); 1204 LIST_REMOVE(lock, lf_link); 1205 1206 /* 1207 * Skip over locks owned by other processes. Handle 1208 * any locks that overlap and are owned by ourselves. 1209 */ 1210 overlap = LIST_FIRST(&state->ls_active); 1211 for (;;) { 1212 ovcase = lf_findoverlap(&overlap, lock, SELF); 1213 1214#ifdef LOCKF_DEBUG 1215 if (ovcase && (lockf_debug & 2)) { 1216 printf("lf_setlock: overlap %d", ovcase); 1217 lf_print("", overlap); 1218 } 1219#endif 1220 /* 1221 * Six cases: 1222 * 0) no overlap 1223 * 1) overlap == lock 1224 * 2) overlap contains lock 1225 * 3) lock contains overlap 1226 * 4) overlap starts before lock 1227 * 5) overlap ends after lock 1228 */ 1229 switch (ovcase) { 1230 case 0: /* no overlap */ 1231 break; 1232 1233 case 1: /* overlap == lock */ 1234 /* 1235 * We have already setup the 1236 * dependants for the new lock, taking 1237 * into account a possible downgrade 1238 * or unlock. Remove the old lock. 1239 */ 1240 LIST_REMOVE(overlap, lf_link); 1241 lf_update_dependancies(state, overlap, TRUE, 1242 &granted); 1243 lf_free_lock(overlap); 1244 break; 1245 1246 case 2: /* overlap contains lock */ 1247 /* 1248 * Just split the existing lock. 1249 */ 1250 lf_split(state, overlap, lock, &granted); 1251 break; 1252 1253 case 3: /* lock contains overlap */ 1254 /* 1255 * Delete the overlap and advance to 1256 * the next entry in the list. 1257 */ 1258 lf = LIST_NEXT(overlap, lf_link); 1259 LIST_REMOVE(overlap, lf_link); 1260 lf_update_dependancies(state, overlap, TRUE, 1261 &granted); 1262 lf_free_lock(overlap); 1263 overlap = lf; 1264 continue; 1265 1266 case 4: /* overlap starts before lock */ 1267 /* 1268 * Just update the overlap end and 1269 * move on. 1270 */ 1271 lf_set_end(state, overlap, lock->lf_start - 1, 1272 &granted); 1273 overlap = LIST_NEXT(overlap, lf_link); 1274 continue; 1275 1276 case 5: /* overlap ends after lock */ 1277 /* 1278 * Change the start of overlap and 1279 * re-insert. 1280 */ 1281 lf_set_start(state, overlap, lock->lf_end + 1, 1282 &granted); 1283 break; 1284 } 1285 break; 1286 } 1287#ifdef LOCKF_DEBUG 1288 if (lockf_debug & 1) { 1289 if (lock->lf_type != F_UNLCK) 1290 lf_print("lf_activate_lock: activated", lock); 1291 else 1292 lf_print("lf_activate_lock: unlocked", lock); 1293 lf_printlist("lf_activate_lock", lock); 1294 } 1295#endif /* LOCKF_DEBUG */ 1296 if (lock->lf_type != F_UNLCK) 1297 lf_insert_lock(state, lock); 1298 } 1299} 1300 1301/* 1302 * Cancel a pending lock request, either as a result of a signal or a 1303 * cancel request for an async lock. 
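 *
 * Editor's illustrative note (not part of the original revision): the
 * async path reaches this function through F_CANCEL.  lf_setlock()
 * hands the caller back an opaque cookie (the lockf_entry pointer)
 * along with EINPROGRESS; presenting that cookie in a later F_CANCEL
 * request lets lf_cancel() find the pending entry and drop it here.
 * The signal path is lf_setlock() itself, when sx_sleep() returns
 * while the lock still has out-going edges.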
1304 */ 1305static void 1306lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1307{ 1308 struct lockf_entry_list granted; 1309 1310 /* 1311 * Note it is theoretically possible that cancelling this lock 1312 * may allow some other pending lock to become 1313 * active. Consider this case: 1314 * 1315 * Owner Action Result Dependancies 1316 * 1317 * A: lock [0..0] succeeds 1318 * B: lock [2..2] succeeds 1319 * C: lock [1..2] blocked C->B 1320 * D: lock [0..1] blocked C->B,D->A,D->C 1321 * A: unlock [0..0] C->B,D->C 1322 * C: cancel [1..2] 1323 */ 1324 1325 LIST_REMOVE(lock, lf_link); 1326 1327 /* 1328 * Removing out-going edges is simple. 1329 */ 1330 sx_xlock(&lf_owner_graph_lock); 1331 lf_remove_outgoing(lock); 1332 sx_xunlock(&lf_owner_graph_lock); 1333 1334 /* 1335 * Removing in-coming edges may allow some other lock to 1336 * become active - we use lf_update_dependancies to figure 1337 * this out. 1338 */ 1339 LIST_INIT(&granted); 1340 lf_update_dependancies(state, lock, TRUE, &granted); 1341 lf_free_lock(lock); 1342 1343 /* 1344 * Feed any newly active locks to lf_activate_lock. 1345 */ 1346 while (!LIST_EMPTY(&granted)) { 1347 lock = LIST_FIRST(&granted); 1348 LIST_REMOVE(lock, lf_link); 1349 lf_activate_lock(state, lock); 1350 } 1351} 1352 1353/* 1354 * Set a byte-range lock. 1355 */ 1356static int 1357lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1358 void **cookiep) 1359{ 1360 static char lockstr[] = "lockf"; 1361 int priority, error; 1362 1363#ifdef LOCKF_DEBUG 1364 if (lockf_debug & 1) 1365 lf_print("lf_setlock", lock); 1366#endif /* LOCKF_DEBUG */ 1367 1368 /* 1369 * Set the priority 1370 */ 1371 priority = PLOCK; 1372 if (lock->lf_type == F_WRLCK) 1373 priority += 4; 1374 if (!(lock->lf_flags & F_NOINTR)) 1375 priority |= PCATCH; 1376 /* 1377 * Scan lock list for this file looking for locks that would block us. 1378 */ 1379 if (lf_getblock(state, lock)) { 1380 /* 1381 * Free the structure and return if nonblocking. 1382 */ 1383 if ((lock->lf_flags & F_WAIT) == 0 1384 && lock->lf_async_task == NULL) { 1385 lf_free_lock(lock); 1386 error = EAGAIN; 1387 goto out; 1388 } 1389 1390 /* 1391 * For flock type locks, we must first remove 1392 * any shared locks that we hold before we sleep 1393 * waiting for an exclusive lock. 1394 */ 1395 if ((lock->lf_flags & F_FLOCK) && 1396 lock->lf_type == F_WRLCK) { 1397 lock->lf_type = F_UNLCK; 1398 lf_activate_lock(state, lock); 1399 lock->lf_type = F_WRLCK; 1400 } 1401 1402 /* 1403 * We are blocked. Create edges to each blocking lock, 1404 * checking for deadlock using the owner graph. For 1405 * simplicity, we run deadlock detection for all 1406 * locks, posix and otherwise. 1407 */ 1408 sx_xlock(&lf_owner_graph_lock); 1409 error = lf_add_outgoing(state, lock); 1410 sx_xunlock(&lf_owner_graph_lock); 1411 1412 if (error) { 1413#ifdef LOCKF_DEBUG 1414 if (lockf_debug & 1) 1415 lf_print("lf_setlock: deadlock", lock); 1416#endif 1417 lf_free_lock(lock); 1418 goto out; 1419 } 1420 1421 /* 1422 * We have added edges to everything that blocks 1423 * us. Sleep until they all go away. 
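	 *
	 * Editor's illustrative note (not part of the original
	 * revision): while we sleep, threads releasing locks call
	 * lf_update_dependancies(), which strips our out-going edges;
	 * when the last one goes, lf_wakeup_lock() removes us from
	 * ls_pending and lf_activate_lock() moves us to the active
	 * list, so finding lf_outedges empty after sx_sleep() means
	 * the lock was granted.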
1424 */ 1425 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 1426#ifdef LOCKF_DEBUG 1427 if (lockf_debug & 1) { 1428 struct lockf_edge *e; 1429 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1430 lf_print("lf_setlock: blocking on", e->le_to); 1431 lf_printlist("lf_setlock", e->le_to); 1432 } 1433 } 1434#endif /* LOCKF_DEBUG */ 1435 1436 if ((lock->lf_flags & F_WAIT) == 0) { 1437 /* 1438 * The caller requested async notification - 1439 * this callback happens when the blocking 1440 * lock is released, allowing the caller to 1441 * make another attempt to take the lock. 1442 */ 1443 *cookiep = (void *) lock; 1444 error = EINPROGRESS; 1445 goto out; 1446 } 1447 1448 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 1449 /* 1450 * We may have been awakened by a signal and/or by a 1451 * debugger continuing us (in which cases we must 1452 * remove our lock graph edges) and/or by another 1453 * process releasing a lock (in which case our edges 1454 * have already been removed and we have been moved to 1455 * the active list). We may also have been woken by 1456 * lf_purgelocks which we report to the caller as 1457 * EINTR. In that case, lf_purgelocks will have 1458 * removed our lock graph edges. 1459 * 1460 * Note that it is possible to receive a signal after 1461 * we were successfully woken (and moved to the active 1462 * list) but before we resumed execution. In this 1463 * case, our lf_outedges list will be clear. We 1464 * pretend there was no error. 1465 * 1466 * Note also, if we have been sleeping long enough, we 1467 * may now have incoming edges from some newer lock 1468 * which is waiting behind us in the queue. 1469 */ 1470 if (lock->lf_flags & F_INTR) { 1471 error = EINTR; 1472 lf_free_lock(lock); 1473 goto out; 1474 } 1475 if (LIST_EMPTY(&lock->lf_outedges)) { 1476 error = 0; 1477 } else { 1478 lf_cancel_lock(state, lock); 1479 goto out; 1480 } 1481#ifdef LOCKF_DEBUG 1482 if (lockf_debug & 1) { 1483 lf_print("lf_setlock: granted", lock); 1484 } 1485#endif 1486 goto out; 1487 } 1488 /* 1489 * It looks like we are going to grant the lock. First add 1490 * edges from any currently pending lock that the new lock 1491 * would block. 1492 */ 1493 sx_xlock(&lf_owner_graph_lock); 1494 error = lf_add_incoming(state, lock); 1495 sx_xunlock(&lf_owner_graph_lock); 1496 if (error) { 1497#ifdef LOCKF_DEBUG 1498 if (lockf_debug & 1) 1499 lf_print("lf_setlock: deadlock", lock); 1500#endif 1501 lf_free_lock(lock); 1502 goto out; 1503 } 1504 1505 /* 1506 * No blocks!! Add the lock. Note that we will 1507 * downgrade or upgrade any overlapping locks this 1508 * process already owns. 1509 */ 1510 lf_activate_lock(state, lock); 1511 error = 0; 1512out: 1513 return (error); 1514} 1515 1516/* 1517 * Remove a byte-range lock on an inode. 1518 * 1519 * Generally, find the lock (or an overlap to that lock) 1520 * and remove it (or shrink it), then wakeup anyone we can. 1521 */ 1522static int 1523lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 1524{ 1525 struct lockf_entry *overlap; 1526 1527 overlap = LIST_FIRST(&state->ls_active); 1528 1529 if (overlap == NOLOCKF) 1530 return (0); 1531#ifdef LOCKF_DEBUG 1532 if (unlock->lf_type != F_UNLCK) 1533 panic("lf_clearlock: bad type"); 1534 if (lockf_debug & 1) 1535 lf_print("lf_clearlock", unlock); 1536#endif /* LOCKF_DEBUG */ 1537 1538 lf_activate_lock(state, unlock); 1539 1540 return (0); 1541} 1542 1543/* 1544 * Check whether there is a blocking lock, and if so return its 1545 * details in '*fl'. 
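 *
 * Editor's sketch (not part of the original revision): for an F_GETLK
 * request covering [0..99] while another owner holds a write lock on
 * [50..59], the first blocking entry is reported back as
 * l_type = F_WRLCK, l_whence = SEEK_SET, l_start = 50, l_len = 10,
 * together with the blocking owner's pid and sysid; if nothing
 * blocks, only l_type is set, to F_UNLCK.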
1546 */ 1547static int 1548lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 1549{ 1550 struct lockf_entry *block; 1551 1552#ifdef LOCKF_DEBUG 1553 if (lockf_debug & 1) 1554 lf_print("lf_getlock", lock); 1555#endif /* LOCKF_DEBUG */ 1556 1557 if ((block = lf_getblock(state, lock))) { 1558 fl->l_type = block->lf_type; 1559 fl->l_whence = SEEK_SET; 1560 fl->l_start = block->lf_start; 1561 if (block->lf_end == OFF_MAX) 1562 fl->l_len = 0; 1563 else 1564 fl->l_len = block->lf_end - block->lf_start + 1; 1565 fl->l_pid = block->lf_owner->lo_pid; 1566 fl->l_sysid = block->lf_owner->lo_sysid; 1567 } else { 1568 fl->l_type = F_UNLCK; 1569 } 1570 return (0); 1571} 1572 1573/* 1574 * Cancel an async lock request. 1575 */ 1576static int 1577lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1578{ 1579 struct lockf_entry *reallock; 1580 1581 /* 1582 * We need to match this request with an existing lock 1583 * request. 1584 */ 1585 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1586 if ((void *) reallock == cookie) { 1587 /* 1588 * Double-check that this lock looks right 1589 * (maybe use a rolling ID for the cancel 1590 * cookie instead?) 1591 */ 1592 if (!(reallock->lf_vnode == lock->lf_vnode 1593 && reallock->lf_start == lock->lf_start 1594 && reallock->lf_end == lock->lf_end)) { 1595 return (ENOENT); 1596 } 1597 1598 /* 1599 * Make sure this lock was async and then just 1600 * remove it from its wait lists. 1601 */ 1602 if (!reallock->lf_async_task) { 1603 return (ENOENT); 1604 } 1605 1606 /* 1607 * Note that since any other thread must take 1608 * state->ls_lock before it can possibly 1609 * trigger the async callback, we are safe 1610 * from a race with lf_wakeup_lock, i.e. we 1611 * can free the lock (actually our caller does 1612 * this). 1613 */ 1614 lf_cancel_lock(state, reallock); 1615 return (0); 1616 } 1617 } 1618 1619 /* 1620 * We didn't find a matching lock - not much we can do here. 1621 */ 1622 return (ENOENT); 1623} 1624 1625/* 1626 * Walk the list of locks for an inode and 1627 * return the first blocking lock. 1628 */ 1629static struct lockf_entry * 1630lf_getblock(struct lockf *state, struct lockf_entry *lock) 1631{ 1632 struct lockf_entry *overlap; 1633 1634 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 1635 /* 1636 * We may assume that the active list is sorted by 1637 * lf_start. 1638 */ 1639 if (overlap->lf_start > lock->lf_end) 1640 break; 1641 if (!lf_blocks(lock, overlap)) 1642 continue; 1643 return (overlap); 1644 } 1645 return (NOLOCKF); 1646} 1647 1648/* 1649 * Walk the list of locks for an inode to find an overlapping lock (if 1650 * any) and return a classification of that overlap. 1651 * 1652 * Arguments: 1653 * *overlap The place in the lock list to start looking 1654 * lock The lock which is being tested 1655 * type Pass 'SELF' to test only locks with the same 1656 * owner as lock, or 'OTHER' to test only locks 1657 * with a different owner 1658 * 1659 * Returns one of six values: 1660 * 0) no overlap 1661 * 1) overlap == lock 1662 * 2) overlap contains lock 1663 * 3) lock contains overlap 1664 * 4) overlap starts before lock 1665 * 5) overlap ends after lock 1666 * 1667 * If there is an overlapping lock, '*overlap' is set to point at the 1668 * overlapping lock. 1669 * 1670 * NOTE: this returns only the FIRST overlapping lock. There 1671 * may be more than one. 
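 *
 * Editor's sketch (not part of the original revision): with
 * lock = [4..7], an existing entry would be classified as
 *
 *	[1..2]	case 0, no overlap
 *	[4..7]	case 1, overlap == lock
 *	[2..9]	case 2, overlap contains lock
 *	[5..6]	case 3, lock contains overlap
 *	[2..5]	case 4, overlap starts before lock
 *	[5..9]	case 5, overlap ends after lock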
1672 */ 1673static int 1674lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) 1675{ 1676 struct lockf_entry *lf; 1677 off_t start, end; 1678 int res; 1679 1680 if ((*overlap) == NOLOCKF) { 1681 return (0); 1682 } 1683#ifdef LOCKF_DEBUG 1684 if (lockf_debug & 2) 1685 lf_print("lf_findoverlap: looking for overlap in", lock); 1686#endif /* LOCKF_DEBUG */ 1687 start = lock->lf_start; 1688 end = lock->lf_end; 1689 res = 0; 1690 while (*overlap) { 1691 lf = *overlap; 1692 if (lf->lf_start > end) 1693 break; 1694 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || 1695 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { 1696 *overlap = LIST_NEXT(lf, lf_link); 1697 continue; 1698 } 1699#ifdef LOCKF_DEBUG 1700 if (lockf_debug & 2) 1701 lf_print("\tchecking", lf); 1702#endif /* LOCKF_DEBUG */ 1703 /* 1704 * OK, check for overlap 1705 * 1706 * Six cases: 1707 * 0) no overlap 1708 * 1) overlap == lock 1709 * 2) overlap contains lock 1710 * 3) lock contains overlap 1711 * 4) overlap starts before lock 1712 * 5) overlap ends after lock 1713 */ 1714 if (start > lf->lf_end) { 1715 /* Case 0 */ 1716#ifdef LOCKF_DEBUG 1717 if (lockf_debug & 2) 1718 printf("no overlap\n"); 1719#endif /* LOCKF_DEBUG */ 1720 *overlap = LIST_NEXT(lf, lf_link); 1721 continue; 1722 } 1723 if (lf->lf_start == start && lf->lf_end == end) { 1724 /* Case 1 */ 1725#ifdef LOCKF_DEBUG 1726 if (lockf_debug & 2) 1727 printf("overlap == lock\n"); 1728#endif /* LOCKF_DEBUG */ 1729 res = 1; 1730 break; 1731 } 1732 if (lf->lf_start <= start && lf->lf_end >= end) { 1733 /* Case 2 */ 1734#ifdef LOCKF_DEBUG 1735 if (lockf_debug & 2) 1736 printf("overlap contains lock\n"); 1737#endif /* LOCKF_DEBUG */ 1738 res = 2; 1739 break; 1740 } 1741 if (start <= lf->lf_start && end >= lf->lf_end) { 1742 /* Case 3 */ 1743#ifdef LOCKF_DEBUG 1744 if (lockf_debug & 2) 1745 printf("lock contains overlap\n"); 1746#endif /* LOCKF_DEBUG */ 1747 res = 3; 1748 break; 1749 } 1750 if (lf->lf_start < start && lf->lf_end >= start) { 1751 /* Case 4 */ 1752#ifdef LOCKF_DEBUG 1753 if (lockf_debug & 2) 1754 printf("overlap starts before lock\n"); 1755#endif /* LOCKF_DEBUG */ 1756 res = 4; 1757 break; 1758 } 1759 if (lf->lf_start > start && lf->lf_end > end) { 1760 /* Case 5 */ 1761#ifdef LOCKF_DEBUG 1762 if (lockf_debug & 2) 1763 printf("overlap ends after lock\n"); 1764#endif /* LOCKF_DEBUG */ 1765 res = 5; 1766 break; 1767 } 1768 panic("lf_findoverlap: default"); 1769 } 1770 return (res); 1771} 1772 1773/* 1774 * Split an the existing 'lock1', based on the extent of the lock 1775 * described by 'lock2'. The existing lock should cover 'lock2' 1776 * entirely. 1777 * 1778 * Any pending locks which have been been unblocked are added to 1779 * 'granted' 1780 */ 1781static void 1782lf_split(struct lockf *state, struct lockf_entry *lock1, 1783 struct lockf_entry *lock2, struct lockf_entry_list *granted) 1784{ 1785 struct lockf_entry *splitlock; 1786 1787#ifdef LOCKF_DEBUG 1788 if (lockf_debug & 2) { 1789 lf_print("lf_split", lock1); 1790 lf_print("splitting from", lock2); 1791 } 1792#endif /* LOCKF_DEBUG */ 1793 /* 1794 * Check to see if we don't need to split at all. 1795 */ 1796 if (lock1->lf_start == lock2->lf_start) { 1797 lf_set_start(state, lock1, lock2->lf_end + 1, granted); 1798 return; 1799 } 1800 if (lock1->lf_end == lock2->lf_end) { 1801 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1802 return; 1803 } 1804 /* 1805 * Make a new lock consisting of the last part of 1806 * the encompassing lock. 
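	 *
	 * Editor's sketch (not part of the original revision): if lock1
	 * covers [0..99] and lock2 describes an unlock of [40..49],
	 * neither end coincides, so lock1 is trimmed to [0..39] below
	 * and splitlock becomes [50..99].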
1807 */ 1808 splitlock = lf_alloc_lock(lock1->lf_owner); 1809 memcpy(splitlock, lock1, sizeof *splitlock); 1810 if (splitlock->lf_flags & F_REMOTE) 1811 vref(splitlock->lf_vnode); 1812 1813 /* 1814 * This cannot cause a deadlock since any edges we would add 1815 * to splitlock already exist in lock1. We must be sure to add 1816 * necessary dependancies to splitlock before we reduce lock1 1817 * otherwise we may accidentally grant a pending lock that 1818 * was blocked by the tail end of lock1. 1819 */ 1820 splitlock->lf_start = lock2->lf_end + 1; 1821 LIST_INIT(&splitlock->lf_outedges); 1822 LIST_INIT(&splitlock->lf_inedges); 1823 sx_xlock(&lf_owner_graph_lock); 1824 lf_add_incoming(state, splitlock); 1825 sx_xunlock(&lf_owner_graph_lock); 1826 1827 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1828 1829 /* 1830 * OK, now link it in 1831 */ 1832 lf_insert_lock(state, splitlock); 1833} 1834 1835struct lockdesc { 1836 STAILQ_ENTRY(lockdesc) link; 1837 struct vnode *vp; 1838 struct flock fl; 1839}; 1840STAILQ_HEAD(lockdesclist, lockdesc); 1841 1842int 1843lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg) 1844{ 1845 struct lockf *ls; 1846 struct lockf_entry *lf; 1847 struct lockdesc *ldesc; 1848 struct lockdesclist locks; 1849 int error; 1850 1851 /* 1852 * In order to keep the locking simple, we iterate over the 1853 * active lock lists to build a list of locks that need 1854 * releasing. We then call the iterator for each one in turn. 1855 * 1856 * We take an extra reference to the vnode for the duration to 1857 * make sure it doesn't go away before we are finished. 1858 */ 1859 STAILQ_INIT(&locks); 1860 sx_xlock(&lf_lock_states_lock); 1861 LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1862 sx_xlock(&ls->ls_lock); 1863 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1864 if (lf->lf_owner->lo_sysid != sysid) 1865 continue; 1866 1867 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1868 M_WAITOK); 1869 ldesc->vp = lf->lf_vnode; 1870 vref(ldesc->vp); 1871 ldesc->fl.l_start = lf->lf_start; 1872 if (lf->lf_end == OFF_MAX) 1873 ldesc->fl.l_len = 0; 1874 else 1875 ldesc->fl.l_len = 1876 lf->lf_end - lf->lf_start + 1; 1877 ldesc->fl.l_whence = SEEK_SET; 1878 ldesc->fl.l_type = F_UNLCK; 1879 ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1880 ldesc->fl.l_sysid = sysid; 1881 STAILQ_INSERT_TAIL(&locks, ldesc, link); 1882 } 1883 sx_xunlock(&ls->ls_lock); 1884 } 1885 sx_xunlock(&lf_lock_states_lock); 1886 1887 /* 1888 * Call the iterator function for each lock in turn. If the 1889 * iterator returns an error code, just free the rest of the 1890 * lockdesc structures. 1891 */ 1892 error = 0; 1893 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1894 STAILQ_REMOVE_HEAD(&locks, link); 1895 if (!error) 1896 error = fn(ldesc->vp, &ldesc->fl, arg); 1897 vrele(ldesc->vp); 1898 free(ldesc, M_LOCKF); 1899 } 1900 1901 return (error); 1902} 1903 1904int 1905lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg) 1906{ 1907 struct lockf *ls; 1908 struct lockf_entry *lf; 1909 struct lockdesc *ldesc; 1910 struct lockdesclist locks; 1911 int error; 1912 1913 /* 1914 * In order to keep the locking simple, we iterate over the 1915 * active lock lists to build a list of locks that need 1916 * releasing. We then call the iterator for each one in turn. 1917 * 1918 * We take an extra reference to the vnode for the duration to 1919 * make sure it doesn't go away before we are finished. 
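	 *
	 * Editor's illustrative note (not part of the original
	 * revision): the supplied fn is invoked once per collected
	 * lock; lf_clearremotesys() below uses the sysid variant of
	 * this pattern with an iterator that issues an F_UNLCK
	 * VOP_ADVLOCK for every entry.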
1920 */ 1921 STAILQ_INIT(&locks); 1922 ls = vp->v_lockf; 1923 if (!ls) 1924 return (0); 1925 1926 sx_xlock(&ls->ls_lock); 1927 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1928 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF, 1929 M_WAITOK); 1930 ldesc->vp = lf->lf_vnode; 1931 vref(ldesc->vp); 1932 ldesc->fl.l_start = lf->lf_start; 1933 if (lf->lf_end == OFF_MAX) 1934 ldesc->fl.l_len = 0; 1935 else 1936 ldesc->fl.l_len = 1937 lf->lf_end - lf->lf_start + 1; 1938 ldesc->fl.l_whence = SEEK_SET; 1939 ldesc->fl.l_type = F_UNLCK; 1940 ldesc->fl.l_pid = lf->lf_owner->lo_pid; 1941 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; 1942 STAILQ_INSERT_TAIL(&locks, ldesc, link); 1943 } 1944 sx_xunlock(&ls->ls_lock); 1945 1946 /* 1947 * Call the iterator function for each lock in turn. If the 1948 * iterator returns an error code, just free the rest of the 1949 * lockdesc structures. 1950 */ 1951 error = 0; 1952 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) { 1953 STAILQ_REMOVE_HEAD(&locks, link); 1954 if (!error) 1955 error = fn(ldesc->vp, &ldesc->fl, arg); 1956 vrele(ldesc->vp); 1957 free(ldesc, M_LOCKF); 1958 } 1959 1960 return (error); 1961} 1962 1963static int 1964lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg) 1965{ 1966 1967 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE); 1968 return (0); 1969} 1970 1971void 1972lf_clearremotesys(int sysid) 1973{ 1974 1975 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1976 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL); 1977} 1978 1979int 1980lf_countlocks(int sysid) 1981{ 1982 int i; 1983 struct lock_owner *lo; 1984 int count; 1985 1986 count = 0; 1987 sx_xlock(&lf_lock_owners_lock); 1988 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 1989 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 1990 if (lo->lo_sysid == sysid) 1991 count += lo->lo_refs; 1992 sx_xunlock(&lf_lock_owners_lock); 1993 1994 return (count); 1995} 1996 1997#ifdef LOCKF_DEBUG 1998 1999/* 2000 * Return non-zero if y is reachable from x using a brute force 2001 * search. If reachable and path is non-null, return the route taken 2002 * in path. 2003 */ 2004static int 2005graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 2006 struct owner_vertex_list *path) 2007{ 2008 struct owner_edge *e; 2009 2010 if (x == y) { 2011 if (path) 2012 TAILQ_INSERT_HEAD(path, x, v_link); 2013 return 1; 2014 } 2015 2016 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2017 if (graph_reaches(e->e_to, y, path)) { 2018 if (path) 2019 TAILQ_INSERT_HEAD(path, x, v_link); 2020 return 1; 2021 } 2022 } 2023 return 0; 2024} 2025 2026/* 2027 * Perform consistency checks on the graph. Make sure the values of 2028 * v_order are correct. If checkorder is non-zero, check no vertex can 2029 * reach any other vertex with a smaller order. 
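 *
 * Editor's illustrative note (not part of the original revision):
 * edges in the owner graph run from the owner of a waiting lock to
 * the owner of the lock it waits on, so the topological order keeps
 * v_order(waiter) < v_order(blocker).  If owner A blocks on owner B,
 * A's vertex must be ordered before B's, and no directed path may
 * lead from any vertex to one with a smaller v_order.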
2030 */ 2031static void 2032graph_check(struct owner_graph *g, int checkorder) 2033{ 2034 int i, j; 2035 2036 for (i = 0; i < g->g_size; i++) { 2037 if (!g->g_vertices[i]->v_owner) 2038 continue; 2039 KASSERT(g->g_vertices[i]->v_order == i, 2040 ("lock graph vertices disordered")); 2041 if (checkorder) { 2042 for (j = 0; j < i; j++) { 2043 if (!g->g_vertices[j]->v_owner) 2044 continue; 2045 KASSERT(!graph_reaches(g->g_vertices[i], 2046 g->g_vertices[j], NULL), 2047 ("lock graph vertices disordered")); 2048 } 2049 } 2050 } 2051} 2052 2053static void 2054graph_print_vertices(struct owner_vertex_list *set) 2055{ 2056 struct owner_vertex *v; 2057 2058 printf("{ "); 2059 TAILQ_FOREACH(v, set, v_link) { 2060 printf("%d:", v->v_order); 2061 lf_print_owner(v->v_owner); 2062 if (TAILQ_NEXT(v, v_link)) 2063 printf(", "); 2064 } 2065 printf(" }\n"); 2066} 2067 2068#endif 2069 2070/* 2071 * Calculate the sub-set of vertices v from the affected region [y..x] 2072 * where v is reachable from y. Return -1 if a loop was detected 2073 * (i.e. x is reachable from y), otherwise the number of vertices in 2074 * this subset. 2075 */ 2076static int 2077graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, 2078 struct owner_vertex *y, struct owner_vertex_list *delta) 2079{ 2080 uint32_t gen; 2081 struct owner_vertex *v; 2082 struct owner_edge *e; 2083 int n; 2084 2085 /* 2086 * We start with a set containing just y. Then for each vertex 2087 * v in the set so far unprocessed, we add each vertex that v 2088 * has an out-edge to and that is within the affected region 2089 * [y..x]. If we see the vertex x on our travels, stop 2090 * immediately. 2091 */ 2092 TAILQ_INIT(delta); 2093 TAILQ_INSERT_TAIL(delta, y, v_link); 2094 v = y; 2095 n = 1; 2096 gen = g->g_gen; 2097 while (v) { 2098 LIST_FOREACH(e, &v->v_outedges, e_outlink) { 2099 if (e->e_to == x) 2100 return -1; 2101 if (e->e_to->v_order < x->v_order 2102 && e->e_to->v_gen != gen) { 2103 e->e_to->v_gen = gen; 2104 TAILQ_INSERT_TAIL(delta, e->e_to, v_link); 2105 n++; 2106 } 2107 } 2108 v = TAILQ_NEXT(v, v_link); 2109 } 2110 2111 return (n); 2112} 2113 2114/* 2115 * Calculate the sub-set of vertices v from the affected region [y..x] 2116 * where v reaches x. Return the number of vertices in this subset. 2117 */ 2118static int 2119graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 2120 struct owner_vertex *y, struct owner_vertex_list *delta) 2121{ 2122 uint32_t gen; 2123 struct owner_vertex *v; 2124 struct owner_edge *e; 2125 int n; 2126 2127 /* 2128 * We start with a set containing just x. Then for each vertex 2129 * v in the set so far unprocessed, we add each vertex that v 2130 * has an in-edge from and that is within the affected region 2131 * [y..x].
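 * Newly discovered vertices are inserted at the head of the list
 * while the scan itself walks backwards from x using TAILQ_PREV(),
 * so every vertex added to the set is still ahead of the scan and
 * will be processed in turn before the walk runs off the front of
 * the list.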
2132 */ 2133 TAILQ_INIT(delta); 2134 TAILQ_INSERT_TAIL(delta, x, v_link); 2135 v = x; 2136 n = 1; 2137 gen = g->g_gen; 2138 while (v) { 2139 LIST_FOREACH(e, &v->v_inedges, e_inlink) { 2140 if (e->e_from->v_order > y->v_order 2141 && e->e_from->v_gen != gen) { 2142 e->e_from->v_gen = gen; 2143 TAILQ_INSERT_HEAD(delta, e->e_from, v_link); 2144 n++; 2145 } 2146 } 2147 v = TAILQ_PREV(v, owner_vertex_list, v_link); 2148 } 2149 2150 return (n); 2151} 2152 2153static int 2154graph_add_indices(int *indices, int n, struct owner_vertex_list *set) 2155{ 2156 struct owner_vertex *v; 2157 int i, j; 2158 2159 TAILQ_FOREACH(v, set, v_link) { 2160 for (i = n; 2161 i > 0 && indices[i - 1] > v->v_order; i--) 2162 ; 2163 for (j = n - 1; j >= i; j--) 2164 indices[j + 1] = indices[j]; 2165 indices[i] = v->v_order; 2166 n++; 2167 } 2168 2169 return (n); 2170} 2171 2172static int 2173graph_assign_indices(struct owner_graph *g, int *indices, int nextunused, 2174 struct owner_vertex_list *set) 2175{ 2176 struct owner_vertex *v, *vlowest; 2177 2178 while (!TAILQ_EMPTY(set)) { 2179 vlowest = NULL; 2180 TAILQ_FOREACH(v, set, v_link) { 2181 if (!vlowest || v->v_order < vlowest->v_order) 2182 vlowest = v; 2183 } 2184 TAILQ_REMOVE(set, vlowest, v_link); 2185 vlowest->v_order = indices[nextunused]; 2186 g->g_vertices[vlowest->v_order] = vlowest; 2187 nextunused++; 2188 } 2189 2190 return (nextunused); 2191} 2192 2193static int 2194graph_add_edge(struct owner_graph *g, struct owner_vertex *x, 2195 struct owner_vertex *y) 2196{ 2197 struct owner_edge *e; 2198 struct owner_vertex_list deltaF, deltaB; 2199 int nF, nB, n, vi, i; 2200 int *indices; 2201 2202 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2203 2204 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2205 if (e->e_to == y) { 2206 e->e_refs++; 2207 return (0); 2208 } 2209 } 2210 2211#ifdef LOCKF_DEBUG 2212 if (lockf_debug & 8) { 2213 printf("adding edge %d:", x->v_order); 2214 lf_print_owner(x->v_owner); 2215 printf(" -> %d:", y->v_order); 2216 lf_print_owner(y->v_owner); 2217 printf("\n"); 2218 } 2219#endif 2220 if (y->v_order < x->v_order) { 2221 /* 2222 * The new edge violates the order. First find the set 2223 * of affected vertices reachable from y (deltaF) and 2224 * the set of affected vertices that reach x 2225 * (deltaB), using the graph generation number to 2226 * detect whether we have visited a given vertex 2227 * already. We re-order the graph so that each vertex 2228 * in deltaB appears before each vertex in deltaF. 2229 * 2230 * If x is a member of deltaF, then the new edge would 2231 * create a cycle. Otherwise, we may assume that 2232 * deltaF and deltaB are disjoint. (A worked example of this re-ordering appears in a comment at the end of this file.) 2233 */ 2234 g->g_gen++; 2235 if (g->g_gen == 0) { 2236 /* 2237 * Generation wrap.
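 * g_gen has overflowed back to zero, so clear every
 * vertex's v_gen mark; otherwise marks left over from
 * traversals made before the wrap could be mistaken for
 * marks made in the current pass.  The generation is then
 * bumped once more so that it is again non-zero.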
2238 */ 2239 for (vi = 0; vi < g->g_size; vi++) { 2240 g->g_vertices[vi]->v_gen = 0; 2241 } 2242 g->g_gen++; 2243 } 2244 nF = graph_delta_forward(g, x, y, &deltaF); 2245 if (nF < 0) { 2246#ifdef LOCKF_DEBUG 2247 if (lockf_debug & 8) { 2248 struct owner_vertex_list path; 2249 printf("deadlock: "); 2250 TAILQ_INIT(&path); 2251 graph_reaches(y, x, &path); 2252 graph_print_vertices(&path); 2253 } 2254#endif 2255 return (EDEADLK); 2256 } 2257 2258#ifdef LOCKF_DEBUG 2259 if (lockf_debug & 8) { 2260 printf("re-ordering graph vertices\n"); 2261 printf("deltaF = "); 2262 graph_print_vertices(&deltaF); 2263 } 2264#endif 2265 2266 nB = graph_delta_backward(g, x, y, &deltaB); 2267 2268#ifdef LOCKF_DEBUG 2269 if (lockf_debug & 8) { 2270 printf("deltaB = "); 2271 graph_print_vertices(&deltaB); 2272 } 2273#endif 2274 2275 /* 2276 * We first build a set of vertex indices (vertex 2277 * order values) that we may use, then we re-assign 2278 * orders first to those vertices in deltaB, then to 2279 * deltaF. Note that the contents of deltaF and deltaB 2280 * may be partially disordered - we perform an 2281 * insertion sort while building our index set. 2282 */ 2283 indices = g->g_indexbuf; 2284 n = graph_add_indices(indices, 0, &deltaF); 2285 graph_add_indices(indices, n, &deltaB); 2286 2287 /* 2288 * We must also be sure to maintain the relative 2289 * ordering of deltaF and deltaB when re-assigning 2290 * vertices. We do this by iteratively removing the 2291 * lowest ordered element from the set and assigning 2292 * it the next value from our new ordering. 2293 */ 2294 i = graph_assign_indices(g, indices, 0, &deltaB); 2295 graph_assign_indices(g, indices, i, &deltaF); 2296 2297#ifdef LOCKF_DEBUG 2298 if (lockf_debug & 8) { 2299 struct owner_vertex_list set; 2300 TAILQ_INIT(&set); 2301 for (i = 0; i < nB + nF; i++) 2302 TAILQ_INSERT_TAIL(&set, 2303 g->g_vertices[indices[i]], v_link); 2304 printf("new ordering = "); 2305 graph_print_vertices(&set); 2306 } 2307#endif 2308 } 2309 2310 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph")); 2311 2312#ifdef LOCKF_DEBUG 2313 if (lockf_debug & 8) { 2314 graph_check(g, TRUE); 2315 } 2316#endif 2317 2318 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK); 2319 2320 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink); 2321 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink); 2322 e->e_refs = 1; 2323 e->e_from = x; 2324 e->e_to = y; 2325 2326 return (0); 2327} 2328 2329/* 2330 * Remove an edge x->y from the graph. 2331 */ 2332static void 2333graph_remove_edge(struct owner_graph *g, struct owner_vertex *x, 2334 struct owner_vertex *y) 2335{ 2336 struct owner_edge *e; 2337 2338 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2339 2340 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2341 if (e->e_to == y) 2342 break; 2343 } 2344 KASSERT(e, ("Removing non-existent edge from deadlock graph")); 2345 2346 e->e_refs--; 2347 if (e->e_refs == 0) { 2348#ifdef LOCKF_DEBUG 2349 if (lockf_debug & 8) { 2350 printf("removing edge %d:", x->v_order); 2351 lf_print_owner(x->v_owner); 2352 printf(" -> %d:", y->v_order); 2353 lf_print_owner(y->v_owner); 2354 printf("\n"); 2355 } 2356#endif 2357 LIST_REMOVE(e, e_outlink); 2358 LIST_REMOVE(e, e_inlink); 2359 free(e, M_LOCKF); 2360 } 2361} 2362 2363/* 2364 * Allocate a vertex from the free list. Return ENOMEM if there are 2365 * none. 
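 * The vertex array and the scratch index buffer are doubled in size
 * whenever they fill up (the allocations wait rather than fail), and
 * the new vertex is appended with the highest v_order.
 * graph_free_vertex() later closes the gap in the array and
 * renumbers the vertices that followed the freed one.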
2366 */ 2367static struct owner_vertex * 2368graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) 2369{ 2370 struct owner_vertex *v; 2371 2372 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2373 2374 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); 2375 if (g->g_size == g->g_space) { 2376 g->g_vertices = realloc(g->g_vertices, 2377 2 * g->g_space * sizeof(struct owner_vertex *), 2378 M_LOCKF, M_WAITOK); 2379 free(g->g_indexbuf, M_LOCKF); 2380 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), 2381 M_LOCKF, M_WAITOK); 2382 g->g_space = 2 * g->g_space; 2383 } 2384 v->v_order = g->g_size; 2385 v->v_gen = g->g_gen; 2386 g->g_vertices[g->g_size] = v; 2387 g->g_size++; 2388 2389 LIST_INIT(&v->v_outedges); 2390 LIST_INIT(&v->v_inedges); 2391 v->v_owner = lo; 2392 2393 return (v); 2394} 2395 2396static void 2397graph_free_vertex(struct owner_graph *g, struct owner_vertex *v) 2398{ 2399 struct owner_vertex *w; 2400 int i; 2401 2402 sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2403 2404 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); 2405 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); 2406 2407 /* 2408 * Remove from the graph's array and close up the gap, 2409 * renumbering the other vertices. 2410 */ 2411 for (i = v->v_order + 1; i < g->g_size; i++) { 2412 w = g->g_vertices[i]; 2413 w->v_order--; 2414 g->g_vertices[i - 1] = w; 2415 } 2416 g->g_size--; 2417 2418 free(v, M_LOCKF); 2419} 2420 2421static struct owner_graph * 2422graph_init(struct owner_graph *g) 2423{ 2424 2425 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), 2426 M_LOCKF, M_WAITOK); 2427 g->g_size = 0; 2428 g->g_space = 10; 2429 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); 2430 g->g_gen = 0; 2431 2432 return (g); 2433} 2434 2435#ifdef LOCKF_DEBUG 2436/* 2437 * Print description of a lock owner 2438 */ 2439static void 2440lf_print_owner(struct lock_owner *lo) 2441{ 2442 2443 if (lo->lo_flags & F_REMOTE) { 2444 printf("remote pid %d, system %d", 2445 lo->lo_pid, lo->lo_sysid); 2446 } else if (lo->lo_flags & F_FLOCK) { 2447 printf("file %p", lo->lo_id); 2448 } else { 2449 printf("local pid %d", lo->lo_pid); 2450 } 2451} 2452 2453/* 2454 * Print out a lock. 2455 */ 2456static void 2457lf_print(char *tag, struct lockf_entry *lock) 2458{ 2459 2460 printf("%s: lock %p for ", tag, (void *)lock); 2461 lf_print_owner(lock->lf_owner); 2462 if (lock->lf_inode != (struct inode *)0) 2463 printf(" in ino %ju on dev <%s>,", 2464 (uintmax_t)lock->lf_inode->i_number, 2465 devtoname(lock->lf_inode->i_dev)); 2466 printf(" %s, start %jd, end ", 2467 lock->lf_type == F_RDLCK ? "shared" : 2468 lock->lf_type == F_WRLCK ? "exclusive" : 2469 lock->lf_type == F_UNLCK ? 
"unlock" : "unknown", 2470 (intmax_t)lock->lf_start); 2471 if (lock->lf_end == OFF_MAX) 2472 printf("EOF"); 2473 else 2474 printf("%jd", (intmax_t)lock->lf_end); 2475 if (!LIST_EMPTY(&lock->lf_outedges)) 2476 printf(" block %p\n", 2477 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); 2478 else 2479 printf("\n"); 2480} 2481 2482static void 2483lf_printlist(char *tag, struct lockf_entry *lock) 2484{ 2485 struct lockf_entry *lf, *blk; 2486 struct lockf_edge *e; 2487 2488 if (lock->lf_inode == (struct inode *)0) 2489 return; 2490 2491 printf("%s: Lock list for ino %ju on dev <%s>:\n", 2492 tag, (uintmax_t)lock->lf_inode->i_number, 2493 devtoname(lock->lf_inode->i_dev)); 2494 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { 2495 printf("\tlock %p for ",(void *)lf); 2496 lf_print_owner(lock->lf_owner); 2497 printf(", %s, start %jd, end %jd", 2498 lf->lf_type == F_RDLCK ? "shared" : 2499 lf->lf_type == F_WRLCK ? "exclusive" : 2500 lf->lf_type == F_UNLCK ? "unlock" : 2501 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); 2502 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { 2503 blk = e->le_to; 2504 printf("\n\t\tlock request %p for ", (void *)blk); 2505 lf_print_owner(blk->lf_owner); 2506 printf(", %s, start %jd, end %jd", 2507 blk->lf_type == F_RDLCK ? "shared" : 2508 blk->lf_type == F_WRLCK ? "exclusive" : 2509 blk->lf_type == F_UNLCK ? "unlock" : 2510 "unknown", (intmax_t)blk->lf_start, 2511 (intmax_t)blk->lf_end); 2512 if (!LIST_EMPTY(&blk->lf_inedges)) 2513 panic("lf_printlist: bad list"); 2514 } 2515 printf("\n"); 2516 } 2517} 2518#endif /* LOCKF_DEBUG */ 2519