kern_lockf.c revision 178247
1/*- 2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ 3 * Authors: Doug Rabson <dfr@rabson.org> 4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27/*- 28 * Copyright (c) 1982, 1986, 1989, 1993 29 * The Regents of the University of California. All rights reserved. 30 * 31 * This code is derived from software contributed to Berkeley by 32 * Scooter Morris at Genentech Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 4. Neither the name of the University nor the names of its contributors 43 * may be used to endorse or promote products derived from this software 44 * without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 56 * SUCH DAMAGE. 
57 * 58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 59 */ 60 61#include <sys/cdefs.h> 62__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 178247 2008-04-16 14:08:12Z dfr $"); 63 64#include "opt_debug_lockf.h" 65 66#include <sys/param.h> 67#include <sys/systm.h> 68#include <sys/hash.h> 69#include <sys/kernel.h> 70#include <sys/limits.h> 71#include <sys/lock.h> 72#include <sys/mount.h> 73#include <sys/mutex.h> 74#include <sys/proc.h> 75#include <sys/sx.h> 76#include <sys/unistd.h> 77#include <sys/vnode.h> 78#include <sys/malloc.h> 79#include <sys/fcntl.h> 80#include <sys/lockf.h> 81#include <sys/taskqueue.h> 82 83#ifdef LOCKF_DEBUG 84#include <sys/sysctl.h> 85 86#include <ufs/ufs/quota.h> 87#include <ufs/ufs/inode.h> 88 89static int lockf_debug = 0; /* control debug output */ 90SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, ""); 91#endif 92 93MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); 94 95struct owner_edge; 96struct owner_vertex; 97struct owner_vertex_list; 98struct owner_graph; 99 100#define NOLOCKF (struct lockf_entry *)0 101#define SELF 0x1 102#define OTHERS 0x2 103static void lf_init(void *); 104static int lf_hash_owner(caddr_t, struct flock *, int); 105static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *, 106 int); 107static struct lockf_entry * 108 lf_alloc_lock(struct lock_owner *); 109static void lf_free_lock(struct lockf_entry *); 110static int lf_clearlock(struct lockf *, struct lockf_entry *); 111static int lf_overlaps(struct lockf_entry *, struct lockf_entry *); 112static int lf_blocks(struct lockf_entry *, struct lockf_entry *); 113static void lf_free_edge(struct lockf_edge *); 114static struct lockf_edge * 115 lf_alloc_edge(void); 116static void lf_alloc_vertex(struct lockf_entry *); 117static int lf_add_edge(struct lockf_entry *, struct lockf_entry *); 118static void lf_remove_edge(struct lockf_edge *); 119static void lf_remove_outgoing(struct lockf_entry *); 120static void lf_remove_incoming(struct lockf_entry *); 121static int lf_add_outgoing(struct lockf *, struct lockf_entry *); 122static int lf_add_incoming(struct lockf *, struct lockf_entry *); 123static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *, 124 int); 125static struct lockf_entry * 126 lf_getblock(struct lockf *, struct lockf_entry *); 127static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *); 128static void lf_insert_lock(struct lockf *, struct lockf_entry *); 129static void lf_wakeup_lock(struct lockf *, struct lockf_entry *); 130static void lf_update_dependancies(struct lockf *, struct lockf_entry *, 131 int all, struct lockf_entry_list *); 132static void lf_set_start(struct lockf *, struct lockf_entry *, off_t, 133 struct lockf_entry_list*); 134static void lf_set_end(struct lockf *, struct lockf_entry *, off_t, 135 struct lockf_entry_list*); 136static int lf_setlock(struct lockf *, struct lockf_entry *, 137 struct vnode *, void **cookiep); 138static int lf_cancel(struct lockf *, struct lockf_entry *, void *); 139static void lf_split(struct lockf *, struct lockf_entry *, 140 struct lockf_entry *, struct lockf_entry_list *); 141#ifdef LOCKF_DEBUG 142static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 143 struct owner_vertex_list *path); 144static void graph_check(struct owner_graph *g, int checkorder); 145static void graph_print_vertices(struct owner_vertex_list *set); 146#endif 147static int graph_delta_forward(struct owner_graph *g, 148 struct owner_vertex *x, struct owner_vertex *y, 149 
struct owner_vertex_list *delta); 150static int graph_delta_backward(struct owner_graph *g, 151 struct owner_vertex *x, struct owner_vertex *y, 152 struct owner_vertex_list *delta); 153static int graph_add_indices(int *indices, int n, 154 struct owner_vertex_list *set); 155static int graph_assign_indices(struct owner_graph *g, int *indices, 156 int nextunused, struct owner_vertex_list *set); 157static int graph_add_edge(struct owner_graph *g, 158 struct owner_vertex *x, struct owner_vertex *y); 159static void graph_remove_edge(struct owner_graph *g, 160 struct owner_vertex *x, struct owner_vertex *y); 161static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g, 162 struct lock_owner *lo); 163static void graph_free_vertex(struct owner_graph *g, 164 struct owner_vertex *v); 165static struct owner_graph * graph_init(struct owner_graph *g); 166#ifdef LOCKF_DEBUG 167static void lf_print(char *, struct lockf_entry *); 168static void lf_printlist(char *, struct lockf_entry *); 169static void lf_print_owner(struct lock_owner *); 170#endif 171 172/* 173 * This structure is used to keep track of both local and remote lock 174 * owners. The lf_owner field of the struct lockf_entry points back at 175 * the lock owner structure. Each possible lock owner (local proc for 176 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid> 177 * pair for remote locks) is represented by a unique instance of 178 * struct lock_owner. 179 * 180 * If a lock owner has a lock that blocks some other lock or a lock 181 * that is waiting for some other lock, it also has a vertex in the 182 * owner_graph below. 183 * 184 * Locks: 185 * (s) locked by state->ls_lock 186 * (S) locked by lf_lock_states_lock 187 * (l) locked by lf_lock_owners_lock 188 * (g) locked by lf_owner_graph_lock 189 * (c) const until freeing 190 */ 191#define LOCK_OWNER_HASH_SIZE 256 192 193struct lock_owner { 194 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ 195 int lo_refs; /* (l) Number of locks referring to this */ 196 int lo_flags; /* (c) Flags passwd to lf_advlock */ 197 caddr_t lo_id; /* (c) Id value passed to lf_advlock */ 198 pid_t lo_pid; /* (c) Process Id of the lock owner */ 199 int lo_sysid; /* (c) System Id of the lock owner */ 200 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ 201}; 202 203LIST_HEAD(lock_owner_list, lock_owner); 204 205static struct sx lf_lock_states_lock; 206static struct lockf_list lf_lock_states; /* (S) */ 207static struct sx lf_lock_owners_lock; 208static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */ 209 210/* 211 * Structures for deadlock detection. 212 * 213 * We have two types of directed graph, the first is the set of locks, 214 * both active and pending on a vnode. Within this graph, active locks 215 * are terminal nodes in the graph (i.e. have no out-going 216 * edges). Pending locks have out-going edges to each blocking active 217 * lock that prevents the lock from being granted and also to each 218 * older pending lock that would block them if it was active. The 219 * graph for each vnode is naturally acyclic; new edges are only ever 220 * added to or from new nodes (either new pending locks which only add 221 * out-going edges or new active locks which only add in-coming edges) 222 * therefore they cannot create loops in the lock graph. 223 * 224 * The second graph is a global graph of lock owners. 
Each lock owner 225 * is a vertex in that graph and an edge is added to the graph 226 * whenever an edge is added to a vnode graph, with end points 227 * corresponding to owner of the new pending lock and the owner of the 228 * lock upon which it waits. In order to prevent deadlock, we only add 229 * an edge to this graph if the new edge would not create a cycle. 230 * 231 * The lock owner graph is topologically sorted, i.e. if a node has 232 * any outgoing edges, then it has an order strictly less than any 233 * node to which it has an outgoing edge. We preserve this ordering 234 * (and detect cycles) on edge insertion using Algorithm PK from the 235 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237 * No. 1.7) 238 */ 239struct owner_vertex; 240 241struct owner_edge { 242 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */ 243 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */ 244 int e_refs; /* (g) number of times added */ 245 struct owner_vertex *e_from; /* (c) out-going from here */ 246 struct owner_vertex *e_to; /* (c) in-coming to here */ 247}; 248LIST_HEAD(owner_edge_list, owner_edge); 249 250struct owner_vertex { 251 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */ 252 uint32_t v_gen; /* (g) workspace for edge insertion */ 253 int v_order; /* (g) order of vertex in graph */ 254 struct owner_edge_list v_outedges;/* (g) list of out-edges */ 255 struct owner_edge_list v_inedges; /* (g) list of in-edges */ 256 struct lock_owner *v_owner; /* (c) corresponding lock owner */ 257}; 258TAILQ_HEAD(owner_vertex_list, owner_vertex); 259 260struct owner_graph { 261 struct owner_vertex** g_vertices; /* (g) pointers to vertices */ 262 int g_size; /* (g) number of vertices */ 263 int g_space; /* (g) space allocated for vertices */ 264 int *g_indexbuf; /* (g) workspace for loop detection */ 265 uint32_t g_gen; /* (g) increment when re-ordering */ 266}; 267 268static struct sx lf_owner_graph_lock; 269static struct owner_graph lf_owner_graph; 270 271/* 272 * Initialise various structures and locks. 273 */ 274static void 275lf_init(void *dummy) 276{ 277 int i; 278 279 sx_init(&lf_lock_states_lock, "lock states lock"); 280 LIST_INIT(&lf_lock_states); 281 282 sx_init(&lf_lock_owners_lock, "lock owners lock"); 283 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 284 LIST_INIT(&lf_lock_owners[i]); 285 286 sx_init(&lf_owner_graph_lock, "owner graph lock"); 287 graph_init(&lf_owner_graph); 288} 289SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL); 290 291/* 292 * Generate a hash value for a lock owner. 293 */ 294static int 295lf_hash_owner(caddr_t id, struct flock *fl, int flags) 296{ 297 uint32_t h; 298 299 if (flags & F_REMOTE) { 300 h = HASHSTEP(0, fl->l_pid); 301 h = HASHSTEP(h, fl->l_sysid); 302 } else if (flags & F_FLOCK) { 303 h = ((uintptr_t) id) >> 7; 304 } else { 305 struct proc *p = (struct proc *) id; 306 h = HASHSTEP(0, p->p_pid); 307 h = HASHSTEP(h, 0); 308 } 309 310 return (h % LOCK_OWNER_HASH_SIZE); 311} 312 313/* 314 * Return true if a lock owner matches the details passed to 315 * lf_advlock. 
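/*
 * Illustrative walk-through (not part of the original source): how the
 * owner graph described above catches a deadlock between two lock
 * owners.  The vnodes and ranges are made up for the example.
 *
 *   1. Owner A write-locks vp1 [0..9] - granted, no edges anywhere.
 *   2. Owner B write-locks vp2 [0..9] - granted, no edges anywhere.
 *   3. Owner A requests vp2 [0..9] - it blocks on B's active lock, so
 *      vp2's lock graph gets an edge from A's pending entry to B's
 *      active entry and the owner graph gets the edge A -> B.
 *   4. Owner B requests vp1 [0..9] - this would add the owner edge
 *      B -> A, closing the cycle A -> B -> A.  graph_add_edge()
 *      detects the cycle, lf_add_edge()/lf_add_outgoing() return
 *      EDEADLK, and the request fails instead of sleeping forever.
 */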
316 */ 317static int 318lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl, 319 int flags) 320{ 321 if (flags & F_REMOTE) { 322 return lo->lo_pid == fl->l_pid 323 && lo->lo_sysid == fl->l_sysid; 324 } else { 325 return lo->lo_id == id; 326 } 327} 328 329static struct lockf_entry * 330lf_alloc_lock(struct lock_owner *lo) 331{ 332 struct lockf_entry *lf; 333 334 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO); 335 336#ifdef LOCKF_DEBUG 337 if (lockf_debug & 4) 338 printf("Allocated lock %p\n", lf); 339#endif 340 if (lo) { 341 sx_xlock(&lf_lock_owners_lock); 342 lo->lo_refs++; 343 sx_xunlock(&lf_lock_owners_lock); 344 lf->lf_owner = lo; 345 } 346 347 return (lf); 348} 349 350static void 351lf_free_lock(struct lockf_entry *lock) 352{ 353 /* 354 * Adjust the lock_owner reference count and 355 * reclaim the entry if this is the last lock 356 * for that owner. 357 */ 358 struct lock_owner *lo = lock->lf_owner; 359 if (lo) { 360 KASSERT(LIST_EMPTY(&lock->lf_outedges), 361 ("freeing lock with dependancies")); 362 KASSERT(LIST_EMPTY(&lock->lf_inedges), 363 ("freeing lock with dependants")); 364 sx_xlock(&lf_lock_owners_lock); 365 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); 366 lo->lo_refs--; 367 if (lo->lo_refs == 0) { 368#ifdef LOCKF_DEBUG 369 if (lockf_debug & 1) 370 printf("lf_free_lock: freeing lock owner %p\n", 371 lo); 372#endif 373 if (lo->lo_vertex) { 374 sx_xlock(&lf_owner_graph_lock); 375 graph_free_vertex(&lf_owner_graph, 376 lo->lo_vertex); 377 sx_xunlock(&lf_owner_graph_lock); 378 } 379 LIST_REMOVE(lo, lo_link); 380 free(lo, M_LOCKF); 381#ifdef LOCKF_DEBUG 382 if (lockf_debug & 4) 383 printf("Freed lock owner %p\n", lo); 384#endif 385 } 386 sx_unlock(&lf_lock_owners_lock); 387 } 388 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { 389 vrele(lock->lf_vnode); 390 lock->lf_vnode = NULL; 391 } 392#ifdef LOCKF_DEBUG 393 if (lockf_debug & 4) 394 printf("Freed lock %p\n", lock); 395#endif 396 free(lock, M_LOCKF); 397} 398 399/* 400 * Advisory record locking support 401 */ 402int 403lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, 404 u_quad_t size) 405{ 406 struct lockf *state, *freestate = NULL; 407 struct flock *fl = ap->a_fl; 408 struct lockf_entry *lock; 409 struct vnode *vp = ap->a_vp; 410 caddr_t id = ap->a_id; 411 int flags = ap->a_flags; 412 int hash; 413 struct lock_owner *lo; 414 off_t start, end, oadd; 415 int error; 416 417 /* 418 * Handle the F_UNLKSYS case first - no need to mess about 419 * creating a lock owner for this one. 420 */ 421 if (ap->a_op == F_UNLCKSYS) { 422 lf_clearremotesys(fl->l_sysid); 423 return (0); 424 } 425 426 /* 427 * Convert the flock structure into a start and end. 428 */ 429 switch (fl->l_whence) { 430 431 case SEEK_SET: 432 case SEEK_CUR: 433 /* 434 * Caller is responsible for adding any necessary offset 435 * when SEEK_CUR is used. 
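/*
 * Illustrative values (not in the original source) for the conversion
 * that follows, with l_whence = SEEK_SET so that 'start' begins as
 * l_start.  Results are the inclusive [lf_start..lf_end] byte ranges:
 *
 *   l_start = 100, l_len = 10   ->  [100..109]
 *   l_start = 100, l_len = 0    ->  [100..OFF_MAX]   (lock to EOF)
 *   l_start = 100, l_len = -10  ->  [90..99]         (range ends just
 *                                                     before l_start)
 *   l_start = 0,   l_len = -10  ->  EINVAL
 *   l_start = 5,   l_len = -10  ->  EINVAL (start would go negative)
 */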
436 */ 437 start = fl->l_start; 438 break; 439 440 case SEEK_END: 441 if (size > OFF_MAX || 442 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) 443 return (EOVERFLOW); 444 start = size + fl->l_start; 445 break; 446 447 default: 448 return (EINVAL); 449 } 450 if (start < 0) 451 return (EINVAL); 452 if (fl->l_len < 0) { 453 if (start == 0) 454 return (EINVAL); 455 end = start - 1; 456 start += fl->l_len; 457 if (start < 0) 458 return (EINVAL); 459 } else if (fl->l_len == 0) { 460 end = OFF_MAX; 461 } else { 462 oadd = fl->l_len - 1; 463 if (oadd > OFF_MAX - start) 464 return (EOVERFLOW); 465 end = start + oadd; 466 } 467 /* 468 * Avoid the common case of unlocking when inode has no locks. 469 */ 470 if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) { 471 if (ap->a_op != F_SETLK) { 472 fl->l_type = F_UNLCK; 473 return (0); 474 } 475 } 476 477 /* 478 * Map our arguments to an existing lock owner or create one 479 * if this is the first time we have seen this owner. 480 */ 481 hash = lf_hash_owner(id, fl, flags); 482 sx_xlock(&lf_lock_owners_lock); 483 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link) 484 if (lf_owner_matches(lo, id, fl, flags)) 485 break; 486 if (!lo) { 487 /* 488 * We initialise the lock with a reference 489 * count which matches the new lockf_entry 490 * structure created below. 491 */ 492 lo = malloc(sizeof(struct lock_owner), M_LOCKF, 493 M_WAITOK|M_ZERO); 494#ifdef LOCKF_DEBUG 495 if (lockf_debug & 4) 496 printf("Allocated lock owner %p\n", lo); 497#endif 498 499 lo->lo_refs = 1; 500 lo->lo_flags = flags; 501 lo->lo_id = id; 502 if (flags & F_REMOTE) { 503 lo->lo_pid = fl->l_pid; 504 lo->lo_sysid = fl->l_sysid; 505 } else if (flags & F_FLOCK) { 506 lo->lo_pid = -1; 507 lo->lo_sysid = 0; 508 } else { 509 struct proc *p = (struct proc *) id; 510 lo->lo_pid = p->p_pid; 511 lo->lo_sysid = 0; 512 } 513 lo->lo_vertex = NULL; 514 515#ifdef LOCKF_DEBUG 516 if (lockf_debug & 1) { 517 printf("lf_advlockasync: new lock owner %p ", lo); 518 lf_print_owner(lo); 519 printf("\n"); 520 } 521#endif 522 523 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link); 524 } else { 525 /* 526 * We have seen this lock owner before, increase its 527 * reference count to account for the new lockf_entry 528 * structure we create below. 529 */ 530 lo->lo_refs++; 531 } 532 sx_xunlock(&lf_lock_owners_lock); 533 534 /* 535 * Create the lockf structure. We initialise the lf_owner 536 * field here instead of in lf_alloc_lock() to avoid paying 537 * the lf_lock_owners_lock tax twice. 538 */ 539 lock = lf_alloc_lock(NULL); 540 lock->lf_start = start; 541 lock->lf_end = end; 542 lock->lf_owner = lo; 543 lock->lf_vnode = vp; 544 if (flags & F_REMOTE) { 545 /* 546 * For remote locks, the caller may release its ref to 547 * the vnode at any time - we have to ref it here to 548 * prevent it from being recycled unexpectedly. 549 */ 550 vref(vp); 551 } 552 553 /* 554 * XXX The problem is that VTOI is ufs specific, so it will 555 * break LOCKF_DEBUG for all other FS's other than UFS because 556 * it casts the vnode->data ptr to struct inode *. 557 */ 558/* lock->lf_inode = VTOI(ap->a_vp); */ 559 lock->lf_inode = (struct inode *)0; 560 lock->lf_type = fl->l_type; 561 LIST_INIT(&lock->lf_outedges); 562 LIST_INIT(&lock->lf_inedges); 563 lock->lf_async_task = ap->a_task; 564 lock->lf_flags = ap->a_flags; 565 566 /* 567 * Do the requested operation. 
First find our state structure 568 * and create a new one if necessary - the caller's *statep 569 * variable and the state's ls_threads count is protected by 570 * the vnode interlock. 571 */ 572 VI_LOCK(vp); 573 if (vp->v_iflag & VI_DOOMED) { 574 VI_UNLOCK(vp); 575 lf_free_lock(lock); 576 return (ENOENT); 577 } 578 579 /* 580 * Allocate a state structure if necessary. 581 */ 582 state = *statep; 583 if (state == NULL) { 584 struct lockf *ls; 585 586 VI_UNLOCK(vp); 587 588 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO); 589 sx_init(&ls->ls_lock, "ls_lock"); 590 LIST_INIT(&ls->ls_active); 591 LIST_INIT(&ls->ls_pending); 592 ls->ls_threads = 1; 593 594 sx_xlock(&lf_lock_states_lock); 595 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link); 596 sx_xunlock(&lf_lock_states_lock); 597 598 /* 599 * Cope if we lost a race with some other thread while 600 * trying to allocate memory. 601 */ 602 VI_LOCK(vp); 603 if (vp->v_iflag & VI_DOOMED) { 604 VI_UNLOCK(vp); 605 sx_xlock(&lf_lock_states_lock); 606 LIST_REMOVE(ls, ls_link); 607 sx_xunlock(&lf_lock_states_lock); 608 sx_destroy(&ls->ls_lock); 609 free(ls, M_LOCKF); 610 lf_free_lock(lock); 611 return (ENOENT); 612 } 613 if ((*statep) == NULL) { 614 state = *statep = ls; 615 VI_UNLOCK(vp); 616 } else { 617 state = *statep; 618 state->ls_threads++; 619 VI_UNLOCK(vp); 620 621 sx_xlock(&lf_lock_states_lock); 622 LIST_REMOVE(ls, ls_link); 623 sx_xunlock(&lf_lock_states_lock); 624 sx_destroy(&ls->ls_lock); 625 free(ls, M_LOCKF); 626 } 627 } else { 628 state->ls_threads++; 629 VI_UNLOCK(vp); 630 } 631 632 sx_xlock(&state->ls_lock); 633 switch(ap->a_op) { 634 case F_SETLK: 635 error = lf_setlock(state, lock, vp, ap->a_cookiep); 636 break; 637 638 case F_UNLCK: 639 error = lf_clearlock(state, lock); 640 lf_free_lock(lock); 641 break; 642 643 case F_GETLK: 644 error = lf_getlock(state, lock, fl); 645 lf_free_lock(lock); 646 break; 647 648 case F_CANCEL: 649 if (ap->a_cookiep) 650 error = lf_cancel(state, lock, *ap->a_cookiep); 651 else 652 error = EINVAL; 653 lf_free_lock(lock); 654 break; 655 656 default: 657 lf_free_lock(lock); 658 error = EINVAL; 659 break; 660 } 661 662#ifdef INVARIANTS 663 /* 664 * Check for some can't happen stuff. In this case, the active 665 * lock list becoming disordered or containing mutually 666 * blocking locks. We also check the pending list for locks 667 * which should be active (i.e. have no out-going edges). 668 */ 669 LIST_FOREACH(lock, &state->ls_active, lf_link) { 670 struct lockf_entry *lf; 671 if (LIST_NEXT(lock, lf_link)) 672 KASSERT((lock->lf_start 673 <= LIST_NEXT(lock, lf_link)->lf_start), 674 ("locks disordered")); 675 LIST_FOREACH(lf, &state->ls_active, lf_link) { 676 if (lock == lf) 677 break; 678 KASSERT(!lf_blocks(lock, lf), 679 ("two conflicting active locks")); 680 if (lock->lf_owner == lf->lf_owner) 681 KASSERT(!lf_overlaps(lock, lf), 682 ("two overlapping locks from same owner")); 683 } 684 } 685 LIST_FOREACH(lock, &state->ls_pending, lf_link) { 686 KASSERT(!LIST_EMPTY(&lock->lf_outedges), 687 ("pending lock which should be active")); 688 } 689#endif 690 sx_xunlock(&state->ls_lock); 691 692 /* 693 * If we have removed the last active lock on the vnode and 694 * this is the last thread that was in-progress, we can free 695 * the state structure. We update the caller's pointer inside 696 * the vnode interlock but call free outside. 697 * 698 * XXX alternatively, keep the state structure around until 699 * the filesystem recycles - requires a callback from the 700 * filesystem. 
701 */ 702 VI_LOCK(vp); 703 704 state->ls_threads--; 705 wakeup(state); 706 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { 707 KASSERT(LIST_EMPTY(&state->ls_pending), 708 ("freeing state with pending locks")); 709 freestate = state; 710 *statep = NULL; 711 } 712 713 VI_UNLOCK(vp); 714 715 if (freestate) { 716 sx_xlock(&lf_lock_states_lock); 717 LIST_REMOVE(freestate, ls_link); 718 sx_xunlock(&lf_lock_states_lock); 719 sx_destroy(&freestate->ls_lock); 720 free(freestate, M_LOCKF); 721 } 722 return (error); 723} 724 725int 726lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size) 727{ 728 struct vop_advlockasync_args a; 729 730 a.a_vp = ap->a_vp; 731 a.a_id = ap->a_id; 732 a.a_op = ap->a_op; 733 a.a_fl = ap->a_fl; 734 a.a_flags = ap->a_flags; 735 a.a_task = NULL; 736 a.a_cookiep = NULL; 737 738 return (lf_advlockasync(&a, statep, size)); 739} 740 741void 742lf_purgelocks(struct vnode *vp, struct lockf **statep) 743{ 744 struct lockf *state; 745 struct lockf_entry *lock, *nlock; 746 747 /* 748 * For this to work correctly, the caller must ensure that no 749 * other threads enter the locking system for this vnode, 750 * e.g. by checking VI_DOOMED. We wake up any threads that are 751 * sleeping waiting for locks on this vnode and then free all 752 * the remaining locks. 753 */ 754 VI_LOCK(vp); 755 state = *statep; 756 if (state) { 757 state->ls_threads++; 758 VI_UNLOCK(vp); 759 760 sx_xlock(&state->ls_lock); 761 sx_xlock(&lf_owner_graph_lock); 762 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 763 LIST_REMOVE(lock, lf_link); 764 lf_remove_outgoing(lock); 765 lf_remove_incoming(lock); 766 767 /* 768 * If its an async lock, we can just free it 769 * here, otherwise we let the sleeping thread 770 * free it. 771 */ 772 if (lock->lf_async_task) { 773 lf_free_lock(lock); 774 } else { 775 lock->lf_flags |= F_INTR; 776 wakeup(lock); 777 } 778 } 779 sx_xunlock(&lf_owner_graph_lock); 780 sx_xunlock(&state->ls_lock); 781 782 /* 783 * Wait for all other threads, sleeping and otherwise 784 * to leave. 785 */ 786 VI_LOCK(vp); 787 while (state->ls_threads > 1) 788 msleep(state, VI_MTX(vp), 0, "purgelocks", 0); 789 *statep = 0; 790 VI_UNLOCK(vp); 791 792 /* 793 * We can just free all the active locks since they 794 * will have no dependancies (we removed them all 795 * above). We don't need to bother locking since we 796 * are the last thread using this state structure. 797 */ 798 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { 799 LIST_REMOVE(lock, lf_link); 800 lf_free_lock(lock); 801 } 802 sx_xlock(&lf_lock_states_lock); 803 LIST_REMOVE(state, ls_link); 804 sx_xunlock(&lf_lock_states_lock); 805 sx_destroy(&state->ls_lock); 806 free(state, M_LOCKF); 807 } else { 808 VI_UNLOCK(vp); 809 } 810} 811 812/* 813 * Return non-zero if locks 'x' and 'y' overlap. 814 */ 815static int 816lf_overlaps(struct lockf_entry *x, struct lockf_entry *y) 817{ 818 819 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); 820} 821 822/* 823 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa). 824 */ 825static int 826lf_blocks(struct lockf_entry *x, struct lockf_entry *y) 827{ 828 829 return x->lf_owner != y->lf_owner 830 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) 831 && lf_overlaps(x, y); 832} 833 834/* 835 * Allocate a lock edge from the free list 836 */ 837static struct lockf_edge * 838lf_alloc_edge(void) 839{ 840 841 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO)); 842} 843 844/* 845 * Free a lock edge. 
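/*
 * Illustrative cases (not in the original source) for the two
 * predicates above; ranges are inclusive byte offsets:
 *
 *   [0..9] and [10..19]                      -> lf_overlaps() == 0
 *   [0..10] and [10..19]                     -> lf_overlaps() != 0
 *                                               (they share byte 10)
 *   two F_RDLCK locks, any overlap           -> lf_blocks() == 0
 *   overlapping F_RDLCK and F_WRLCK locks
 *   from different owners                    -> lf_blocks() != 0
 *   overlapping locks with the same owner    -> lf_blocks() == 0
 *                                               (an owner never
 *                                               blocks itself)
 */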
846 */ 847static void 848lf_free_edge(struct lockf_edge *e) 849{ 850 851 free(e, M_LOCKF); 852} 853 854 855/* 856 * Ensure that the lock's owner has a corresponding vertex in the 857 * owner graph. 858 */ 859static void 860lf_alloc_vertex(struct lockf_entry *lock) 861{ 862 struct owner_graph *g = &lf_owner_graph; 863 864 if (!lock->lf_owner->lo_vertex) 865 lock->lf_owner->lo_vertex = 866 graph_alloc_vertex(g, lock->lf_owner); 867} 868 869/* 870 * Attempt to record an edge from lock x to lock y. Return EDEADLK if 871 * the new edge would cause a cycle in the owner graph. 872 */ 873static int 874lf_add_edge(struct lockf_entry *x, struct lockf_entry *y) 875{ 876 struct owner_graph *g = &lf_owner_graph; 877 struct lockf_edge *e; 878 int error; 879 880#ifdef INVARIANTS 881 LIST_FOREACH(e, &x->lf_outedges, le_outlink) 882 KASSERT(e->le_to != y, ("adding lock edge twice")); 883#endif 884 885 /* 886 * Make sure the two owners have entries in the owner graph. 887 */ 888 lf_alloc_vertex(x); 889 lf_alloc_vertex(y); 890 891 error = graph_add_edge(g, x->lf_owner->lo_vertex, 892 y->lf_owner->lo_vertex); 893 if (error) 894 return (error); 895 896 e = lf_alloc_edge(); 897 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); 898 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); 899 e->le_from = x; 900 e->le_to = y; 901 902 return (0); 903} 904 905/* 906 * Remove an edge from the lock graph. 907 */ 908static void 909lf_remove_edge(struct lockf_edge *e) 910{ 911 struct owner_graph *g = &lf_owner_graph; 912 struct lockf_entry *x = e->le_from; 913 struct lockf_entry *y = e->le_to; 914 915 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); 916 LIST_REMOVE(e, le_outlink); 917 LIST_REMOVE(e, le_inlink); 918 e->le_from = NULL; 919 e->le_to = NULL; 920 lf_free_edge(e); 921} 922 923/* 924 * Remove all out-going edges from lock x. 925 */ 926static void 927lf_remove_outgoing(struct lockf_entry *x) 928{ 929 struct lockf_edge *e; 930 931 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { 932 lf_remove_edge(e); 933 } 934} 935 936/* 937 * Remove all in-coming edges from lock x. 938 */ 939static void 940lf_remove_incoming(struct lockf_entry *x) 941{ 942 struct lockf_edge *e; 943 944 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { 945 lf_remove_edge(e); 946 } 947} 948 949/* 950 * Walk the list of locks for the file and create an out-going edge 951 * from lock to each blocking lock. 952 */ 953static int 954lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) 955{ 956 struct lockf_entry *overlap; 957 int error; 958 959 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 960 /* 961 * We may assume that the active list is sorted by 962 * lf_start. 963 */ 964 if (overlap->lf_start > lock->lf_end) 965 break; 966 if (!lf_blocks(lock, overlap)) 967 continue; 968 969 /* 970 * We've found a blocking lock. Add the corresponding 971 * edge to the graphs and see if it would cause a 972 * deadlock. 973 */ 974 error = lf_add_edge(lock, overlap); 975 976 /* 977 * The only error that lf_add_edge returns is EDEADLK. 978 * Remove any edges we added and return the error. 979 */ 980 if (error) { 981 lf_remove_outgoing(lock); 982 return (error); 983 } 984 } 985 986 /* 987 * We also need to add edges to sleeping locks that block 988 * us. This ensures that lf_wakeup_lock cannot grant two 989 * mutually blocking locks simultaneously and also enforces a 990 * 'first come, first served' fairness model. 
Note that this 991 * only happens if we are blocked by at least one active lock 992 * due to the call to lf_getblock in lf_setlock below. 993 */ 994 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 995 if (!lf_blocks(lock, overlap)) 996 continue; 997 /* 998 * We've found a blocking lock. Add the corresponding 999 * edge to the graphs and see if it would cause a 1000 * deadlock. 1001 */ 1002 error = lf_add_edge(lock, overlap); 1003 1004 /* 1005 * The only error that lf_add_edge returns is EDEADLK. 1006 * Remove any edges we added and return the error. 1007 */ 1008 if (error) { 1009 lf_remove_outgoing(lock); 1010 return (error); 1011 } 1012 } 1013 1014 return (0); 1015} 1016 1017/* 1018 * Walk the list of pending locks for the file and create an in-coming 1019 * edge from lock to each blocking lock. 1020 */ 1021static int 1022lf_add_incoming(struct lockf *state, struct lockf_entry *lock) 1023{ 1024 struct lockf_entry *overlap; 1025 int error; 1026 1027 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { 1028 if (!lf_blocks(lock, overlap)) 1029 continue; 1030 1031 /* 1032 * We've found a blocking lock. Add the corresponding 1033 * edge to the graphs and see if it would cause a 1034 * deadlock. 1035 */ 1036 error = lf_add_edge(overlap, lock); 1037 1038 /* 1039 * The only error that lf_add_edge returns is EDEADLK. 1040 * Remove any edges we added and return the error. 1041 */ 1042 if (error) { 1043 lf_remove_incoming(lock); 1044 return (error); 1045 } 1046 } 1047 return (0); 1048} 1049 1050/* 1051 * Insert lock into the active list, keeping list entries ordered by 1052 * increasing values of lf_start. 1053 */ 1054static void 1055lf_insert_lock(struct lockf *state, struct lockf_entry *lock) 1056{ 1057 struct lockf_entry *lf, *lfprev; 1058 1059 if (LIST_EMPTY(&state->ls_active)) { 1060 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); 1061 return; 1062 } 1063 1064 lfprev = NULL; 1065 LIST_FOREACH(lf, &state->ls_active, lf_link) { 1066 if (lf->lf_start > lock->lf_start) { 1067 LIST_INSERT_BEFORE(lf, lock, lf_link); 1068 return; 1069 } 1070 lfprev = lf; 1071 } 1072 LIST_INSERT_AFTER(lfprev, lock, lf_link); 1073} 1074 1075/* 1076 * Wake up a sleeping lock and remove it from the pending list now 1077 * that all its dependancies have been resolved. The caller should 1078 * arrange for the lock to be added to the active list, adjusting any 1079 * existing locks for the same owner as needed. 1080 */ 1081static void 1082lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock) 1083{ 1084 1085 /* 1086 * Remove from ls_pending list and wake up the caller 1087 * or start the async notification, as appropriate. 1088 */ 1089 LIST_REMOVE(wakelock, lf_link); 1090#ifdef LOCKF_DEBUG 1091 if (lockf_debug & 1) 1092 lf_print("lf_wakeup_lock: awakening", wakelock); 1093#endif /* LOCKF_DEBUG */ 1094 if (wakelock->lf_async_task) { 1095 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); 1096 } else { 1097 wakeup(wakelock); 1098 } 1099} 1100 1101/* 1102 * Re-check all dependant locks and remove edges to locks that we no 1103 * longer block. If 'all' is non-zero, the lock has been removed and 1104 * we must remove all the dependancies, otherwise it has simply been 1105 * reduced but remains active. 
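/*
 * Illustrative scenario (not in the original source) showing why
 * lf_add_outgoing() above also records edges to older pending locks:
 *
 *   1. Owner A holds a read lock on [0..9].
 *   2. Owner B requests a write lock on [0..9]; it sleeps with the
 *      single edge B -> A.
 *   3. Owner C requests a write lock on [0..9]; it sleeps with edges
 *      C -> A (the active blocker) and C -> B (the older pending
 *      request it also conflicts with).
 *   4. When A unlocks, only B loses its last out-going edge and is
 *      woken by lf_wakeup_lock(); C still depends on B.  The two
 *      conflicting writers are therefore never granted together and
 *      the older request, B, is served first.
 */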
Any pending locks which have been been 1106 * unblocked are added to 'granted' 1107 */ 1108static void 1109lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, 1110 struct lockf_entry_list *granted) 1111{ 1112 struct lockf_edge *e, *ne; 1113 struct lockf_entry *deplock; 1114 1115 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { 1116 deplock = e->le_from; 1117 if (all || !lf_blocks(lock, deplock)) { 1118 sx_xlock(&lf_owner_graph_lock); 1119 lf_remove_edge(e); 1120 sx_xunlock(&lf_owner_graph_lock); 1121 if (LIST_EMPTY(&deplock->lf_outedges)) { 1122 lf_wakeup_lock(state, deplock); 1123 LIST_INSERT_HEAD(granted, deplock, lf_link); 1124 } 1125 } 1126 } 1127} 1128 1129/* 1130 * Set the start of an existing active lock, updating dependancies and 1131 * adding any newly woken locks to 'granted'. 1132 */ 1133static void 1134lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, 1135 struct lockf_entry_list *granted) 1136{ 1137 1138 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); 1139 lock->lf_start = new_start; 1140 LIST_REMOVE(lock, lf_link); 1141 lf_insert_lock(state, lock); 1142 lf_update_dependancies(state, lock, FALSE, granted); 1143} 1144 1145/* 1146 * Set the end of an existing active lock, updating dependancies and 1147 * adding any newly woken locks to 'granted'. 1148 */ 1149static void 1150lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, 1151 struct lockf_entry_list *granted) 1152{ 1153 1154 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); 1155 lock->lf_end = new_end; 1156 lf_update_dependancies(state, lock, FALSE, granted); 1157} 1158 1159/* 1160 * Add a lock to the active list, updating or removing any current 1161 * locks owned by the same owner and processing any pending locks that 1162 * become unblocked as a result. This code is also used for unlock 1163 * since the logic for updating existing locks is identical. 1164 * 1165 * As a result of processing the new lock, we may unblock existing 1166 * pending locks as a result of downgrading/unlocking. We simply 1167 * activate the newly granted locks by looping. 1168 * 1169 * Since the new lock already has its dependancies set up, we always 1170 * add it to the list (unless its an unlock request). This may 1171 * fragment the lock list in some pathological cases but its probably 1172 * not a real problem. 1173 */ 1174static void 1175lf_activate_lock(struct lockf *state, struct lockf_entry *lock) 1176{ 1177 struct lockf_entry *overlap, *lf; 1178 struct lockf_entry_list granted; 1179 int ovcase; 1180 1181 LIST_INIT(&granted); 1182 LIST_INSERT_HEAD(&granted, lock, lf_link); 1183 1184 while (!LIST_EMPTY(&granted)) { 1185 lock = LIST_FIRST(&granted); 1186 LIST_REMOVE(lock, lf_link); 1187 1188 /* 1189 * Skip over locks owned by other processes. Handle 1190 * any locks that overlap and are owned by ourselves. 
1191 */ 1192 overlap = LIST_FIRST(&state->ls_active); 1193 for (;;) { 1194 ovcase = lf_findoverlap(&overlap, lock, SELF); 1195 1196#ifdef LOCKF_DEBUG 1197 if (ovcase && (lockf_debug & 2)) { 1198 printf("lf_setlock: overlap %d", ovcase); 1199 lf_print("", overlap); 1200 } 1201#endif 1202 /* 1203 * Six cases: 1204 * 0) no overlap 1205 * 1) overlap == lock 1206 * 2) overlap contains lock 1207 * 3) lock contains overlap 1208 * 4) overlap starts before lock 1209 * 5) overlap ends after lock 1210 */ 1211 switch (ovcase) { 1212 case 0: /* no overlap */ 1213 break; 1214 1215 case 1: /* overlap == lock */ 1216 /* 1217 * We have already setup the 1218 * dependants for the new lock, taking 1219 * into account a possible downgrade 1220 * or unlock. Remove the old lock. 1221 */ 1222 LIST_REMOVE(overlap, lf_link); 1223 lf_update_dependancies(state, overlap, TRUE, 1224 &granted); 1225 lf_free_lock(overlap); 1226 break; 1227 1228 case 2: /* overlap contains lock */ 1229 /* 1230 * Just split the existing lock. 1231 */ 1232 lf_split(state, overlap, lock, &granted); 1233 break; 1234 1235 case 3: /* lock contains overlap */ 1236 /* 1237 * Delete the overlap and advance to 1238 * the next entry in the list. 1239 */ 1240 lf = LIST_NEXT(overlap, lf_link); 1241 LIST_REMOVE(overlap, lf_link); 1242 lf_update_dependancies(state, overlap, TRUE, 1243 &granted); 1244 lf_free_lock(overlap); 1245 overlap = lf; 1246 continue; 1247 1248 case 4: /* overlap starts before lock */ 1249 /* 1250 * Just update the overlap end and 1251 * move on. 1252 */ 1253 lf_set_end(state, overlap, lock->lf_start - 1, 1254 &granted); 1255 overlap = LIST_NEXT(overlap, lf_link); 1256 continue; 1257 1258 case 5: /* overlap ends after lock */ 1259 /* 1260 * Change the start of overlap and 1261 * re-insert. 1262 */ 1263 lf_set_start(state, overlap, lock->lf_end + 1, 1264 &granted); 1265 break; 1266 } 1267 break; 1268 } 1269#ifdef LOCKF_DEBUG 1270 if (lockf_debug & 1) { 1271 if (lock->lf_type != F_UNLCK) 1272 lf_print("lf_activate_lock: activated", lock); 1273 else 1274 lf_print("lf_activate_lock: unlocked", lock); 1275 lf_printlist("lf_activate_lock", lock); 1276 } 1277#endif /* LOCKF_DEBUG */ 1278 if (lock->lf_type != F_UNLCK) 1279 lf_insert_lock(state, lock); 1280 } 1281} 1282 1283/* 1284 * Cancel a pending lock request, either as a result of a signal or a 1285 * cancel request for an async lock. 1286 */ 1287static void 1288lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) 1289{ 1290 struct lockf_entry_list granted; 1291 1292 /* 1293 * Note it is theoretically possible that cancelling this lock 1294 * may allow some other pending lock to become 1295 * active. Consider this case: 1296 * 1297 * Owner Action Result Dependancies 1298 * 1299 * A: lock [0..0] succeeds 1300 * B: lock [2..2] succeeds 1301 * C: lock [1..2] blocked C->B 1302 * D: lock [0..1] blocked C->B,D->A,D->C 1303 * A: unlock [0..0] C->B,D->C 1304 * C: cancel [1..2] 1305 */ 1306 1307 LIST_REMOVE(lock, lf_link); 1308 1309 /* 1310 * Removing out-going edges is simple. 1311 */ 1312 sx_xlock(&lf_owner_graph_lock); 1313 lf_remove_outgoing(lock); 1314 sx_xunlock(&lf_owner_graph_lock); 1315 1316 /* 1317 * Removing in-coming edges may allow some other lock to 1318 * become active - we use lf_update_dependancies to figure 1319 * this out. 1320 */ 1321 LIST_INIT(&granted); 1322 lf_update_dependancies(state, lock, TRUE, &granted); 1323 lf_free_lock(lock); 1324 1325 /* 1326 * Feed any newly active locks to lf_activate_lock. 
1327 */ 1328 while (!LIST_EMPTY(&granted)) { 1329 lock = LIST_FIRST(&granted); 1330 LIST_REMOVE(lock, lf_link); 1331 lf_activate_lock(state, lock); 1332 } 1333} 1334 1335/* 1336 * Set a byte-range lock. 1337 */ 1338static int 1339lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, 1340 void **cookiep) 1341{ 1342 struct lockf_entry *block; 1343 static char lockstr[] = "lockf"; 1344 int priority, error; 1345 1346#ifdef LOCKF_DEBUG 1347 if (lockf_debug & 1) 1348 lf_print("lf_setlock", lock); 1349#endif /* LOCKF_DEBUG */ 1350 1351 /* 1352 * Set the priority 1353 */ 1354 priority = PLOCK; 1355 if (lock->lf_type == F_WRLCK) 1356 priority += 4; 1357 priority |= PCATCH; 1358 /* 1359 * Scan lock list for this file looking for locks that would block us. 1360 */ 1361 while ((block = lf_getblock(state, lock))) { 1362 /* 1363 * Free the structure and return if nonblocking. 1364 */ 1365 if ((lock->lf_flags & F_WAIT) == 0 1366 && lock->lf_async_task == NULL) { 1367 lf_free_lock(lock); 1368 error = EAGAIN; 1369 goto out; 1370 } 1371 1372 /* 1373 * We are blocked. Create edges to each blocking lock, 1374 * checking for deadlock using the owner graph. For 1375 * simplicity, we run deadlock detection for all 1376 * locks, posix and otherwise. 1377 */ 1378 sx_xlock(&lf_owner_graph_lock); 1379 error = lf_add_outgoing(state, lock); 1380 sx_xunlock(&lf_owner_graph_lock); 1381 1382 if (error) { 1383#ifdef LOCKF_DEBUG 1384 if (lockf_debug & 1) 1385 lf_print("lf_setlock: deadlock", lock); 1386#endif 1387 lf_free_lock(lock); 1388 goto out; 1389 } 1390 1391 /* 1392 * For flock type locks, we must first remove 1393 * any shared locks that we hold before we sleep 1394 * waiting for an exclusive lock. 1395 */ 1396 if ((lock->lf_flags & F_FLOCK) && 1397 lock->lf_type == F_WRLCK) { 1398 lock->lf_type = F_UNLCK; 1399 lf_activate_lock(state, lock); 1400 lock->lf_type = F_WRLCK; 1401 } 1402 /* 1403 * We have added edges to everything that blocks 1404 * us. Sleep until they all go away. 1405 */ 1406 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); 1407#ifdef LOCKF_DEBUG 1408 if (lockf_debug & 1) { 1409 struct lockf_edge *e; 1410 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { 1411 lf_print("lf_setlock: blocking on", e->le_to); 1412 lf_printlist("lf_setlock", e->le_to); 1413 } 1414 } 1415#endif /* LOCKF_DEBUG */ 1416 1417 if ((lock->lf_flags & F_WAIT) == 0) { 1418 /* 1419 * The caller requested async notification - 1420 * this callback happens when the blocking 1421 * lock is released, allowing the caller to 1422 * make another attempt to take the lock. 1423 */ 1424 *cookiep = (void *) lock; 1425 error = EINPROGRESS; 1426 goto out; 1427 } 1428 1429 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); 1430 /* 1431 * We may have been awakened by a signal and/or by a 1432 * debugger continuing us (in which cases we must 1433 * remove our lock graph edges) and/or by another 1434 * process releasing a lock (in which case our edges 1435 * have already been removed and we have been moved to 1436 * the active list). We may also have been woken by 1437 * lf_purgelocks which we report to the caller as 1438 * EINTR. In that case, lf_purgelocks will have 1439 * removed our lock graph edges. 1440 * 1441 * Note that it is possible to receive a signal after 1442 * we were successfully woken (and moved to the active 1443 * list) but before we resumed execution. In this 1444 * case, our lf_outedges list will be clear. We 1445 * pretend there was no error. 
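/*
 * Illustrative sketch (not part of the original source) of the
 * asynchronous path above.  A caller that must not sleep passes a
 * task instead of setting F_WAIT; if the lock cannot be granted
 * immediately it gets EINPROGRESS plus a cookie, the task is enqueued
 * once the lock is granted, and the cookie can later be passed back
 * with F_CANCEL to withdraw the request.  The helper below is
 * hypothetical; it simply mirrors how lf_advlock() fills in
 * struct vop_advlockasync_args.  'statep' would be the per-file
 * pointer the filesystem normally hands to lf_advlock(), and for a
 * local POSIX lock 'id' would be the owning struct proc pointer.
 */
static int
example_async_setlk(struct vnode *vp, void *id, struct flock *fl,
    struct lockf **statep, u_quad_t size, struct task *task,
    void **cookiep)
{
	struct vop_advlockasync_args a;
	int error;

	a.a_vp = vp;
	a.a_id = id;
	a.a_op = F_SETLK;
	a.a_fl = fl;
	a.a_flags = F_POSIX;	/* note: no F_WAIT, we will not sleep */
	a.a_task = task;	/* enqueued when the lock is granted */
	a.a_cookiep = cookiep;	/* filled in on EINPROGRESS */

	error = lf_advlockasync(&a, statep, size);
	/*
	 * 0           - granted immediately
	 * EINPROGRESS - blocked; *cookiep identifies the request and
	 *               may be passed back with a_op = F_CANCEL
	 * otherwise   - hard failure (EDEADLK, ENOENT, ...)
	 */
	return (error);
}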
1446 * 1447 * Note also, if we have been sleeping long enough, we 1448 * may now have incoming edges from some newer lock 1449 * which is waiting behind us in the queue. 1450 */ 1451 if (lock->lf_flags & F_INTR) { 1452 error = EINTR; 1453 lf_free_lock(lock); 1454 goto out; 1455 } 1456 if (LIST_EMPTY(&lock->lf_outedges)) { 1457 error = 0; 1458 } else { 1459 lf_cancel_lock(state, lock); 1460 goto out; 1461 } 1462#ifdef LOCKF_DEBUG 1463 if (lockf_debug & 1) { 1464 lf_print("lf_setlock: granted", lock); 1465 } 1466#endif 1467 goto out; 1468 } 1469 /* 1470 * It looks like we are going to grant the lock. First add 1471 * edges from any currently pending lock that the new lock 1472 * would block. 1473 */ 1474 sx_xlock(&lf_owner_graph_lock); 1475 error = lf_add_incoming(state, lock); 1476 sx_xunlock(&lf_owner_graph_lock); 1477 if (error) { 1478#ifdef LOCKF_DEBUG 1479 if (lockf_debug & 1) 1480 lf_print("lf_setlock: deadlock", lock); 1481#endif 1482 lf_free_lock(lock); 1483 goto out; 1484 } 1485 1486 /* 1487 * No blocks!! Add the lock. Note that we will 1488 * downgrade or upgrade any overlapping locks this 1489 * process already owns. 1490 */ 1491 lf_activate_lock(state, lock); 1492 error = 0; 1493out: 1494 return (error); 1495} 1496 1497/* 1498 * Remove a byte-range lock on an inode. 1499 * 1500 * Generally, find the lock (or an overlap to that lock) 1501 * and remove it (or shrink it), then wakeup anyone we can. 1502 */ 1503static int 1504lf_clearlock(struct lockf *state, struct lockf_entry *unlock) 1505{ 1506 struct lockf_entry *overlap; 1507 1508 overlap = LIST_FIRST(&state->ls_active); 1509 1510 if (overlap == NOLOCKF) 1511 return (0); 1512#ifdef LOCKF_DEBUG 1513 if (unlock->lf_type != F_UNLCK) 1514 panic("lf_clearlock: bad type"); 1515 if (lockf_debug & 1) 1516 lf_print("lf_clearlock", unlock); 1517#endif /* LOCKF_DEBUG */ 1518 1519 lf_activate_lock(state, unlock); 1520 1521 return (0); 1522} 1523 1524/* 1525 * Check whether there is a blocking lock, and if so return its 1526 * details in '*fl'. 1527 */ 1528static int 1529lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 1530{ 1531 struct lockf_entry *block; 1532 1533#ifdef LOCKF_DEBUG 1534 if (lockf_debug & 1) 1535 lf_print("lf_getlock", lock); 1536#endif /* LOCKF_DEBUG */ 1537 1538 if ((block = lf_getblock(state, lock))) { 1539 fl->l_type = block->lf_type; 1540 fl->l_whence = SEEK_SET; 1541 fl->l_start = block->lf_start; 1542 if (block->lf_end == OFF_MAX) 1543 fl->l_len = 0; 1544 else 1545 fl->l_len = block->lf_end - block->lf_start + 1; 1546 fl->l_pid = block->lf_owner->lo_pid; 1547 fl->l_sysid = block->lf_owner->lo_sysid; 1548 } else { 1549 fl->l_type = F_UNLCK; 1550 } 1551 return (0); 1552} 1553 1554/* 1555 * Cancel an async lock request. 1556 */ 1557static int 1558lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) 1559{ 1560 struct lockf_entry *reallock; 1561 1562 /* 1563 * We need to match this request with an existing lock 1564 * request. 1565 */ 1566 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { 1567 if ((void *) reallock == cookie) { 1568 /* 1569 * Double-check that this lock looks right 1570 * (maybe use a rolling ID for the cancel 1571 * cookie instead?) 1572 */ 1573 if (!(reallock->lf_vnode == lock->lf_vnode 1574 && reallock->lf_start == lock->lf_start 1575 && reallock->lf_end == lock->lf_end)) { 1576 return (ENOENT); 1577 } 1578 1579 /* 1580 * Make sure this lock was async and then just 1581 * remove it from its wait lists. 
1582 */ 1583 if (!reallock->lf_async_task) { 1584 return (ENOENT); 1585 } 1586 1587 /* 1588 * Note that since any other thread must take 1589 * state->ls_lock before it can possibly 1590 * trigger the async callback, we are safe 1591 * from a race with lf_wakeup_lock, i.e. we 1592 * can free the lock (actually our caller does 1593 * this). 1594 */ 1595 lf_cancel_lock(state, reallock); 1596 return (0); 1597 } 1598 } 1599 1600 /* 1601 * We didn't find a matching lock - not much we can do here. 1602 */ 1603 return (ENOENT); 1604} 1605 1606/* 1607 * Walk the list of locks for an inode and 1608 * return the first blocking lock. 1609 */ 1610static struct lockf_entry * 1611lf_getblock(struct lockf *state, struct lockf_entry *lock) 1612{ 1613 struct lockf_entry *overlap; 1614 1615 LIST_FOREACH(overlap, &state->ls_active, lf_link) { 1616 /* 1617 * We may assume that the active list is sorted by 1618 * lf_start. 1619 */ 1620 if (overlap->lf_start > lock->lf_end) 1621 break; 1622 if (!lf_blocks(lock, overlap)) 1623 continue; 1624 return (overlap); 1625 } 1626 return (NOLOCKF); 1627} 1628 1629/* 1630 * Walk the list of locks for an inode to find an overlapping lock (if 1631 * any) and return a classification of that overlap. 1632 * 1633 * Arguments: 1634 * *overlap The place in the lock list to start looking 1635 * lock The lock which is being tested 1636 * type Pass 'SELF' to test only locks with the same 1637 * owner as lock, or 'OTHER' to test only locks 1638 * with a different owner 1639 * 1640 * Returns one of six values: 1641 * 0) no overlap 1642 * 1) overlap == lock 1643 * 2) overlap contains lock 1644 * 3) lock contains overlap 1645 * 4) overlap starts before lock 1646 * 5) overlap ends after lock 1647 * 1648 * If there is an overlapping lock, '*overlap' is set to point at the 1649 * overlapping lock. 1650 * 1651 * NOTE: this returns only the FIRST overlapping lock. There 1652 * may be more than one. 
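/*
 * Illustrative classifications (not in the original source) for
 * lf_findoverlap() below, with 'lock' covering bytes [100..199]:
 *
 *   list entry [0..49]     -> skipped, the scan continues (case 0)
 *   list entry [300..399]  -> scan stops, result 0: no overlap
 *   list entry [100..199]  -> 1  overlap == lock
 *   list entry [50..250]   -> 2  overlap contains lock
 *   list entry [120..180]  -> 3  lock contains overlap
 *   list entry [50..150]   -> 4  overlap starts before lock
 *   list entry [150..250]  -> 5  overlap ends after lock
 */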
1653 */ 1654static int 1655lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) 1656{ 1657 struct lockf_entry *lf; 1658 off_t start, end; 1659 int res; 1660 1661 if ((*overlap) == NOLOCKF) { 1662 return (0); 1663 } 1664#ifdef LOCKF_DEBUG 1665 if (lockf_debug & 2) 1666 lf_print("lf_findoverlap: looking for overlap in", lock); 1667#endif /* LOCKF_DEBUG */ 1668 start = lock->lf_start; 1669 end = lock->lf_end; 1670 res = 0; 1671 while (*overlap) { 1672 lf = *overlap; 1673 if (lf->lf_start > end) 1674 break; 1675 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || 1676 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { 1677 *overlap = LIST_NEXT(lf, lf_link); 1678 continue; 1679 } 1680#ifdef LOCKF_DEBUG 1681 if (lockf_debug & 2) 1682 lf_print("\tchecking", lf); 1683#endif /* LOCKF_DEBUG */ 1684 /* 1685 * OK, check for overlap 1686 * 1687 * Six cases: 1688 * 0) no overlap 1689 * 1) overlap == lock 1690 * 2) overlap contains lock 1691 * 3) lock contains overlap 1692 * 4) overlap starts before lock 1693 * 5) overlap ends after lock 1694 */ 1695 if (start > lf->lf_end) { 1696 /* Case 0 */ 1697#ifdef LOCKF_DEBUG 1698 if (lockf_debug & 2) 1699 printf("no overlap\n"); 1700#endif /* LOCKF_DEBUG */ 1701 *overlap = LIST_NEXT(lf, lf_link); 1702 continue; 1703 } 1704 if (lf->lf_start == start && lf->lf_end == end) { 1705 /* Case 1 */ 1706#ifdef LOCKF_DEBUG 1707 if (lockf_debug & 2) 1708 printf("overlap == lock\n"); 1709#endif /* LOCKF_DEBUG */ 1710 res = 1; 1711 break; 1712 } 1713 if (lf->lf_start <= start && lf->lf_end >= end) { 1714 /* Case 2 */ 1715#ifdef LOCKF_DEBUG 1716 if (lockf_debug & 2) 1717 printf("overlap contains lock\n"); 1718#endif /* LOCKF_DEBUG */ 1719 res = 2; 1720 break; 1721 } 1722 if (start <= lf->lf_start && end >= lf->lf_end) { 1723 /* Case 3 */ 1724#ifdef LOCKF_DEBUG 1725 if (lockf_debug & 2) 1726 printf("lock contains overlap\n"); 1727#endif /* LOCKF_DEBUG */ 1728 res = 3; 1729 break; 1730 } 1731 if (lf->lf_start < start && lf->lf_end >= start) { 1732 /* Case 4 */ 1733#ifdef LOCKF_DEBUG 1734 if (lockf_debug & 2) 1735 printf("overlap starts before lock\n"); 1736#endif /* LOCKF_DEBUG */ 1737 res = 4; 1738 break; 1739 } 1740 if (lf->lf_start > start && lf->lf_end > end) { 1741 /* Case 5 */ 1742#ifdef LOCKF_DEBUG 1743 if (lockf_debug & 2) 1744 printf("overlap ends after lock\n"); 1745#endif /* LOCKF_DEBUG */ 1746 res = 5; 1747 break; 1748 } 1749 panic("lf_findoverlap: default"); 1750 } 1751 return (res); 1752} 1753 1754/* 1755 * Split an the existing 'lock1', based on the extent of the lock 1756 * described by 'lock2'. The existing lock should cover 'lock2' 1757 * entirely. 1758 * 1759 * Any pending locks which have been been unblocked are added to 1760 * 'granted' 1761 */ 1762static void 1763lf_split(struct lockf *state, struct lockf_entry *lock1, 1764 struct lockf_entry *lock2, struct lockf_entry_list *granted) 1765{ 1766 struct lockf_entry *splitlock; 1767 1768#ifdef LOCKF_DEBUG 1769 if (lockf_debug & 2) { 1770 lf_print("lf_split", lock1); 1771 lf_print("splitting from", lock2); 1772 } 1773#endif /* LOCKF_DEBUG */ 1774 /* 1775 * Check to see if we don't need to split at all. 1776 */ 1777 if (lock1->lf_start == lock2->lf_start) { 1778 lf_set_start(state, lock1, lock2->lf_end + 1, granted); 1779 return; 1780 } 1781 if (lock1->lf_end == lock2->lf_end) { 1782 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1783 return; 1784 } 1785 /* 1786 * Make a new lock consisting of the last part of 1787 * the encompassing lock. 
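/*
 * Illustrative example (not in the original source): if lock1 covers
 * [0..99] and lock2 covers [40..59], neither boundary matches, so
 * lf_split() below allocates splitlock = [60..99] carrying lock1's
 * owner and type, and then shrinks lock1 to [0..39] with lf_set_end().
 * If lock2 had shared a boundary with lock1, one of the
 * lf_set_start()/lf_set_end() early returns above would have been
 * taken and no new entry would be needed.
 */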
1788 */ 1789 splitlock = lf_alloc_lock(lock1->lf_owner); 1790 memcpy(splitlock, lock1, sizeof *splitlock); 1791 if (splitlock->lf_flags & F_REMOTE) 1792 vref(splitlock->lf_vnode); 1793 1794 /* 1795 * This cannot cause a deadlock since any edges we would add 1796 * to splitlock already exist in lock1. We must be sure to add 1797 * necessary dependancies to splitlock before we reduce lock1 1798 * otherwise we may accidentally grant a pending lock that 1799 * was blocked by the tail end of lock1. 1800 */ 1801 splitlock->lf_start = lock2->lf_end + 1; 1802 LIST_INIT(&splitlock->lf_outedges); 1803 LIST_INIT(&splitlock->lf_inedges); 1804 sx_xlock(&lf_owner_graph_lock); 1805 lf_add_incoming(state, splitlock); 1806 sx_xunlock(&lf_owner_graph_lock); 1807 1808 lf_set_end(state, lock1, lock2->lf_start - 1, granted); 1809 1810 /* 1811 * OK, now link it in 1812 */ 1813 lf_insert_lock(state, splitlock); 1814} 1815 1816struct clearlock { 1817 STAILQ_ENTRY(clearlock) link; 1818 struct vnode *vp; 1819 struct flock fl; 1820}; 1821STAILQ_HEAD(clearlocklist, clearlock); 1822 1823void 1824lf_clearremotesys(int sysid) 1825{ 1826 struct lockf *ls; 1827 struct lockf_entry *lf; 1828 struct clearlock *cl; 1829 struct clearlocklist locks; 1830 1831 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS")); 1832 1833 /* 1834 * In order to keep the locking simple, we iterate over the 1835 * active lock lists to build a list of locks that need 1836 * releasing. We then call VOP_ADVLOCK for each one in turn. 1837 * 1838 * We take an extra reference to the vnode for the duration to 1839 * make sure it doesn't go away before we are finished. 1840 */ 1841 STAILQ_INIT(&locks); 1842 sx_xlock(&lf_lock_states_lock); 1843 LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1844 sx_xlock(&ls->ls_lock); 1845 LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1846 if (lf->lf_owner->lo_sysid != sysid) 1847 continue; 1848 1849 cl = malloc(sizeof(struct clearlock), M_LOCKF, 1850 M_WAITOK); 1851 cl->vp = lf->lf_vnode; 1852 vref(cl->vp); 1853 cl->fl.l_start = lf->lf_start; 1854 if (lf->lf_end == OFF_MAX) 1855 cl->fl.l_len = 0; 1856 else 1857 cl->fl.l_len = 1858 lf->lf_end - lf->lf_start + 1; 1859 cl->fl.l_whence = SEEK_SET; 1860 cl->fl.l_type = F_UNLCK; 1861 cl->fl.l_pid = lf->lf_owner->lo_pid; 1862 cl->fl.l_sysid = sysid; 1863 STAILQ_INSERT_TAIL(&locks, cl, link); 1864 } 1865 sx_xunlock(&ls->ls_lock); 1866 } 1867 sx_xunlock(&lf_lock_states_lock); 1868 1869 while ((cl = STAILQ_FIRST(&locks)) != NULL) { 1870 STAILQ_REMOVE_HEAD(&locks, link); 1871 VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE); 1872 vrele(cl->vp); 1873 free(cl, M_LOCKF); 1874 } 1875} 1876 1877int 1878lf_countlocks(int sysid) 1879{ 1880 int i; 1881 struct lock_owner *lo; 1882 int count; 1883 1884 count = 0; 1885 sx_xlock(&lf_lock_owners_lock); 1886 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 1887 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 1888 if (lo->lo_sysid == sysid) 1889 count += lo->lo_refs; 1890 sx_xunlock(&lf_lock_owners_lock); 1891 1892 return (count); 1893} 1894 1895#ifdef LOCKF_DEBUG 1896 1897/* 1898 * Return non-zero if y is reachable from x using a brute force 1899 * search. If reachable and path is non-null, return the route taken 1900 * in path. 
1901 */ 1902static int 1903graph_reaches(struct owner_vertex *x, struct owner_vertex *y, 1904 struct owner_vertex_list *path) 1905{ 1906 struct owner_edge *e; 1907 1908 if (x == y) { 1909 if (path) 1910 TAILQ_INSERT_HEAD(path, x, v_link); 1911 return 1; 1912 } 1913 1914 LIST_FOREACH(e, &x->v_outedges, e_outlink) { 1915 if (graph_reaches(e->e_to, y, path)) { 1916 if (path) 1917 TAILQ_INSERT_HEAD(path, x, v_link); 1918 return 1; 1919 } 1920 } 1921 return 0; 1922} 1923 1924/* 1925 * Perform consistency checks on the graph. Make sure the values of 1926 * v_order are correct. If checkorder is non-zero, check no vertex can 1927 * reach any other vertex with a smaller order. 1928 */ 1929static void 1930graph_check(struct owner_graph *g, int checkorder) 1931{ 1932 int i, j; 1933 1934 for (i = 0; i < g->g_size; i++) { 1935 if (!g->g_vertices[i]->v_owner) 1936 continue; 1937 KASSERT(g->g_vertices[i]->v_order == i, 1938 ("lock graph vertices disordered")); 1939 if (checkorder) { 1940 for (j = 0; j < i; j++) { 1941 if (!g->g_vertices[j]->v_owner) 1942 continue; 1943 KASSERT(!graph_reaches(g->g_vertices[i], 1944 g->g_vertices[j], NULL), 1945 ("lock graph vertices disordered")); 1946 } 1947 } 1948 } 1949} 1950 1951static void 1952graph_print_vertices(struct owner_vertex_list *set) 1953{ 1954 struct owner_vertex *v; 1955 1956 printf("{ "); 1957 TAILQ_FOREACH(v, set, v_link) { 1958 printf("%d:", v->v_order); 1959 lf_print_owner(v->v_owner); 1960 if (TAILQ_NEXT(v, v_link)) 1961 printf(", "); 1962 } 1963 printf(" }\n"); 1964} 1965 1966#endif 1967 1968/* 1969 * Calculate the sub-set of vertices v from the affected region [y..x] 1970 * where v is reachable from y. Return -1 if a loop was detected 1971 * (i.e. x is reachable from y, otherwise the number of vertices in 1972 * this subset. 1973 */ 1974static int 1975graph_delta_forward(struct owner_graph *g, struct owner_vertex *x, 1976 struct owner_vertex *y, struct owner_vertex_list *delta) 1977{ 1978 uint32_t gen; 1979 struct owner_vertex *v; 1980 struct owner_edge *e; 1981 int n; 1982 1983 /* 1984 * We start with a set containing just y. Then for each vertex 1985 * v in the set so far unprocessed, we add each vertex that v 1986 * has an out-edge to and that is within the affected region 1987 * [y..x]. If we see the vertex x on our travels, stop 1988 * immediately. 1989 */ 1990 TAILQ_INIT(delta); 1991 TAILQ_INSERT_TAIL(delta, y, v_link); 1992 v = y; 1993 n = 1; 1994 gen = g->g_gen; 1995 while (v) { 1996 LIST_FOREACH(e, &v->v_outedges, e_outlink) { 1997 if (e->e_to == x) 1998 return -1; 1999 if (e->e_to->v_order < x->v_order 2000 && e->e_to->v_gen != gen) { 2001 e->e_to->v_gen = gen; 2002 TAILQ_INSERT_TAIL(delta, e->e_to, v_link); 2003 n++; 2004 } 2005 } 2006 v = TAILQ_NEXT(v, v_link); 2007 } 2008 2009 return (n); 2010} 2011 2012/* 2013 * Calculate the sub-set of vertices v from the affected region [y..x] 2014 * where v reaches x. Return the number of vertices in this subset. 2015 */ 2016static int 2017graph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 2018 struct owner_vertex *y, struct owner_vertex_list *delta) 2019{ 2020 uint32_t gen; 2021 struct owner_vertex *v; 2022 struct owner_edge *e; 2023 int n; 2024 2025 /* 2026 * We start with a set containing just x. Then for each vertex 2027 * v in the set so far unprocessed, we add each vertex that v 2028 * has an in-edge from and that is within the affected region 2029 * [y..x]. 
/*
 * Calculate the sub-set of vertices v from the affected region [y..x]
 * where v reaches x. Return the number of vertices in this subset.
 */
static int
graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
	uint32_t gen;
	struct owner_vertex *v;
	struct owner_edge *e;
	int n;

	/*
	 * We start with a set containing just x. Then for each vertex
	 * v in the set so far unprocessed, we add each vertex that v
	 * has an in-edge from and that is within the affected region
	 * [y..x].
	 */
	TAILQ_INIT(delta);
	TAILQ_INSERT_TAIL(delta, x, v_link);
	v = x;
	n = 1;
	gen = g->g_gen;
	while (v) {
		LIST_FOREACH(e, &v->v_inedges, e_inlink) {
			if (e->e_from->v_order > y->v_order
			    && e->e_from->v_gen != gen) {
				e->e_from->v_gen = gen;
				TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
				n++;
			}
		}
		v = TAILQ_PREV(v, owner_vertex_list, v_link);
	}

	return (n);
}

/*
 * Insertion-sort the v_order values of the vertices in 'set' into the
 * indices array, which already contains n sorted entries. Return the
 * new number of entries.
 */
static int
graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
{
	struct owner_vertex *v;
	int i, j;

	TAILQ_FOREACH(v, set, v_link) {
		for (i = n;
		     i > 0 && indices[i - 1] > v->v_order; i--)
			;
		for (j = n - 1; j >= i; j--)
			indices[j + 1] = indices[j];
		indices[i] = v->v_order;
		n++;
	}

	return (n);
}

/*
 * Repeatedly remove the lowest-ordered vertex from 'set', assign it
 * the next unused index and slot it back into the graph's vertex
 * array. Return the index of the next unused entry in indices.
 */
static int
graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
    struct owner_vertex_list *set)
{
	struct owner_vertex *v, *vlowest;

	while (!TAILQ_EMPTY(set)) {
		vlowest = NULL;
		TAILQ_FOREACH(v, set, v_link) {
			if (!vlowest || v->v_order < vlowest->v_order)
				vlowest = v;
		}
		TAILQ_REMOVE(set, vlowest, v_link);
		vlowest->v_order = indices[nextunused];
		g->g_vertices[vlowest->v_order] = vlowest;
		nextunused++;
	}

	return (nextunused);
}

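/*
 * Worked example for the two helpers above (the vertex names and
 * order values are illustrative only).  Suppose the affected vertices
 * currently have these v_order values:
 *
 *	deltaF (reachable from y):	y = 3, a = 5
 *	deltaB (reaching x):		b = 6, x = 8
 *
 * graph_add_indices() merges the order values into a single sorted
 * array, indices[] = { 3, 5, 6, 8 }.  graph_assign_indices() then
 * hands the values back out, deltaB first, always to the lowest
 * ordered vertex remaining in the set:
 *
 *	b: 6 -> 3,  x: 8 -> 5,  y: 3 -> 6,  a: 5 -> 8
 *
 * Afterwards every vertex that reaches x sorts before every vertex
 * reachable from y (so the new edge x->y respects the order), and the
 * relative order within each delta is unchanged.
 */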
/*
 * Add an edge x->y to the owner graph, re-ordering the affected
 * vertices if necessary so that the topological order is preserved.
 * Return EDEADLK if the new edge would create a cycle.
 */
static int
graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;
	struct owner_vertex_list deltaF, deltaB;
	int nF, nB, n, vi, i;
	int *indices;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y) {
			e->e_refs++;
			return (0);
		}
	}

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		printf("adding edge %d:", x->v_order);
		lf_print_owner(x->v_owner);
		printf(" -> %d:", y->v_order);
		lf_print_owner(y->v_owner);
		printf("\n");
	}
#endif
	if (y->v_order < x->v_order) {
		/*
		 * The new edge violates the order. First find the set
		 * of affected vertices reachable from y (deltaF) and
		 * the set of affected vertices that reach x (deltaB),
		 * using the graph generation number to detect whether
		 * we have visited a given vertex already. We re-order
		 * the graph so that each vertex in deltaB appears
		 * before each vertex in deltaF.
		 *
		 * If x is a member of deltaF, then the new edge would
		 * create a cycle. Otherwise, we may assume that
		 * deltaF and deltaB are disjoint.
		 */
		g->g_gen++;
		if (g->g_gen == 0) {
			/*
			 * Generation wrap.
			 */
			for (vi = 0; vi < g->g_size; vi++) {
				g->g_vertices[vi]->v_gen = 0;
			}
			g->g_gen++;
		}
		nF = graph_delta_forward(g, x, y, &deltaF);
		if (nF < 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 8) {
				struct owner_vertex_list path;
				printf("deadlock: ");
				TAILQ_INIT(&path);
				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}
#endif
			return (EDEADLK);
		}

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");
			printf("deltaF = ");
			graph_print_vertices(&deltaF);
		}
#endif

		nB = graph_delta_backward(g, x, y, &deltaB);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("deltaB = ");
			graph_print_vertices(&deltaB);
		}
#endif

		/*
		 * We first build a set of vertex indices (vertex
		 * order values) that we may use, then we re-assign
		 * orders first to those vertices in deltaB, then to
		 * deltaF. Note that the contents of deltaF and deltaB
		 * may be partially disordered - we perform an
		 * insertion sort while building our index set.
		 */
		indices = g->g_indexbuf;
		n = graph_add_indices(indices, 0, &deltaF);
		graph_add_indices(indices, n, &deltaB);

		/*
		 * We must also be sure to maintain the relative
		 * ordering of deltaF and deltaB when re-assigning
		 * vertices. We do this by iteratively removing the
		 * lowest ordered element from the set and assigning
		 * it the next value from our new ordering.
		 */
		i = graph_assign_indices(g, indices, 0, &deltaB);
		graph_assign_indices(g, indices, i, &deltaF);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			struct owner_vertex_list set;
			TAILQ_INIT(&set);
			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);
			printf("new ordering = ");
			graph_print_vertices(&set);
		}
#endif
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}
#endif

	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
	e->e_refs = 1;
	e->e_from = x;
	e->e_to = y;

	return (0);
}

/*
 * Remove an edge x->y from the graph.
 */
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y)
			break;
	}
	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	e->e_refs--;
	if (e->e_refs == 0) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
			printf("\n");
		}
#endif
		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
		free(e, M_LOCKF);
	}
}

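/*
 * A minimal sketch of how the two functions above are driven (within
 * this file the real callers are lf_add_edge() and lf_remove_edge();
 * the function below and its name exist only for illustration).  The
 * graph lock is held exclusively around each call, and an EDEADLK
 * return means the request that wanted to wait must fail instead of
 * sleeping.
 */
#if 0
static int
example_wait_for(struct owner_graph *g, struct owner_vertex *waiter,
    struct owner_vertex *holder)
{
	int error;

	sx_xlock(&lf_owner_graph_lock);
	error = graph_add_edge(g, waiter, holder);
	sx_xunlock(&lf_owner_graph_lock);
	if (error)
		return (error);		/* EDEADLK: waiting would deadlock */

	/* ... sleep until the conflicting lock is released ... */

	sx_xlock(&lf_owner_graph_lock);
	graph_remove_edge(g, waiter, holder);
	sx_xunlock(&lf_owner_graph_lock);
	return (0);
}
#endif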
/*
 * Allocate a new vertex for a lock owner and add it to the graph,
 * doubling the size of the vertex array and index buffer when they
 * are full. The M_WAITOK allocations mean this cannot fail.
 */
static struct owner_vertex *
graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
{
	struct owner_vertex *v;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
	if (g->g_size == g->g_space) {
		g->g_vertices = realloc(g->g_vertices,
		    2 * g->g_space * sizeof(struct owner_vertex *),
		    M_LOCKF, M_WAITOK);
		free(g->g_indexbuf, M_LOCKF);
		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
		    M_LOCKF, M_WAITOK);
		g->g_space = 2 * g->g_space;
	}
	v->v_order = g->g_size;
	v->v_gen = g->g_gen;
	g->g_vertices[g->g_size] = v;
	g->g_size++;

	LIST_INIT(&v->v_outedges);
	LIST_INIT(&v->v_inedges);
	v->v_owner = lo;

	return (v);
}

/*
 * Remove a vertex (which must have no remaining edges) from the graph
 * and free it. The vertices after it move down one slot and their
 * v_order values drop by one, keeping the array dense as
 * graph_check() expects.
 */
static void
graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
{
	struct owner_vertex *w;
	int i;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));

	/*
	 * Remove from the graph's array and close up the gap,
	 * renumbering the other vertices.
	 */
	for (i = v->v_order + 1; i < g->g_size; i++) {
		w = g->g_vertices[i];
		w->v_order--;
		g->g_vertices[i - 1] = w;
	}
	g->g_size--;

	free(v, M_LOCKF);
}

/*
 * Initialise an empty owner graph with room for an initial ten
 * vertices.
 */
static struct owner_graph *
graph_init(struct owner_graph *g)
{

	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
	    M_LOCKF, M_WAITOK);
	g->g_size = 0;
	g->g_space = 10;
	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
	g->g_gen = 0;

	return (g);
}

#ifdef LOCKF_DEBUG
/*
 * Print description of a lock owner
 */
static void
lf_print_owner(struct lock_owner *lo)
{

	if (lo->lo_flags & F_REMOTE) {
		printf("remote pid %d, system %d",
		    lo->lo_pid, lo->lo_sysid);
	} else if (lo->lo_flags & F_FLOCK) {
		printf("file %p", lo->lo_id);
	} else {
		printf("local pid %d", lo->lo_pid);
	}
}

/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf_entry *lock)
{

	printf("%s: lock %p for ", tag, (void *)lock);
	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>,",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev));
	printf(" %s, start %jd, end ",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
	    (intmax_t)lock->lf_start);
	if (lock->lf_end == OFF_MAX)
		printf("EOF");
	else
		printf("%jd", (intmax_t)lock->lf_end);
	if (!LIST_EMPTY(&lock->lf_outedges))
		printf(" block %p\n",
		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
	else
		printf("\n");
}

/*
 * Print out the list of locks active on the same inode as 'lock',
 * together with any requests attached to them.
 */
static void
lf_printlist(char *tag, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *blk;
	struct lockf_edge *e;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
		printf("\tlock %p for ", (void *)lf);
		lf_print_owner(lf->lf_owner);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
			blk = e->le_to;
			printf("\n\t\tlock request %p for ", (void *)blk);
			lf_print_owner(blk->lf_owner);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!LIST_EMPTY(&blk->lf_inedges))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */