kern_lockf.c revision 178243
/*-
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 178243 2008-04-16 11:33:32Z kib $");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;	/* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_edge;
struct owner_vertex;
struct owner_vertex_list;
struct owner_graph;

#define NOLOCKF (struct lockf_entry *)0
#define SELF	0x1
#define OTHERS	0x2
static void	 lf_init(void *);
static int	 lf_hash_owner(caddr_t, struct flock *, int);
static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
	    int);
static struct lockf_entry *
		 lf_alloc_lock(struct lock_owner *);
static void	 lf_free_lock(struct lockf_entry *);
static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void	 lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
		 lf_alloc_edge(void);
static void	 lf_alloc_vertex(struct lockf_entry *);
static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void	 lf_remove_edge(struct lockf_edge *);
static void	 lf_remove_outgoing(struct lockf_entry *);
static void	 lf_remove_incoming(struct lockf_entry *);
static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
	    int);
static struct lockf_entry *
		 lf_getblock(struct lockf *, struct lockf_entry *);
static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
	    int all, struct lockf_entry_list *);
static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
	    struct lockf_entry_list *);
static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
	    struct lockf_entry_list *);
static int	 lf_setlock(struct lockf *, struct lockf_entry *,
	    struct vnode *, void **cookiep);
static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	 lf_split(struct lockf *, struct lockf_entry *,
	    struct lockf_entry *, struct lockf_entry_list *);
#ifdef LOCKF_DEBUG
static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
	    struct owner_vertex_list *path);
static void	 graph_check(struct owner_graph *g, int checkorder);
static void	 graph_print_vertices(struct owner_vertex_list *set);
#endif
static int	 graph_delta_forward(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y,
	    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y,
	    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
	    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
	    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
	    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
	    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
	    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf_entry *);
static void	 lf_printlist(char *, struct lockf_entry *);
static void	 lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
183177633Sdfr * 184177633Sdfr * Locks: 185177633Sdfr * (s) locked by state->ls_lock 186177633Sdfr * (S) locked by lf_lock_states_lock 187177633Sdfr * (l) locked by lf_lock_owners_lock 188177633Sdfr * (g) locked by lf_owner_graph_lock 189177633Sdfr * (c) const until freeing 190177633Sdfr */ 191177633Sdfr#define LOCK_OWNER_HASH_SIZE 256 192177633Sdfr 193177633Sdfrstruct lock_owner { 194177633Sdfr LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */ 195177633Sdfr int lo_refs; /* (l) Number of locks referring to this */ 196177633Sdfr int lo_flags; /* (c) Flags passwd to lf_advlock */ 197177633Sdfr caddr_t lo_id; /* (c) Id value passed to lf_advlock */ 198177633Sdfr pid_t lo_pid; /* (c) Process Id of the lock owner */ 199177633Sdfr int lo_sysid; /* (c) System Id of the lock owner */ 200177633Sdfr struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */ 201177633Sdfr}; 202177633Sdfr 203177633SdfrLIST_HEAD(lock_owner_list, lock_owner); 204177633Sdfr 205177633Sdfrstatic struct sx lf_lock_states_lock; 206177633Sdfrstatic struct lockf_list lf_lock_states; /* (S) */ 207177633Sdfrstatic struct sx lf_lock_owners_lock; 208177633Sdfrstatic struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */ 209177633Sdfr 210177633Sdfr/* 211177633Sdfr * Structures for deadlock detection. 212177633Sdfr * 213177633Sdfr * We have two types of directed graph, the first is the set of locks, 214177633Sdfr * both active and pending on a vnode. Within this graph, active locks 215177633Sdfr * are terminal nodes in the graph (i.e. have no out-going 216177633Sdfr * edges). Pending locks have out-going edges to each blocking active 217177633Sdfr * lock that prevents the lock from being granted and also to each 218177633Sdfr * older pending lock that would block them if it was active. The 219177633Sdfr * graph for each vnode is naturally acyclic; new edges are only ever 220177633Sdfr * added to or from new nodes (either new pending locks which only add 221177633Sdfr * out-going edges or new active locks which only add in-coming edges) 222177633Sdfr * therefore they cannot create loops in the lock graph. 223177633Sdfr * 224177633Sdfr * The second graph is a global graph of lock owners. Each lock owner 225177633Sdfr * is a vertex in that graph and an edge is added to the graph 226177633Sdfr * whenever an edge is added to a vnode graph, with end points 227177633Sdfr * corresponding to owner of the new pending lock and the owner of the 228177633Sdfr * lock upon which it waits. In order to prevent deadlock, we only add 229177633Sdfr * an edge to this graph if the new edge would not create a cycle. 230177633Sdfr * 231177633Sdfr * The lock owner graph is topologically sorted, i.e. if a node has 232177633Sdfr * any outgoing edges, then it has an order strictly less than any 233177633Sdfr * node to which it has an outgoing edge. We preserve this ordering 234177633Sdfr * (and detect cycles) on edge insertion using Algorithm PK from the 235177633Sdfr * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic 236177633Sdfr * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article 237177633Sdfr * No. 
struct owner_vertex;

struct owner_edge {
	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
	int		e_refs;		  /* (g) number of times added */
	struct owner_vertex *e_from;	  /* (c) out-going from here */
	struct owner_vertex *e_to;	  /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
	uint32_t	v_gen;		  /* (g) workspace for edge insertion */
	int		v_order;	  /* (g) order of vertex in graph */
	struct owner_edge_list v_outedges;/* (g) list of out-edges */
	struct owner_edge_list v_inedges; /* (g) list of in-edges */
	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
	struct owner_vertex** g_vertices; /* (g) pointers to vertices */
	int		g_size;		  /* (g) number of vertices */
	int		g_space;	  /* (g) space allocated for vertices */
	int		*g_indexbuf;	  /* (g) workspace for loop detection */
	uint32_t	g_gen;		  /* (g) increment when re-ordering */
};

static struct sx		lf_owner_graph_lock;
static struct owner_graph	lf_owner_graph;

/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
	int i;

	sx_init(&lf_lock_states_lock, "lock states lock");
	LIST_INIT(&lf_lock_states);

	sx_init(&lf_lock_owners_lock, "lock owners lock");
	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
		LIST_INIT(&lf_lock_owners[i]);

	sx_init(&lf_owner_graph_lock, "owner graph lock");
	graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);

/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct flock *fl, int flags)
{
	uint32_t h;

	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t) id) >> 7;
	} else {
		struct proc *p = (struct proc *) id;
		h = HASHSTEP(0, p->p_pid);
		h = HASHSTEP(h, 0);
	}

	return (h % LOCK_OWNER_HASH_SIZE);
}
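
/*
 * A worked example of the three owner namespaces hashed above
 * (illustrative only): a local POSIX (fcntl) lock hashes the pid of
 * the process passed as 'id'; a local flock() lock hashes the file
 * pointer itself, shifted right to discard the low bits which carry
 * little entropy for aligned kernel allocations; a remote lock
 * hashes the <pid,sysid> pair taken from the flock structure. The
 * hash must classify owners the same way as lf_owner_matches()
 * below, so that repeated requests from one owner always land in the
 * same lf_lock_owners[] chain.
 */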
/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{
	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
			&& lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
}

static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Allocated lock %p\n", lf);
#endif
	if (lo) {
		sx_xlock(&lf_lock_owners_lock);
		lo->lo_refs++;
		sx_xunlock(&lf_lock_owners_lock);
		lf->lf_owner = lo;
	}

	return (lf);
}

static void
lf_free_lock(struct lockf_entry *lock)
{
	/*
	 * Adjust the lock_owner reference count and
	 * reclaim the entry if this is the last lock
	 * for that owner.
	 */
	struct lock_owner *lo = lock->lf_owner;
	if (lo) {
		KASSERT(LIST_EMPTY(&lock->lf_outedges),
		    ("freeing lock with dependancies"));
		KASSERT(LIST_EMPTY(&lock->lf_inedges),
		    ("freeing lock with dependants"));
		sx_xlock(&lf_lock_owners_lock);
		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
		lo->lo_refs--;
		if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				printf("lf_free_lock: freeing lock owner %p\n",
				    lo);
#endif
			if (lo->lo_vertex) {
				sx_xlock(&lf_owner_graph_lock);
				graph_free_vertex(&lf_owner_graph,
				    lo->lo_vertex);
				sx_xunlock(&lf_owner_graph_lock);
			}
			LIST_REMOVE(lo, lo_link);
			free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
			if (lockf_debug & 4)
				printf("Freed lock owner %p\n", lo);
#endif
		}
		sx_unlock(&lf_lock_owners_lock);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Freed lock %p\n", lock);
#endif
	free(lock, M_LOCKF);
}

/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
	struct lockf *state, *freestate = NULL;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	int hash;
	struct lock_owner *lo;
	off_t start, end, oadd;
	int error;

	/*
	 * Handle the F_UNLCKSYS case first - no need to mess about
	 * creating a lock owner for this one.
	 */
	if (ap->a_op == F_UNLCKSYS) {
		lf_clearremotesys(fl->l_sysid);
		return (0);
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	/*
	 * Map our arguments to an existing lock owner or create one
	 * if this is the first time we have seen this owner.
	 */
	hash = lf_hash_owner(id, fl, flags);
	sx_xlock(&lf_lock_owners_lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;
	if (!lo) {
		/*
		 * We initialise the lock with a reference
		 * count which matches the new lockf_entry
		 * structure created below.
		 */
		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
		    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 4)
			printf("Allocated lock owner %p\n", lo);
#endif

		lo->lo_refs = 1;
		lo->lo_flags = flags;
		lo->lo_id = id;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			lo->lo_pid = -1;
			lo->lo_sysid = 0;
		} else {
			struct proc *p = (struct proc *) id;
			lo->lo_pid = p->p_pid;
			lo->lo_sysid = 0;
		}
		lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
			lf_print_owner(lo);
			printf("\n");
		}
#endif

		LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
	} else {
		/*
		 * We have seen this lock owner before, increase its
		 * reference count to account for the new lockf_entry
		 * structure we create below.
		 */
		lo->lo_refs++;
	}
	sx_xunlock(&lf_lock_owners_lock);

	/*
	 * Create the lockf structure. We initialise the lf_owner
	 * field here instead of in lf_alloc_lock() to avoid paying
	 * the lf_lock_owners_lock tax twice.
	 */
	lock = lf_alloc_lock(NULL);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_owner = lo;
	lock->lf_vnode = vp;
	if (flags & F_REMOTE) {
		/*
		 * For remote locks, the caller may release its ref to
		 * the vnode at any time - we have to ref it here to
		 * prevent it from being recycled unexpectedly.
		 */
		vref(vp);
	}

	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all other FS's other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;

	/*
	 * Do the requested operation. First find our state structure
	 * and create a new one if necessary - the caller's *statep
	 * variable and the state's ls_threads count is protected by
	 * the vnode interlock.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		lf_free_lock(lock);
		return (ENOENT);
	}

	/*
	 * Allocate a state structure if necessary.
	 */
	state = *statep;
	if (state == NULL) {
		struct lockf *ls;

		VI_UNLOCK(vp);

		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
		sx_init(&ls->ls_lock, "ls_lock");
		LIST_INIT(&ls->ls_active);
		LIST_INIT(&ls->ls_pending);
		ls->ls_threads = 1;

		sx_xlock(&lf_lock_states_lock);
		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
		sx_xunlock(&lf_lock_states_lock);

		/*
		 * Cope if we lost a race with some other thread while
		 * trying to allocate memory.
		 */
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
			lf_free_lock(lock);
			return (ENOENT);
		}
		if ((*statep) == NULL) {
			state = *statep = ls;
			VI_UNLOCK(vp);
		} else {
			state = *statep;
			state->ls_threads++;
			VI_UNLOCK(vp);

			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
		}
	} else {
		state->ls_threads++;
		VI_UNLOCK(vp);
	}

	sx_xlock(&state->ls_lock);
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(state, lock, vp, ap->a_cookiep);
		break;

	case F_UNLCK:
		error = lf_clearlock(state, lock);
		lf_free_lock(lock);
		break;

	case F_GETLK:
		error = lf_getlock(state, lock, fl);
		lf_free_lock(lock);
		break;

	case F_CANCEL:
		if (ap->a_cookiep)
			error = lf_cancel(state, lock, *ap->a_cookiep);
		else
			error = EINVAL;
		lf_free_lock(lock);
		break;

	default:
		lf_free_lock(lock);
		error = EINVAL;
		break;
	}

#ifdef INVARIANTS
	/*
	 * Check for some can't happen stuff. In this case, the active
	 * lock list becoming disordered or containing mutually
	 * blocking locks. We also check the pending list for locks
	 * which should be active (i.e. have no out-going edges).
	 */
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
			    <= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			if (lock == lf)
				break;
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}
#endif
	sx_xunlock(&state->ls_lock);

	/*
	 * If we have removed the last active lock on the vnode and
	 * this is the last thread that was in-progress, we can free
	 * the state structure. We update the caller's pointer inside
	 * the vnode interlock but call free outside.
	 *
	 * XXX alternatively, keep the state structure around until
	 * the filesystem recycles - requires a callback from the
	 * filesystem.
	 */
	VI_LOCK(vp);

	state->ls_threads--;
	wakeup(state);
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeing state with pending locks"));
		freestate = state;
		*statep = NULL;
	}

	VI_UNLOCK(vp);

	if (freestate) {
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(freestate, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&freestate->ls_lock);
		free(freestate, M_LOCKF);
	}
	return (error);
}

int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_vp = ap->a_vp;
	a.a_id = ap->a_id;
	a.a_op = ap->a_op;
	a.a_fl = ap->a_fl;
	a.a_flags = ap->a_flags;
	a.a_task = NULL;
	a.a_cookiep = NULL;

	return (lf_advlockasync(&a, statep, size));
}
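
/*
 * The usual entry point into this file is a filesystem's
 * VOP_ADVLOCK/VOP_ADVLOCKASYNC implementation, which supplies the
 * per-vnode state pointer and the current file size. A minimal
 * sketch, modeled on the ufs usage (the names are ufs's, shown here
 * for illustration only):
 *
 *	static int
 *	ufs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 *
 * lf_purgelocks() below is the matching teardown hook: it is meant
 * to be called while the vnode is being reclaimed (after VI_DOOMED
 * is set), so that no new threads can enter lf_advlockasync() for
 * this vnode while the state is being destroyed.
 */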
void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
	struct lockf *state;
	struct lockf_entry *lock, *nlock;

	/*
	 * For this to work correctly, the caller must ensure that no
	 * other threads enter the locking system for this vnode,
	 * e.g. by checking VI_DOOMED. We wake up any threads that are
	 * sleeping waiting for locks on this vnode and then free all
	 * the remaining locks.
	 */
	VI_LOCK(vp);
	state = *statep;
	if (state) {
		state->ls_threads++;
		VI_UNLOCK(vp);

		sx_xlock(&state->ls_lock);
		sx_xlock(&lf_owner_graph_lock);
		LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_remove_outgoing(lock);
			lf_remove_incoming(lock);

			/*
			 * If it's an async lock, we can just free it
			 * here, otherwise we let the sleeping thread
			 * free it.
			 */
			if (lock->lf_async_task) {
				lf_free_lock(lock);
			} else {
				lock->lf_flags |= F_INTR;
				wakeup(lock);
			}
		}
		sx_xunlock(&lf_owner_graph_lock);
		sx_xunlock(&state->ls_lock);

		/*
		 * Wait for all other threads, sleeping and otherwise
		 * to leave.
		 */
		VI_LOCK(vp);
		while (state->ls_threads > 1)
			msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
		*statep = 0;
		VI_UNLOCK(vp);

		/*
		 * We can just free all the active locks since they
		 * will have no dependencies (we removed them all
		 * above). We don't need to bother locking since we
		 * are the last thread using this state structure.
		 */
		LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_free_lock(lock);
		}
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(state, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&state->ls_lock);
		free(state, M_LOCKF);
	} else {
		VI_UNLOCK(vp);
	}
}

/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

	return x->lf_owner != y->lf_owner
		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
		&& lf_overlaps(x, y);
}
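
/*
 * A worked example of the two predicates above: for x = [0..5] and
 * y = [3..9], lf_overlaps() is true (0 <= 9 && 5 >= 3). Whether the
 * overlap actually blocks then depends on ownership and lock type:
 * two shared (F_RDLCK) locks never block each other, and a lock
 * never blocks another lock from the same owner, so lf_blocks() is
 * true only for distinct owners where at least one of the two locks
 * is F_WRLCK.
 */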
/*
 * Allocate a lock edge structure.
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

/*
 * Free a lock edge.
 */
static void
lf_free_edge(struct lockf_edge *e)
{

	free(e, M_LOCKF);
}


/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
	struct owner_graph *g = &lf_owner_graph;

	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
			graph_alloc_vertex(g, lock->lf_owner);
}

/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_edge *e;
	int error;

#ifdef INVARIANTS
	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));
#endif

	/*
	 * Make sure the two owners have entries in the owner graph.
	 */
	lf_alloc_vertex(x);
	lf_alloc_vertex(y);

	error = graph_add_edge(g, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);
	if (error)
		return (error);

	e = lf_alloc_edge();
	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
	e->le_from = x;
	e->le_to = y;

	return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	e->le_from = NULL;
	e->le_to = NULL;
	lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	/*
	 * We also need to add edges to sleeping locks that block
	 * us. This ensures that lf_wakeup_lock cannot grant two
	 * mutually blocking locks simultaneously and also enforces a
	 * 'first come, first served' fairness model. Note that this
	 * only happens if we are blocked by at least one active lock
	 * due to the call to lf_getblock in lf_setlock below.
	 */
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;
		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	return (0);
}

/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge from lock to each blocking lock.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(overlap, lock);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_incoming(lock);
			return (error);
		}
	}
	return (0);
}
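
/*
 * Taken together, lf_add_outgoing() and lf_add_incoming() maintain
 * the invariant described at the top of this file: a pending lock
 * has an out-going edge for every lock (active or older pending)
 * that currently blocks it, and an active lock has no out-going
 * edges at all. A pending lock whose out-edge list becomes empty is
 * therefore ready to be granted - that is exactly the test that
 * lf_update_dependancies() applies below.
 */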
/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
		return;
	}

	lfprev = NULL;
	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);
			return;
		}
		lfprev = lf;
	}
	LIST_INSERT_AFTER(lfprev, lock, lf_link);
}

/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependencies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

	/*
	 * Remove from ls_pending list and wake up the caller
	 * or start the async notification, as appropriate.
	 */
	LIST_REMOVE(wakelock, lf_link);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_wakeup_lock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
	if (wakelock->lf_async_task) {
		taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
	} else {
		wakeup(wakelock);
	}
}

/*
 * Re-check all dependent locks and remove edges to locks that we no
 * longer block. If 'all' is non-zero, the lock has been removed and
 * we must remove all the dependencies, otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'
 */
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
    struct lockf_entry_list *granted)
{
	struct lockf_edge *e, *ne;
	struct lockf_entry *deplock;

	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
		deplock = e->le_from;
		if (all || !lf_blocks(lock, deplock)) {
			sx_xlock(&lf_owner_graph_lock);
			lf_remove_edge(e);
			sx_xunlock(&lf_owner_graph_lock);
			if (LIST_EMPTY(&deplock->lf_outedges)) {
				lf_wakeup_lock(state, deplock);
				LIST_INSERT_HEAD(granted, deplock, lf_link);
			}
		}
	}
}

/*
 * Set the start of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
    struct lockf_entry_list *granted)
{

	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
	lock->lf_start = new_start;
	LIST_REMOVE(lock, lf_link);
	lf_insert_lock(state, lock);
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Set the end of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
    struct lockf_entry_list *granted)
{

	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
	lock->lf_end = new_end;
	lf_update_dependancies(state, lock, FALSE, granted);
}
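
/*
 * The overlap taxonomy used by lf_activate_lock() and
 * lf_findoverlap() below, drawn out for reference (lock ranges are
 * inclusive at both ends):
 *
 *	0) no overlap:			lock:	  =====
 *					overlap:	  =====
 *	1) overlap == lock:		lock:	  =====
 *					overlap:  =====
 *	2) overlap contains lock:	lock:	   ===
 *					overlap:  =====
 *	3) lock contains overlap:	lock:	  =====
 *					overlap:   ===
 *	4) overlap starts before lock:	lock:	    =====
 *					overlap:  =====
 *	5) overlap ends after lock:	lock:	  =====
 *					overlap:    =====
 */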
/*
 * Add a lock to the active list, updating or removing any current
 * locks owned by the same owner and processing any pending locks that
 * become unblocked as a result. This code is also used for unlock
 * since the logic for updating existing locks is identical.
 *
 * As a result of processing the new lock, we may unblock existing
 * pending locks as a result of downgrading/unlocking. We simply
 * activate the newly granted locks by looping.
 *
 * Since the new lock already has its dependencies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
 * not a real problem.
 */
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap, *lf;
	struct lockf_entry_list granted;
	int ovcase;

	LIST_INIT(&granted);
	LIST_INSERT_HEAD(&granted, lock, lf_link);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);

		/*
		 * Skip over locks owned by other processes. Handle
		 * any locks that overlap and are owned by ourselves.
		 */
		overlap = LIST_FIRST(&state->ls_active);
		for (;;) {
			ovcase = lf_findoverlap(&overlap, lock, SELF);

#ifdef LOCKF_DEBUG
			if (ovcase && (lockf_debug & 2)) {
				printf("lf_setlock: overlap %d", ovcase);
				lf_print("", overlap);
			}
#endif
			/*
			 * Six cases:
			 *	0) no overlap
			 *	1) overlap == lock
			 *	2) overlap contains lock
			 *	3) lock contains overlap
			 *	4) overlap starts before lock
			 *	5) overlap ends after lock
			 */
			switch (ovcase) {
			case 0: /* no overlap */
				break;

			case 1: /* overlap == lock */
				/*
				 * We have already setup the
				 * dependants for the new lock, taking
				 * into account a possible downgrade
				 * or unlock. Remove the old lock.
				 */
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
				    &granted);
				lf_free_lock(overlap);
				break;

			case 2: /* overlap contains lock */
				/*
				 * Just split the existing lock.
				 */
				lf_split(state, overlap, lock, &granted);
				break;

			case 3: /* lock contains overlap */
				/*
				 * Delete the overlap and advance to
				 * the next entry in the list.
				 */
				lf = LIST_NEXT(overlap, lf_link);
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
				    &granted);
				lf_free_lock(overlap);
				overlap = lf;
				continue;

			case 4: /* overlap starts before lock */
				/*
				 * Just update the overlap end and
				 * move on.
				 */
				lf_set_end(state, overlap, lock->lf_start - 1,
				    &granted);
				overlap = LIST_NEXT(overlap, lf_link);
				continue;

			case 5: /* overlap ends after lock */
				/*
				 * Change the start of overlap and
				 * re-insert.
				 */
				lf_set_start(state, overlap, lock->lf_end + 1,
				    &granted);
				break;
			}
			break;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			if (lock->lf_type != F_UNLCK)
				lf_print("lf_activate_lock: activated", lock);
			else
				lf_print("lf_activate_lock: unlocked", lock);
			lf_printlist("lf_activate_lock", lock);
		}
#endif /* LOCKF_DEBUG */
		if (lock->lf_type != F_UNLCK)
			lf_insert_lock(state, lock);
	}
}

/*
 * Cancel a pending lock request, either as a result of a signal or a
 * cancel request for an async lock.
 */
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry_list granted;

	/*
	 * Note it is theoretically possible that cancelling this lock
	 * may allow some other pending lock to become
	 * active. Consider this case:
	 *
	 * Owner	Action		Result		Dependencies
	 *
	 * A:		lock [0..0]	succeeds
	 * B:		lock [2..2]	succeeds
	 * C:		lock [1..2]	blocked		C->B
	 * D:		lock [0..1]	blocked		C->B,D->A,D->C
	 * A:		unlock [0..0]			C->B,D->C
	 * C:		cancel [1..2]
	 */

	LIST_REMOVE(lock, lf_link);

	/*
	 * Removing out-going edges is simple.
	 */
	sx_xlock(&lf_owner_graph_lock);
	lf_remove_outgoing(lock);
	sx_xunlock(&lf_owner_graph_lock);

	/*
	 * Removing in-coming edges may allow some other lock to
	 * become active - we use lf_update_dependancies to figure
	 * this out.
	 */
	LIST_INIT(&granted);
	lf_update_dependancies(state, lock, TRUE, &granted);
	lf_free_lock(lock);

	/*
	 * Feed any newly active locks to lf_activate_lock.
	 */
	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);
		lf_activate_lock(state, lock);
	}
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
	struct lockf_entry *block;
	static char lockstr[] = "lockf";
	int priority, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(state, lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0
		    && lock->lf_async_task == NULL) {
			lf_free_lock(lock);
			error = EAGAIN;
			goto out;
		}

		/*
		 * We are blocked. Create edges to each blocking lock,
		 * checking for deadlock using the owner graph. For
		 * simplicity, we run deadlock detection for all
		 * locks, posix and otherwise.
		 */
		sx_xlock(&lf_owner_graph_lock);
		error = lf_add_outgoing(state, lock);
		sx_xunlock(&lf_owner_graph_lock);

		if (error) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				lf_print("lf_setlock: deadlock", lock);
#endif
			lf_free_lock(lock);
			goto out;
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			lf_activate_lock(state, lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * We have added edges to everything that blocks
		 * us. Sleep until they all go away.
		 */
		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			struct lockf_edge *e;
			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
				lf_print("lf_setlock: blocking on", e->le_to);
				lf_printlist("lf_setlock", e->le_to);
			}
		}
#endif /* LOCKF_DEBUG */

		if ((lock->lf_flags & F_WAIT) == 0) {
			/*
			 * The caller requested async notification -
			 * this callback happens when the blocking
			 * lock is released, allowing the caller to
			 * make another attempt to take the lock.
			 */
			*cookiep = (void *) lock;
			error = EINPROGRESS;
			goto out;
		}

		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must
		 * remove our lock graph edges) and/or by another
		 * process releasing a lock (in which case our edges
		 * have already been removed and we have been moved to
		 * the active list). We may also have been woken by
		 * lf_purgelocks which we report to the caller as
		 * EINTR. In that case, lf_purgelocks will have
		 * removed our lock graph edges.
		 *
		 * Note that it is possible to receive a signal after
		 * we were successfully woken (and moved to the active
		 * list) but before we resumed execution. In this
		 * case, our lf_outedges list will be clear. We
		 * pretend there was no error.
		 *
		 * Note also, if we have been sleeping long enough, we
		 * may now have incoming edges from some newer lock
		 * which is waiting behind us in the queue.
		 */
145048556Sbde */ 1451178243Skib if (lock->lf_flags & F_INTR) { 1452178243Skib error = EINTR; 1453178243Skib lf_free_lock(lock); 1454178243Skib goto out; 1455178243Skib } 1456177633Sdfr if (LIST_EMPTY(&lock->lf_outedges)) { 1457177633Sdfr error = 0; 1458177633Sdfr } else { 1459177633Sdfr lf_cancel_lock(state, lock); 1460177633Sdfr goto out; 14611960Sdg } 1462177633Sdfr#ifdef LOCKF_DEBUG 1463177633Sdfr if (lockf_debug & 1) { 1464177633Sdfr lf_print("lf_setlock: granted", lock); 146548556Sbde } 1466177633Sdfr#endif 1467177633Sdfr goto out; 14681960Sdg } 14691960Sdg /* 1470177633Sdfr * It looks like we are going to grant the lock. First add 1471177633Sdfr * edges from any currently pending lock that the new lock 1472177633Sdfr * would block. 1473177633Sdfr */ 1474177633Sdfr sx_xlock(&lf_owner_graph_lock); 1475177633Sdfr error = lf_add_incoming(state, lock); 1476177633Sdfr sx_xunlock(&lf_owner_graph_lock); 1477177633Sdfr if (error) { 1478177633Sdfr#ifdef LOCKF_DEBUG 1479177633Sdfr if (lockf_debug & 1) 1480177633Sdfr lf_print("lf_setlock: deadlock", lock); 1481177633Sdfr#endif 1482177633Sdfr lf_free_lock(lock); 1483177633Sdfr goto out; 1484177633Sdfr } 1485177633Sdfr 1486177633Sdfr /* 14871960Sdg * No blocks!! Add the lock. Note that we will 14881960Sdg * downgrade or upgrade any overlapping locks this 14891960Sdg * process already owns. 14901960Sdg */ 1491177633Sdfr lf_activate_lock(state, lock); 1492177633Sdfr error = 0; 1493177633Sdfrout: 1494177633Sdfr return (error); 14951960Sdg} 14961960Sdg 14971960Sdg/* 14981960Sdg * Remove a byte-range lock on an inode. 14991960Sdg * 15001960Sdg * Generally, find the lock (or an overlap to that lock) 15011960Sdg * and remove it (or shrink it), then wakeup anyone we can. 15021960Sdg */ 150312819Sphkstatic int 1504177633Sdfrlf_clearlock(struct lockf *state, struct lockf_entry *unlock) 15051960Sdg{ 1506177633Sdfr struct lockf_entry *overlap; 15071960Sdg 1508177633Sdfr overlap = LIST_FIRST(&state->ls_active); 1509177633Sdfr 1510177633Sdfr if (overlap == NOLOCKF) 15111960Sdg return (0); 15121960Sdg#ifdef LOCKF_DEBUG 15131960Sdg if (unlock->lf_type != F_UNLCK) 15141960Sdg panic("lf_clearlock: bad type"); 15151960Sdg if (lockf_debug & 1) 15161960Sdg lf_print("lf_clearlock", unlock); 15171960Sdg#endif /* LOCKF_DEBUG */ 15181960Sdg 1519177633Sdfr lf_activate_lock(state, unlock); 15201960Sdg 15211960Sdg return (0); 15221960Sdg} 15231960Sdg 15241960Sdg/* 1525177633Sdfr * Check whether there is a blocking lock, and if so return its 1526177633Sdfr * details in '*fl'. 15271960Sdg */ 152812819Sphkstatic int 1529177633Sdfrlf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) 15301960Sdg{ 1531177633Sdfr struct lockf_entry *block; 15321960Sdg 15331960Sdg#ifdef LOCKF_DEBUG 15341960Sdg if (lockf_debug & 1) 15351960Sdg lf_print("lf_getlock", lock); 15361960Sdg#endif /* LOCKF_DEBUG */ 15371960Sdg 1538177633Sdfr if ((block = lf_getblock(state, lock))) { 15391960Sdg fl->l_type = block->lf_type; 15401960Sdg fl->l_whence = SEEK_SET; 15411960Sdg fl->l_start = block->lf_start; 1542177633Sdfr if (block->lf_end == OFF_MAX) 15431960Sdg fl->l_len = 0; 15441960Sdg else 15451960Sdg fl->l_len = block->lf_end - block->lf_start + 1; 1546177633Sdfr fl->l_pid = block->lf_owner->lo_pid; 1547177633Sdfr fl->l_sysid = block->lf_owner->lo_sysid; 15481960Sdg } else { 15491960Sdg fl->l_type = F_UNLCK; 15501960Sdg } 15511960Sdg return (0); 15521960Sdg} 15531960Sdg 15541960Sdg/* 1555177633Sdfr * Cancel an async lock request. 
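 *
 * The 'cookie' argument is the value that lf_setlock returned through
 * '*cookiep' along with EINPROGRESS; it identifies the pending request
 * to cancel.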
 */
static int
lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
{
	struct lockf_entry *reallock;

	/*
	 * We need to match this request with an existing lock
	 * request.
	 */
	LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
		if ((void *) reallock == cookie) {
			/*
			 * Double-check that this lock looks right
			 * (maybe use a rolling ID for the cancel
			 * cookie instead?)
			 */
			if (!(reallock->lf_vnode == lock->lf_vnode
				&& reallock->lf_start == lock->lf_start
				&& reallock->lf_end == lock->lf_end)) {
				return (ENOENT);
			}

			/*
			 * Make sure this lock was async and then just
			 * remove it from its wait lists.
			 */
			if (!reallock->lf_async_task) {
				return (ENOENT);
			}

			/*
			 * Note that since any other thread must take
			 * state->ls_lock before it can possibly
			 * trigger the async callback, we are safe
			 * from a race with lf_wakeup_lock, i.e. we
			 * can free the lock (actually our caller does
			 * this).
			 */
			lf_cancel_lock(state, reallock);
			return (0);
		}
	}

	/*
	 * We didn't find a matching lock - not much we can do here.
	 */
	return (ENOENT);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf_entry *
lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;
		return (overlap);
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to find an overlapping lock (if
 * any) and return a classification of that overlap.
 *
 * Arguments:
 *	*overlap	The place in the lock list to start looking
 *	lock		The lock which is being tested
 *	type		Pass 'SELF' to test only locks with the same
 *			owner as lock, or 'OTHERS' to test only locks
 *			with a different owner
 *
 * Returns one of six values:
 *	0) no overlap
 *	1) overlap == lock
 *	2) overlap contains lock
 *	3) lock contains overlap
 *	4) overlap starts before lock
 *	5) overlap ends after lock
 *
 * If there is an overlapping lock, '*overlap' is set to point at the
 * overlapping lock.
 *
 * NOTE: this returns only the FIRST overlapping lock. There
 * may be more than one.
 */
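/*
 * A sketch of the five overlapping cases, with the candidate 'lock'
 * drawn as xxxx and the existing list entry drawn as ====:
 *
 *	case 1:	xxxx		(overlap == lock)
 *		====
 *
 *	case 2:	 xxxx		(overlap contains lock)
 *		======
 *
 *	case 3:	xxxxxx		(lock contains overlap)
 *		 ====
 *
 *	case 4:	   xxxx		(overlap starts before lock)
 *		====
 *
 *	case 5:	xxxx		(overlap ends after lock)
 *		  ====
 */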
static int
lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
{
	struct lockf_entry *lf;
	off_t start, end;
	int res;

	if ((*overlap) == NOLOCKF) {
		return (0);
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	res = 0;
	while (*overlap) {
		lf = *overlap;
		if (lf->lf_start > end)
			break;
		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if (start > lf->lf_end) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
		if (lf->lf_start == start && lf->lf_end == end) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			res = 1;
			break;
		}
		if (lf->lf_start <= start && lf->lf_end >= end) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			res = 2;
			break;
		}
		if (start <= lf->lf_start && end >= lf->lf_end) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			res = 3;
			break;
		}
		if (lf->lf_start < start && lf->lf_end >= start) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			res = 4;
			break;
		}
		if (lf->lf_start > start && lf->lf_end > end) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			res = 5;
			break;
		}
		panic("lf_findoverlap: default");
	}
	return (res);
}

/*
 * Split the existing 'lock1', based on the extent of the lock
 * described by 'lock2'. The existing lock should cover 'lock2'
 * entirely.
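 *
 * For example, if lock1 covers [0..99] and lock2 covers [40..49],
 * lock1 is trimmed to [0..39] and a new entry is inserted for
 * [50..99].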
 *
 * Any pending locks which have been unblocked are added to
 * 'granted'.
 */
static void
lf_split(struct lockf *state, struct lockf_entry *lock1,
    struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
	struct lockf_entry *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see whether we need to split at all.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc_lock(lock1->lf_owner);
	memcpy(splitlock, lock1, sizeof *splitlock);
	if (splitlock->lf_flags & F_REMOTE)
		vref(splitlock->lf_vnode);

	/*
	 * This cannot cause a deadlock since any edges we would add
	 * to splitlock already exist in lock1. We must be sure to add
	 * necessary dependencies to splitlock before we reduce lock1
	 * otherwise we may accidentally grant a pending lock that
	 * was blocked by the tail end of lock1.
	 */
	splitlock->lf_start = lock2->lf_end + 1;
	LIST_INIT(&splitlock->lf_outedges);
	LIST_INIT(&splitlock->lf_inedges);
	sx_xlock(&lf_owner_graph_lock);
	lf_add_incoming(state, splitlock);
	sx_xunlock(&lf_owner_graph_lock);

	lf_set_end(state, lock1, lock2->lf_start - 1, granted);

	/*
	 * OK, now link it in
	 */
	lf_insert_lock(state, splitlock);
}

struct clearlock {
	STAILQ_ENTRY(clearlock) link;
	struct vnode *vp;
	struct flock fl;
};
STAILQ_HEAD(clearlocklist, clearlock);

void
lf_clearremotesys(int sysid)
{
	struct lockf *ls;
	struct lockf_entry *lf;
	struct clearlock *cl;
	struct clearlocklist locks;

	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));

	/*
	 * In order to keep the locking simple, we iterate over the
	 * active lock lists to build a list of locks that need
	 * releasing. We then call VOP_ADVLOCK for each one in turn.
	 *
	 * We take an extra reference to the vnode for the duration to
	 * make sure it doesn't go away before we are finished.
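	 *
	 * Dispatching the unlocks through VOP_ADVLOCK, rather than
	 * editing the lock lists directly, also ensures that any
	 * filesystem-specific advisory locking code observes them.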
1840177633Sdfr */ 1841177633Sdfr STAILQ_INIT(&locks); 1842177633Sdfr sx_xlock(&lf_lock_states_lock); 1843177633Sdfr LIST_FOREACH(ls, &lf_lock_states, ls_link) { 1844177633Sdfr sx_xlock(&ls->ls_lock); 1845177633Sdfr LIST_FOREACH(lf, &ls->ls_active, lf_link) { 1846177633Sdfr if (lf->lf_owner->lo_sysid != sysid) 1847177633Sdfr continue; 1848177633Sdfr 1849177633Sdfr cl = malloc(sizeof(struct clearlock), M_LOCKF, 1850177633Sdfr M_WAITOK); 1851177633Sdfr cl->vp = lf->lf_vnode; 1852177633Sdfr vref(cl->vp); 1853177633Sdfr cl->fl.l_start = lf->lf_start; 1854177633Sdfr if (lf->lf_end == OFF_MAX) 1855177633Sdfr cl->fl.l_len = 0; 1856177633Sdfr else 1857177633Sdfr cl->fl.l_len = 1858177633Sdfr lf->lf_end - lf->lf_start + 1; 1859177633Sdfr cl->fl.l_whence = SEEK_SET; 1860177633Sdfr cl->fl.l_type = F_UNLCK; 1861177633Sdfr cl->fl.l_pid = lf->lf_owner->lo_pid; 1862177633Sdfr cl->fl.l_sysid = sysid; 1863177633Sdfr STAILQ_INSERT_TAIL(&locks, cl, link); 1864177633Sdfr } 1865177633Sdfr sx_xunlock(&ls->ls_lock); 1866177633Sdfr } 1867177633Sdfr sx_xunlock(&lf_lock_states_lock); 1868177633Sdfr 1869177633Sdfr while ((cl = STAILQ_FIRST(&locks)) != NULL) { 1870177633Sdfr STAILQ_REMOVE_HEAD(&locks, link); 1871177633Sdfr VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE); 1872177633Sdfr vrele(cl->vp); 1873177633Sdfr free(cl, M_LOCKF); 1874177633Sdfr } 1875177633Sdfr} 1876177633Sdfr 1877177633Sdfrint 1878177633Sdfrlf_countlocks(int sysid) 1879177633Sdfr{ 1880177633Sdfr int i; 1881177633Sdfr struct lock_owner *lo; 1882177633Sdfr int count; 1883177633Sdfr 1884177633Sdfr count = 0; 1885177633Sdfr sx_xlock(&lf_lock_owners_lock); 1886177633Sdfr for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) 1887177633Sdfr LIST_FOREACH(lo, &lf_lock_owners[i], lo_link) 1888177633Sdfr if (lo->lo_sysid == sysid) 1889177633Sdfr count += lo->lo_refs; 1890177633Sdfr sx_xunlock(&lf_lock_owners_lock); 1891177633Sdfr 1892177633Sdfr return (count); 1893177633Sdfr} 1894177633Sdfr 1895177633Sdfr#ifdef LOCKF_DEBUG 1896177633Sdfr 18971960Sdg/* 1898177633Sdfr * Return non-zero if y is reachable from x using a brute force 1899177633Sdfr * search. If reachable and path is non-null, return the route taken 1900177633Sdfr * in path. 19011960Sdg */ 1902177633Sdfrstatic int 1903177633Sdfrgraph_reaches(struct owner_vertex *x, struct owner_vertex *y, 1904177633Sdfr struct owner_vertex_list *path) 1905177633Sdfr{ 1906177633Sdfr struct owner_edge *e; 1907177633Sdfr 1908177633Sdfr if (x == y) { 1909177633Sdfr if (path) 1910177633Sdfr TAILQ_INSERT_HEAD(path, x, v_link); 1911177633Sdfr return 1; 1912177633Sdfr } 1913177633Sdfr 1914177633Sdfr LIST_FOREACH(e, &x->v_outedges, e_outlink) { 1915177633Sdfr if (graph_reaches(e->e_to, y, path)) { 1916177633Sdfr if (path) 1917177633Sdfr TAILQ_INSERT_HEAD(path, x, v_link); 1918177633Sdfr return 1; 1919177633Sdfr } 1920177633Sdfr } 1921177633Sdfr return 0; 1922177633Sdfr} 1923177633Sdfr 1924177633Sdfr/* 1925177633Sdfr * Perform consistency checks on the graph. Make sure the values of 1926177633Sdfr * v_order are correct. If checkorder is non-zero, check no vertex can 1927177633Sdfr * reach any other vertex with a smaller order. 
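 *
 * That is, verify that the vertex array is a valid topological sort
 * of the owner graph.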
 */
static void
graph_check(struct owner_graph *g, int checkorder)
{
	int i, j;

	for (i = 0; i < g->g_size; i++) {
		if (!g->g_vertices[i]->v_owner)
			continue;
		KASSERT(g->g_vertices[i]->v_order == i,
		    ("lock graph vertices disordered"));
		if (checkorder) {
			for (j = 0; j < i; j++) {
				if (!g->g_vertices[j]->v_owner)
					continue;
				KASSERT(!graph_reaches(g->g_vertices[i],
					g->g_vertices[j], NULL),
				    ("lock graph vertices disordered"));
			}
		}
	}
}

static void
graph_print_vertices(struct owner_vertex_list *set)
{
	struct owner_vertex *v;

	printf("{ ");
	TAILQ_FOREACH(v, set, v_link) {
		printf("%d:", v->v_order);
		lf_print_owner(v->v_owner);
		if (TAILQ_NEXT(v, v_link))
			printf(", ");
	}
	printf(" }\n");
}

#endif

/*
 * Calculate the sub-set of vertices v from the affected region [y..x]
 * where v is reachable from y. Return -1 if a loop was detected
 * (i.e. x is reachable from y), otherwise the number of vertices in
 * this subset.
 */
static int
graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
	uint32_t gen;
	struct owner_vertex *v;
	struct owner_edge *e;
	int n;

	/*
	 * We start with a set containing just y. Then for each vertex
	 * v in the set so far unprocessed, we add each vertex that v
	 * has an out-edge to and that is within the affected region
	 * [y..x]. If we see the vertex x on our travels, stop
	 * immediately.
	 */
	TAILQ_INIT(delta);
	TAILQ_INSERT_TAIL(delta, y, v_link);
	v = y;
	n = 1;
	gen = g->g_gen;
	while (v) {
		LIST_FOREACH(e, &v->v_outedges, e_outlink) {
			if (e->e_to == x)
				return -1;
			if (e->e_to->v_order < x->v_order
			    && e->e_to->v_gen != gen) {
				e->e_to->v_gen = gen;
				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
				n++;
			}
		}
		v = TAILQ_NEXT(v, v_link);
	}

	return (n);
}

/*
 * Calculate the sub-set of vertices v from the affected region [y..x]
 * where v reaches x. Return the number of vertices in this subset.
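 *
 * No loop check is needed on this pass; if the new edge would have
 * created a cycle, graph_delta_forward has already detected it.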
2015177633Sdfr */ 2016177633Sdfrstatic int 2017177633Sdfrgraph_delta_backward(struct owner_graph *g, struct owner_vertex *x, 2018177633Sdfr struct owner_vertex *y, struct owner_vertex_list *delta) 2019177633Sdfr{ 2020177633Sdfr uint32_t gen; 2021177633Sdfr struct owner_vertex *v; 2022177633Sdfr struct owner_edge *e; 2023177633Sdfr int n; 2024177633Sdfr 2025177633Sdfr /* 2026177633Sdfr * We start with a set containing just x. Then for each vertex 2027177633Sdfr * v in the set so far unprocessed, we add each vertex that v 2028177633Sdfr * has an in-edge from and that is within the affected region 2029177633Sdfr * [y..x]. 2030177633Sdfr */ 2031177633Sdfr TAILQ_INIT(delta); 2032177633Sdfr TAILQ_INSERT_TAIL(delta, x, v_link); 2033177633Sdfr v = x; 2034177633Sdfr n = 1; 2035177633Sdfr gen = g->g_gen; 2036177633Sdfr while (v) { 2037177633Sdfr LIST_FOREACH(e, &v->v_inedges, e_inlink) { 2038177633Sdfr if (e->e_from->v_order > y->v_order 2039177633Sdfr && e->e_from->v_gen != gen) { 2040177633Sdfr e->e_from->v_gen = gen; 2041177633Sdfr TAILQ_INSERT_HEAD(delta, e->e_from, v_link); 2042177633Sdfr n++; 2043177633Sdfr } 2044177633Sdfr } 2045177633Sdfr v = TAILQ_PREV(v, owner_vertex_list, v_link); 2046177633Sdfr } 2047177633Sdfr 2048177633Sdfr return (n); 2049177633Sdfr} 2050177633Sdfr 2051177633Sdfrstatic int 2052177633Sdfrgraph_add_indices(int *indices, int n, struct owner_vertex_list *set) 2053177633Sdfr{ 2054177633Sdfr struct owner_vertex *v; 2055177633Sdfr int i, j; 2056177633Sdfr 2057177633Sdfr TAILQ_FOREACH(v, set, v_link) { 2058177633Sdfr for (i = n; 2059177633Sdfr i > 0 && indices[i - 1] > v->v_order; i--) 2060177633Sdfr ; 2061177633Sdfr for (j = n - 1; j >= i; j--) 2062177633Sdfr indices[j + 1] = indices[j]; 2063177633Sdfr indices[i] = v->v_order; 2064177633Sdfr n++; 2065177633Sdfr } 2066177633Sdfr 2067177633Sdfr return (n); 2068177633Sdfr} 2069177633Sdfr 2070177633Sdfrstatic int 2071177633Sdfrgraph_assign_indices(struct owner_graph *g, int *indices, int nextunused, 2072177633Sdfr struct owner_vertex_list *set) 2073177633Sdfr{ 2074177633Sdfr struct owner_vertex *v, *vlowest; 2075177633Sdfr 2076177633Sdfr while (!TAILQ_EMPTY(set)) { 2077177633Sdfr vlowest = NULL; 2078177633Sdfr TAILQ_FOREACH(v, set, v_link) { 2079177633Sdfr if (!vlowest || v->v_order < vlowest->v_order) 2080177633Sdfr vlowest = v; 2081177633Sdfr } 2082177633Sdfr TAILQ_REMOVE(set, vlowest, v_link); 2083177633Sdfr vlowest->v_order = indices[nextunused]; 2084177633Sdfr g->g_vertices[vlowest->v_order] = vlowest; 2085177633Sdfr nextunused++; 2086177633Sdfr } 2087177633Sdfr 2088177633Sdfr return (nextunused); 2089177633Sdfr} 2090177633Sdfr 2091177633Sdfrstatic int 2092177633Sdfrgraph_add_edge(struct owner_graph *g, struct owner_vertex *x, 2093177633Sdfr struct owner_vertex *y) 2094177633Sdfr{ 2095177633Sdfr struct owner_edge *e; 2096177633Sdfr struct owner_vertex_list deltaF, deltaB; 2097177633Sdfr int nF, nB, n, vi, i; 2098177633Sdfr int *indices; 2099177633Sdfr 2100177633Sdfr sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2101177633Sdfr 2102177633Sdfr LIST_FOREACH(e, &x->v_outedges, e_outlink) { 2103177633Sdfr if (e->e_to == y) { 2104177633Sdfr e->e_refs++; 2105177633Sdfr return (0); 2106177633Sdfr } 2107177633Sdfr } 2108177633Sdfr 21091960Sdg#ifdef LOCKF_DEBUG 2110177633Sdfr if (lockf_debug & 8) { 2111177633Sdfr printf("adding edge %d:", x->v_order); 2112177633Sdfr lf_print_owner(x->v_owner); 2113177633Sdfr printf(" -> %d:", y->v_order); 2114177633Sdfr lf_print_owner(y->v_owner); 2115177633Sdfr printf("\n"); 211622521Sdyson } 
#endif
	if (y->v_order < x->v_order) {
		/*
		 * The new edge violates the order. First find the set
		 * of affected vertices reachable from y (deltaF) and
		 * the set of affected vertices that reach x (deltaB),
		 * using the graph generation number to detect whether
		 * we have visited a given vertex already. We re-order
		 * the graph so that each vertex in deltaB appears
		 * before each vertex in deltaF.
		 *
		 * If x is a member of deltaF, then the new edge would
		 * create a cycle. Otherwise, we may assume that
		 * deltaF and deltaB are disjoint.
		 */
		g->g_gen++;
		if (g->g_gen == 0) {
			/*
			 * Generation wrap.
			 */
			for (vi = 0; vi < g->g_size; vi++) {
				g->g_vertices[vi]->v_gen = 0;
			}
			g->g_gen++;
		}
		nF = graph_delta_forward(g, x, y, &deltaF);
		if (nF < 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 8) {
				struct owner_vertex_list path;
				printf("deadlock: ");
				TAILQ_INIT(&path);
				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}
#endif
			return (EDEADLK);
		}

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");
			printf("deltaF = ");
			graph_print_vertices(&deltaF);
		}
#endif

		nB = graph_delta_backward(g, x, y, &deltaB);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("deltaB = ");
			graph_print_vertices(&deltaB);
		}
#endif

		/*
		 * We first build a set of vertex indices (vertex
		 * order values) that we may use, then we re-assign
		 * orders first to those vertices in deltaB, then to
		 * deltaF. Note that the contents of deltaF and deltaB
		 * may be partially disordered - we perform an
		 * insertion sort while building our index set.
		 */
		indices = g->g_indexbuf;
		n = graph_add_indices(indices, 0, &deltaF);
		graph_add_indices(indices, n, &deltaB);

		/*
		 * We must also be sure to maintain the relative
		 * ordering of deltaF and deltaB when re-assigning
		 * vertices. We do this by iteratively removing the
		 * lowest ordered element from the set and assigning
		 * it the next value from our new ordering.
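		 *
		 * For example, suppose the graph holds owners w, y,
		 * a and x with orders 0, 1, 2 and 3 and one existing
		 * edge a->x. Adding the edge x->y gives deltaF = {y}
		 * and deltaB = {a, x}, so the available index set is
		 * {1, 2, 3}. Assigning deltaB first and then deltaF
		 * re-orders the vertices as w(0), a(1), x(2), y(3),
		 * which respects both a->x and the new edge x->y.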
		 */
		i = graph_assign_indices(g, indices, 0, &deltaB);
		graph_assign_indices(g, indices, i, &deltaF);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			struct owner_vertex_list set;
			TAILQ_INIT(&set);
			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);
			printf("new ordering = ");
			graph_print_vertices(&set);
		}
#endif
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}
#endif

	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
	e->e_refs = 1;
	e->e_from = x;
	e->e_to = y;

	return (0);
}

/*
 * Remove an edge x->y from the graph.
 */
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y)
			break;
	}
	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	e->e_refs--;
	if (e->e_refs == 0) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
			printf("\n");
		}
#endif
		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
		free(e, M_LOCKF);
	}
}

/*
 * Allocate a vertex for a new lock owner, growing the graph's vertex
 * array and index buffer if necessary. The M_WAITOK allocations mean
 * that this cannot fail.
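 *
 * The vertex array and index buffer double in size whenever they fill,
 * so the amortised cost of adding an owner stays constant.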
2264177633Sdfr */ 2265177633Sdfrstatic struct owner_vertex * 2266177633Sdfrgraph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo) 2267177633Sdfr{ 2268177633Sdfr struct owner_vertex *v; 2269177633Sdfr 2270177633Sdfr sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2271177633Sdfr 2272177633Sdfr v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK); 2273177633Sdfr if (g->g_size == g->g_space) { 2274177633Sdfr g->g_vertices = realloc(g->g_vertices, 2275177633Sdfr 2 * g->g_space * sizeof(struct owner_vertex *), 2276177633Sdfr M_LOCKF, M_WAITOK); 2277177633Sdfr free(g->g_indexbuf, M_LOCKF); 2278177633Sdfr g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), 2279177633Sdfr M_LOCKF, M_WAITOK); 2280177633Sdfr g->g_space = 2 * g->g_space; 2281177633Sdfr } 2282177633Sdfr v->v_order = g->g_size; 2283177633Sdfr v->v_gen = g->g_gen; 2284177633Sdfr g->g_vertices[g->g_size] = v; 2285177633Sdfr g->g_size++; 2286177633Sdfr 2287177633Sdfr LIST_INIT(&v->v_outedges); 2288177633Sdfr LIST_INIT(&v->v_inedges); 2289177633Sdfr v->v_owner = lo; 2290177633Sdfr 2291177633Sdfr return (v); 2292177633Sdfr} 2293177633Sdfr 2294177633Sdfrstatic void 2295177633Sdfrgraph_free_vertex(struct owner_graph *g, struct owner_vertex *v) 2296177633Sdfr{ 2297177633Sdfr struct owner_vertex *w; 2298177633Sdfr int i; 2299177633Sdfr 2300177633Sdfr sx_assert(&lf_owner_graph_lock, SX_XLOCKED); 2301177633Sdfr 2302177633Sdfr KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); 2303177633Sdfr KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); 2304177633Sdfr 2305177633Sdfr /* 2306177633Sdfr * Remove from the graph's array and close up the gap, 2307177633Sdfr * renumbering the other vertices. 2308177633Sdfr */ 2309177633Sdfr for (i = v->v_order + 1; i < g->g_size; i++) { 2310177633Sdfr w = g->g_vertices[i]; 2311177633Sdfr w->v_order--; 2312177633Sdfr g->g_vertices[i - 1] = w; 2313177633Sdfr } 2314177633Sdfr g->g_size--; 2315177633Sdfr 2316177633Sdfr free(v, M_LOCKF); 2317177633Sdfr} 2318177633Sdfr 2319177633Sdfrstatic struct owner_graph * 2320177633Sdfrgraph_init(struct owner_graph *g) 2321177633Sdfr{ 2322177633Sdfr 2323177633Sdfr g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), 2324177633Sdfr M_LOCKF, M_WAITOK); 2325177633Sdfr g->g_size = 0; 2326177633Sdfr g->g_space = 10; 2327177633Sdfr g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); 2328177633Sdfr g->g_gen = 0; 2329177633Sdfr 2330177633Sdfr return (g); 2331177633Sdfr} 2332177633Sdfr 2333177633Sdfr#ifdef LOCKF_DEBUG 2334177633Sdfr/* 2335177633Sdfr * Print description of a lock owner 2336177633Sdfr */ 2337177633Sdfrstatic void 2338177633Sdfrlf_print_owner(struct lock_owner *lo) 2339177633Sdfr{ 2340177633Sdfr 2341177633Sdfr if (lo->lo_flags & F_REMOTE) { 2342177633Sdfr printf("remote pid %d, system %d", 2343177633Sdfr lo->lo_pid, lo->lo_sysid); 2344177633Sdfr } else if (lo->lo_flags & F_FLOCK) { 2345177633Sdfr printf("file %p", lo->lo_id); 2346177633Sdfr } else { 2347177633Sdfr printf("local pid %d", lo->lo_pid); 2348177633Sdfr } 2349177633Sdfr} 2350177633Sdfr 2351177633Sdfr/* 23521960Sdg * Print out a lock. 
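 *
 * The output for a single entry looks something like this (the tag and
 * pointer value are illustrative):
 *
 *	tag: lock 0xc2e98200 for local pid 1234 exclusive, start 0, end EOF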
 */
static void
lf_print(char *tag, struct lockf_entry *lock)
{

	printf("%s: lock %p for ", tag, (void *)lock);
	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>,",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev));
	printf(" %s, start %jd, end ",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
	    (intmax_t)lock->lf_start);
	if (lock->lf_end == OFF_MAX)
		printf("EOF");
	else
		printf("%jd", (intmax_t)lock->lf_end);
	if (!LIST_EMPTY(&lock->lf_outedges))
		printf(" block %p\n",
		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *blk;
	struct lockf_edge *e;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) {
		printf("\tlock %p for ", (void *)lf);
		lf_print_owner(lf->lf_owner);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
			blk = e->le_to;
			printf("\n\t\tlock request %p for ", (void *)blk);
			lf_print_owner(blk->lf_owner);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!LIST_EMPTY(&blk->lf_inedges))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */