zfs_rlock.c revision 185029
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to mind
 * is support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() a rl_t is allocated; the tree
 * is searched, no overlap is found, and *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The avl code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very 1st in the ordered list might be at offset 0 but
 * cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks created for non-overlapping ranges.
 * The reference counts are adjusted accordingly.
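 *
 * A worked example (offsets are illustrative): suppose one reader
 * holds [0, 100) as a plain rl_t with r_cnt == 1, and a second reader
 * then locks [50, 150). The first lock is converted to proxies and
 * split at offset 50, giving [0, 50) with r_cnt == 1 and [50, 100)
 * with r_cnt == 2, and a new proxy [100, 150) with r_cnt == 1 is
 * added for the uncovered tail.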
 * Meanwhile, the original lock is kept around (this is the caller's handle)
 * and its offset and length are used when releasing the lock.
 *
 * Thread coordination
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, and the lock type converted from RL_APPEND to
 * RL_WRITER and the range locked.
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file, which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
 */

#include <sys/zfs_rlock.h>

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	for (;;) {
		/*
		 * Range locking is also used by zvol and uses a
		 * dummied up znode. However, for zvol, we don't need to
		 * append or grow blocksize, and besides we don't have
		 * a z_phys or z_zfsvfs - so skip that processing.
		 *
		 * Yes, this is ugly, and would be solved by not handling
		 * grow or append in range lock code. If that was done then
		 * we could make the range locking code generically available
		 * to other non-zfs consumers.
		 */
		if (zp->z_vnode) { /* caller is ZPL */
			/*
			 * If in append mode pick up the current end of file.
			 * This is done under z_range_lock to avoid races.
			 */
			if (new->r_type == RL_APPEND)
				new->r_off = zp->z_phys->zp_size;

			/*
			 * If we need to grow the block size then grab the whole
			 * file range. This is also done under z_range_lock to
			 * avoid races.
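			 *
			 * For example (illustrative numbers): if zp_size is
			 * 3K, the write covers [2K, 4K) and z_blksz is 3K
			 * (not a power of two), then end_size is 4K, which
			 * exceeds z_blksz, so the lock is widened to the
			 * whole file below.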
			 */
			end_size = MAX(zp->z_phys->zp_size, new->r_off + len);
			if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
			    zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
				new->r_off = 0;
				new->r_len = UINT64_MAX;
			}
		}

		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
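 *
 * E.g. (illustrative): splitting a proxy [0, 100) with r_cnt == 2 at
 * offset 30 leaves a front proxy [0, 30) and a rear proxy [30, 100),
 * both with r_cnt == 2.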
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	ASSERT(len);
	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}

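/*
 * Add a reader lock to the tree, proxifying, splitting and reference
 * counting overlapping entries as needed. For example (illustrative):
 * adding a reader over [0, 150) on top of a lone proxy [50, 100) with
 * r_cnt == 1 leaves proxies [0, 50), [50, 100) and [100, 150) with
 * r_cnt 1, 2 and 1 respectively.
 */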
static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the end
	 * of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there's any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
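	 * A reader must also wait if r_write_wanted is set on an
	 * overlapping entry, so that a stream of overlapping readers
	 * can't starve a waiting writer.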
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or reduce range (if entire file
 * previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}

/*
 * Unlock a reader lock
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1) meaning there's been no other reader locks overlapping
	 * with this one. Otherwise the remove entry will have been
	 * removed from the tree and replaced by proxies (one or
	 * more ranges mapping to the entire range).
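	 *
	 * For example (illustrative): if this reader's range [0, 150) is
	 * represented by proxies [0, 50), [50, 100) and [100, 150) with
	 * r_cnt 1, 2 and 1, releasing it decrements each count, frees
	 * [0, 50) and [100, 150), and leaves [50, 100) with r_cnt == 1
	 * for the remaining reader.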
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted) {
			cv_broadcast(&remove->r_wr_cv);
			cv_destroy(&remove->r_wr_cv);
		}
		if (remove->r_read_wanted) {
			cv_broadcast(&remove->r_rd_cv);
			cv_destroy(&remove->r_rd_cv);
		}
	} else {
		ASSERT3U(remove->r_cnt, ==, 0);
		ASSERT3U(remove->r_write_wanted, ==, 0);
		ASSERT3U(remove->r_read_wanted, ==, 0);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl);
		ASSERT(rl->r_cnt);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(next);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_cnt);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted) {
					cv_broadcast(&rl->r_wr_cv);
					cv_destroy(&rl->r_wr_cv);
				}
				if (rl->r_read_wanted) {
					cv_broadcast(&rl->r_rd_cv);
					cv_destroy(&rl->r_rd_cv);
				}
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted) {
			cv_broadcast(&rl->r_wr_cv);
			cv_destroy(&rl->r_wr_cv);
		}
		if (rl->r_read_wanted) {
			cv_broadcast(&rl->r_rd_cv);
			cv_destroy(&rl->r_rd_cv);
		}
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
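 *
 * Typical use (a sketch; the elided body is whatever grow-blocksize
 * work the caller does):
 *
 *	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
 *	... grow the block size under the exclusive lock ...
 *	zfs_range_reduce(rl, off, len);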
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}
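
/*
 * Overall usage sketch for an append-mode caller (illustrative;
 * do_write() is a hypothetical caller routine, not part of this
 * interface). After zfs_range_lock() returns for RL_APPEND, r_off
 * holds the end-of-file offset picked up under z_range_lock and the
 * lock type has been converted to RL_WRITER:
 *
 *	rl_t *rl;
 *
 *	rl = zfs_range_lock(zp, 0, nbytes, RL_APPEND);
 *	do_write(zp, rl->r_off, nbytes);
 *	zfs_range_unlock(rl);
 */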