/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to mind is
 * support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() an rl_t is allocated; the tree
 * is searched, no overlap is found, and *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The avl code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very first entry in the ordered list might be at offset 0
 * but cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks created for non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's handle)
 * and its offset and length are used when releasing the lock.
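 *
 * For example (a sketch of the bookkeeping): if a reader holds
 * [0, 100) and a second reader then takes [50, 150), the first lock is
 * replaced in the tree by proxies [0, 50) with reference count 1 and
 * [50, 100) with reference count 2, and a new proxy [100, 150) with
 * reference count 1 is added for the remainder.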
 *
 * Thread coordination
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, and the lock type converted from RL_APPEND to
 * RL_WRITER and the range locked.
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
 */

#include <sys/zfs_rlock.h>

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	for (;;) {
		/*
		 * Range locking is also used by zvol and uses a
		 * dummied up znode. However, for zvol, we don't need to
		 * append or grow blocksize, and besides we don't have
		 * a "sa" data or z_zfsvfs - so skip that processing.
		 *
		 * Yes, this is ugly, and would be solved by not handling
		 * grow or append in range lock code. If that was done then
		 * we could make the range locking code generically available
		 * to other non-zfs consumers.
		 */
		if (zp->z_vnode) { /* caller is ZPL */
			/*
			 * If in append mode pick up the current end of file.
			 * This is done under z_range_lock to avoid races.
			 */
			if (new->r_type == RL_APPEND)
				new->r_off = zp->z_size;

			/*
			 * If we need to grow the block size then grab the
			 * whole file range. This is also done under
			 * z_range_lock to avoid races.
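			 *
			 * For example (hypothetical numbers): if z_blksz is
			 * 4K and a write arrives at offset 100K, end_size
			 * exceeds the block size, so the lock is widened to
			 * [0, UINT64_MAX) while the block size is grown; the
			 * caller later calls zfs_range_reduce().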
			 */
			end_size = MAX(zp->z_size, new->r_off + len);
			if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
			    zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
				new->r_off = 0;
				new->r_len = UINT64_MAX;
			}
		}

		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
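 *
 * For example (a sketch): splitting a proxy covering [0, 100) with a
 * reference count of 3 at offset 60 yields a front proxy [0, 60) and a
 * rear proxy [60, 100), each with a reference count of 3.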
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	ASSERT(len);
	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}

static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the
	 * end of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there are any
	 * gaps between the ranges then we create a new proxy range.
	 */
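	/*
	 * For example (a sketch): a new reader [10, 40) over existing
	 * readers [0, 20) and [30, 50) leaves proxies [0, 10), [10, 20),
	 * [20, 30), [30, 40) and [40, 50) in the tree, where [10, 20) and
	 * [30, 40) have a reference count of 2 and the rest 1.
	 */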
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
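	 * For example, a reader for [100, 200) must wait if a writer
	 * holds [150, 160), and also if a reader on an overlapping range
	 * has r_write_wanted set, so waiting writers aren't starved by a
	 * stream of new readers.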
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or range reduction (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	if (len + off < off)	/* overflow */
		len = UINT64_MAX - off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}

/*
 * Unlock a reader lock
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next = NULL;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1) meaning there have been no other reader locks
	 * overlapping with this one. Otherwise the remove entry will
	 * have been removed from the tree and replaced by proxies (one
	 * or more ranges mapping to the entire range).
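	 *
	 * For example (a sketch): releasing a reader lock for [0, 150)
	 * whose range was replaced by proxies [0, 50) with cnt 1 and
	 * [50, 150) with cnt 2 frees the first proxy and leaves
	 * [50, 150) behind with cnt 1.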
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted) {
			cv_broadcast(&remove->r_wr_cv);
			cv_destroy(&remove->r_wr_cv);
		}
		if (remove->r_read_wanted) {
			cv_broadcast(&remove->r_rd_cv);
			cv_destroy(&remove->r_rd_cv);
		}
	} else {
		ASSERT0(remove->r_cnt);
		ASSERT0(remove->r_write_wanted);
		ASSERT0(remove->r_read_wanted);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl);
		ASSERT(rl->r_cnt);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(next);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_cnt);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted) {
					cv_broadcast(&rl->r_wr_cv);
					cv_destroy(&rl->r_wr_cv);
				}
				if (rl->r_read_wanted) {
					cv_broadcast(&rl->r_rd_cv);
					cv_destroy(&rl->r_rd_cv);
				}
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted) {
			cv_broadcast(&rl->r_wr_cv);
			cv_destroy(&rl->r_wr_cv);
		}
		if (rl->r_read_wanted) {
			cv_broadcast(&rl->r_rd_cv);
			cv_destroy(&rl->r_rd_cv);
		}
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
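 *
 * A typical caller sequence (a sketch) when growing the blocksize:
 *	rl = zfs_range_lock(zp, off, len, RL_WRITER);
 *	(the lock is internally expanded to cover the whole file)
 *	... grow the block size ...
 *	zfs_range_reduce(rl, off, len);
 *	... write the data ...
 *	zfs_range_unlock(rl);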
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}
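
/*
 * A consumer wires the comparator up when initializing the znode's range
 * lock state, along the lines of this sketch (assuming rl_t embeds its
 * AVL linkage in a field named r_node, per its declaration in
 * zfs_rlock.h):
 *
 *	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
 *	avl_create(&zp->z_range_avl, zfs_range_compare,
 *	    sizeof (rl_t), offsetof(rl_t, r_node));
 */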