/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to
 * mind is support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() an rl_t is allocated; the tree
 * is searched, no overlap is found, and *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very first entry in the ordered list might be at offset 0
 * but cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks are created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's
 * handle) and its offset and length are used when releasing the lock.
 *
 * Thread coordination
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, the lock type is converted from RL_APPEND to
 * RL_WRITER, and the range is locked.
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file, which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce.
 */

#include <sys/zfs_rlock.h>
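/*
 * Example usage (a minimal sketch; the znode "zp" and the I/O step in
 * the middle are illustrative, not part of this file):
 *
 *	rl_t *rl = zfs_range_lock(zp, off, len, RL_READER);
 *	...read the file data covering [off, off + len)...
 *	zfs_range_unlock(rl);
 */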
/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	for (;;) {
		/*
		 * Range locking is also used by zvol and uses a
		 * dummied up znode. However, for zvol, we don't need to
		 * append or grow blocksize, and besides we don't have
		 * a z_phys or z_zfsvfs - so skip that processing.
		 *
		 * Yes, this is ugly, and would be solved by not handling
		 * grow or append in range lock code. If that was done then
		 * we could make the range locking code generically available
		 * to other non-zfs consumers.
		 */
		if (zp->z_vnode) { /* caller is ZPL */
			/*
			 * If in append mode pick up the current end of file.
			 * This is done under z_range_lock to avoid races.
			 */
			if (new->r_type == RL_APPEND)
				new->r_off = zp->z_phys->zp_size;

			/*
			 * If we need to grow the block size then grab the
			 * whole file range. This is also done under
			 * z_range_lock to avoid races.
			 */
			end_size = MAX(zp->z_phys->zp_size, new->r_off + len);
			if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
			    zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
				new->r_off = 0;
				new->r_len = UINT64_MAX;
			}
		}

		/*
		 * First check for the usual case of no locks.
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}
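/*
 * For example (illustrative offsets): a plain reader lock covering
 * [0, 100) sits in the tree with r_cnt == 1 and r_proxy == B_FALSE.
 * When a second, overlapping reader arrives, zfs_range_proxify()
 * replaces it with a proxy for the same [0, 100) range (r_cnt == 1,
 * r_proxy == B_TRUE), while the original rl_t - the first caller's
 * handle - stays allocated outside the tree with r_cnt == 0. The
 * caller then bumps the proxy's reference count for the new reader.
 */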
/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	ASSERT(len);
	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}

static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the end
	 * of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there are any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}
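/*
 * Worked example (illustrative offsets): suppose reader A holds
 * [0, 100) and reader B then locks [50, 150). A's lock is proxified
 * and split at offset 50, B bumps the ref count on the rear proxy,
 * and a new proxy is created for B's tail, leaving the tree as:
 *
 *	[0, 50)		r_cnt == 1	(A only)
 *	[50, 100)	r_cnt == 2	(A and B)
 *	[100, 150)	r_cnt == 1	(B only)
 *
 * A's and B's original rl_t handles remain outside the tree with
 * r_cnt == 0; zfs_range_unlock_reader() later uses their offset and
 * length to find and release these proxies.
 */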
/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or range reduction (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks.
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}
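/*
 * A sketch of an append-mode write using the RL_APPEND conversion
 * ("zp" and "nbytes" are assumed to come from the caller; the write
 * step is illustrative):
 *
 *	rl_t *rl = zfs_range_lock(zp, 0, nbytes, RL_APPEND);
 *	// rl->r_off now holds the end-of-file offset captured under
 *	// z_range_lock, and rl->r_type has been converted to RL_WRITER
 *	...write nbytes at offset rl->r_off...
 *	zfs_range_unlock(rl);
 */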
/*
 * Unlock a reader lock.
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1), meaning there have been no other reader locks
	 * overlapping with this one. Otherwise the remove entry will
	 * have been removed from the tree and replaced by proxies (one
	 * or more ranges mapping to the entire range).
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted) {
			cv_broadcast(&remove->r_wr_cv);
			cv_destroy(&remove->r_wr_cv);
		}
		if (remove->r_read_wanted) {
			cv_broadcast(&remove->r_rd_cv);
			cv_destroy(&remove->r_rd_cv);
		}
	} else {
		ASSERT3U(remove->r_cnt, ==, 0);
		ASSERT3U(remove->r_write_wanted, ==, 0);
		ASSERT3U(remove->r_read_wanted, ==, 0);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl);
		ASSERT(rl->r_cnt);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(next);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_cnt);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted) {
					cv_broadcast(&rl->r_wr_cv);
					cv_destroy(&rl->r_wr_cv);
				}
				if (rl->r_read_wanted) {
					cv_broadcast(&rl->r_rd_cv);
					cv_destroy(&rl->r_rd_cv);
				}
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted) {
			cv_broadcast(&rl->r_wr_cv);
			cv_destroy(&rl->r_wr_cv);
		}
		if (rl->r_read_wanted) {
			cv_broadcast(&rl->r_rd_cv);
			cv_destroy(&rl->r_rd_cv);
		}
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * Lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t.
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}
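/*
 * The grow-block flow described in the header comment looks roughly
 * like this from the caller's side (a sketch; the grow and write
 * steps are illustrative):
 *
 *	rl_t *rl = zfs_range_lock(zp, off, len, RL_WRITER);
 *	if (rl->r_len == UINT64_MAX) {
 *		// blocksize needed to grow; whole file is locked
 *		...grow the block size...
 *		zfs_range_reduce(rl, off, len);
 *	}
 *	...write [off, off + len)...
 *	zfs_range_unlock(rl);
 */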
/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}
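/*
 * A sketch of the per-znode tree initialization a consumer would
 * perform with this comparator (an assumption for illustration: that
 * rl_t embeds an avl_node_t field named r_node for its AVL linkage):
 *
 *	avl_create(&zp->z_range_avl, zfs_range_compare,
 *	    sizeof (rl_t), offsetof(rl_t, r_node));
 */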