lfs_subr.c revision 1.100
/*	$NetBSD: lfs_subr.c,v 1.100 2020/02/23 08:42:53 riastradh Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_subr.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.100 2020/02/23 08:42:53 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

#ifdef DEBUG
const char *lfs_res_names[LFS_NB_COUNT] = {
	"summary",
	"superblock",
	"file block",
	"cluster",
	"clean",
	"blkiov",
};
#endif

int lfs_res_qty[LFS_NB_COUNT] = {
	LFS_N_SUMMARIES,
	LFS_N_SBLOCKS,
	LFS_N_IBLOCKS,
	LFS_N_CLUSTERS,
	LFS_N_CLEAN,
	LFS_N_BLKIOV,
};

/*
 * Allocate the last-resort reserve blocks and the small-object pools
 * used by the segment writer.  Called before the segment lock can be
 * taken.
 */
void
lfs_setup_resblks(struct lfs *fs)
{
	int i, j;
	int maxbpp;

	ASSERT_NO_SEGLOCK(fs);
	fs->lfs_resblk = malloc(LFS_N_TOTAL * sizeof(res_t), M_SEGMENT,
	    M_WAITOK);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].inuse = 0;
		fs->lfs_resblk[i].p = NULL;
	}
	for (i = 0; i < LFS_RESHASH_WIDTH; i++)
		LIST_INIT(fs->lfs_reshash + i);

	/*
	 * These types of allocations can be larger than a page,
	 * so we can't use the pool subsystem for them.
	 */
	for (i = 0, j = 0; j < LFS_N_SUMMARIES; j++, i++)
		fs->lfs_resblk[i].size = lfs_sb_getsumsize(fs);
	for (j = 0; j < LFS_N_SBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = LFS_SBPAD;
	for (j = 0; j < LFS_N_IBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = lfs_sb_getbsize(fs);
	for (j = 0; j < LFS_N_CLUSTERS; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_CLEAN; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_BLKIOV; j++, i++)
		fs->lfs_resblk[i].size = LFS_MARKV_MAXBLKCNT * sizeof(BLOCK_INFO);

	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].p = malloc(fs->lfs_resblk[i].size,
		    M_SEGMENT, M_WAITOK);
	}

	/*
	 * Initialize pools for small types (XXX is BPP small?)
	 */
	pool_init(&fs->lfs_clpool, sizeof(struct lfs_cluster), 0, 0, 0,
	    "lfsclpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&fs->lfs_segpool, sizeof(struct segment), 0, 0, 0,
	    "lfssegpool", &pool_allocator_nointr, IPL_NONE);
	/* XXX: should this int32 be 32/64? */
	maxbpp = ((lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs)) / sizeof(int32_t) + 2);
	maxbpp = MIN(maxbpp, lfs_segsize(fs) / lfs_sb_getfsize(fs) + 2);
	pool_init(&fs->lfs_bpppool, maxbpp * sizeof(struct buf *), 0, 0, 0,
	    "lfsbpppl", &pool_allocator_nointr, IPL_NONE);
}

/*
 * Tear down the pools and reserve blocks, waiting for any reserve
 * blocks still in use to be returned first.
 */
void
lfs_free_resblks(struct lfs *fs)
{
	int i;

	pool_destroy(&fs->lfs_bpppool);
	pool_destroy(&fs->lfs_segpool);
	pool_destroy(&fs->lfs_clpool);

	mutex_enter(&lfs_lock);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		while (fs->lfs_resblk[i].inuse)
			mtsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0,
			    &lfs_lock);
		if (fs->lfs_resblk[i].p != NULL)
			free(fs->lfs_resblk[i].p, M_SEGMENT);
	}
	free(fs->lfs_resblk, M_SEGMENT);
	mutex_exit(&lfs_lock);
}

/* Hash a reserve-block pointer into the lfs_reshash table. */
static unsigned int
lfs_mhash(void *vp)
{
	return (unsigned int)(((unsigned long)vp) >> 2) % LFS_RESHASH_WIDTH;
}

/*
 * Return memory of the given size for the given purpose, or use one of a
 * number of spare last-resort buffers, if malloc returns NULL.
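 * Memory obtained here must be released with lfs_free(), which returns
 * reserve blocks to the reserve pool and wakes up any waiters.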
 */
void *
lfs_malloc(struct lfs *fs, size_t size, int type)
{
	struct lfs_res_blk *re;
	void *r;
	int i, start;
	unsigned int h;

	ASSERT_MAYBE_SEGLOCK(fs);
	r = NULL;

	/* If no memory is reserved for this type, just do a waiting malloc */
	if (lfs_res_qty[type] == 0) {
		r = malloc(size, M_SEGMENT, M_WAITOK);
		return r;
	}

	/* Otherwise try a quick malloc, and if it works, great */
	if ((r = malloc(size, M_SEGMENT, M_NOWAIT)) != NULL) {
		return r;
	}

	/*
	 * If malloc returned NULL, we are forced to use one of our
	 * reserve blocks.  We have on hand at least one summary block,
	 * at least one cluster block, at least one superblock,
	 * and several indirect blocks.
	 */

	mutex_enter(&lfs_lock);
	/* skip over blocks of other types */
	for (i = 0, start = 0; i < type; i++)
		start += lfs_res_qty[i];
	while (r == NULL) {
		for (i = 0; i < lfs_res_qty[type]; i++) {
			if (fs->lfs_resblk[start + i].inuse == 0) {
				re = fs->lfs_resblk + start + i;
				re->inuse = 1;
				r = re->p;
				KASSERT(re->size >= size);
				h = lfs_mhash(r);
				LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res);
				mutex_exit(&lfs_lock);
				return r;
			}
		}
		DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n",
		    lfs_res_names[type], lfs_res_qty[type]));
		mtsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0,
		    &lfs_lock);
		DLOG((DLOG_MALLOC, "done sleeping on %s\n",
		    lfs_res_names[type]));
	}
	/* NOTREACHED */
	mutex_exit(&lfs_lock);
	return r;
}

/*
 * Release memory obtained from lfs_malloc(): if it is one of the
 * reserve blocks, return it to the reserve pool and wake any waiters;
 * otherwise hand it back to free().
 */
void
lfs_free(struct lfs *fs, void *p, int type)
{
	unsigned int h;
	res_t *re;

	ASSERT_MAYBE_SEGLOCK(fs);
	h = lfs_mhash(p);
	mutex_enter(&lfs_lock);
	LIST_FOREACH(re, &fs->lfs_reshash[h], res) {
		if (re->p == p) {
			KASSERT(re->inuse == 1);
			LIST_REMOVE(re, res);
			re->inuse = 0;
			wakeup(&fs->lfs_resblk);
			mutex_exit(&lfs_lock);
			return;
		}
	}

#ifdef notyet /* XXX this assert fires */
	/* p was not in the hash, so it must not be any reserve block */
	for (int i = 0; i < LFS_N_TOTAL; i++) {
		KDASSERTMSG(fs->lfs_resblk[i].p != p,
		    "lfs_free: inconsistent reserved block");
	}
#endif

	mutex_exit(&lfs_lock);

	/*
	 * If we didn't find it, free it.
	 */
	free(p, M_SEGMENT);
}

/*
 * lfs_seglock --
 *	Single thread the segment writer.
 */
int
lfs_seglock(struct lfs *fs, unsigned long flags)
{
	struct segment *sp;

	mutex_enter(&lfs_lock);
	if (fs->lfs_seglock) {
		if (fs->lfs_lockpid == curproc->p_pid &&
		    fs->lfs_locklwp == curlwp->l_lid) {
			++fs->lfs_seglock;
			fs->lfs_sp->seg_flags |= flags;
			mutex_exit(&lfs_lock);
			return 0;
		} else if (flags & SEGM_PAGEDAEMON) {
			mutex_exit(&lfs_lock);
			return EWOULDBLOCK;
		} else {
			while (fs->lfs_seglock) {
				(void)mtsleep(&fs->lfs_seglock, PRIBIO + 1,
				    "lfs_seglock", 0, &lfs_lock);
			}
		}
	}

	fs->lfs_seglock = 1;
	fs->lfs_lockpid = curproc->p_pid;
	fs->lfs_locklwp = curlwp->l_lid;
	mutex_exit(&lfs_lock);
	fs->lfs_cleanind = 0;

	LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid);

	/* Drain fragment size changes out */
	rw_enter(&fs->lfs_fraglock, RW_WRITER);

	sp = fs->lfs_sp = pool_get(&fs->lfs_segpool, PR_WAITOK);
	sp->bpp = pool_get(&fs->lfs_bpppool, PR_WAITOK);
	sp->seg_flags = flags;
	sp->vp = NULL;
	sp->seg_iocount = 0;
	(void) lfs_initseg(fs);

	/*
	 * Keep a cumulative count of the outstanding I/O operations.
	 * If the disk drive catches up with us it could go to zero before
	 * we finish, so we artificially increment it by one until we've
	 * scheduled all of the writes we intend to do.
	 */
	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	fs->lfs_startseg = lfs_sb_getcurseg(fs);
	mutex_exit(&lfs_lock);
	return 0;
}

static void lfs_unmark_dirop(struct lfs *);

static struct evcnt lfs_dchain_marker_pass_dirop =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "lfs", "dchain marker pass dirop");
EVCNT_ATTACH_STATIC(lfs_dchain_marker_pass_dirop);

/*
 * Walk the chain of vnodes with pending dirops, releasing those whose
 * directory operations have completed.  A marker inode keeps our place
 * in the chain across the vrele() that drops lfs_lock.
 */
static void
lfs_unmark_dirop(struct lfs *fs)
{
	struct inode *ip, *marker;
	struct vnode *vp;
	int doit;

	ASSERT_NO_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	doit = !(fs->lfs_flags & LFS_UNDIROP);
	if (doit)
		fs->lfs_flags |= LFS_UNDIROP;
	mutex_exit(&lfs_lock);

	if (!doit)
		return;

	marker = pool_get(&lfs_inode_pool, PR_WAITOK);
	KASSERT(fs != NULL);
	memset(marker, 0, sizeof(*marker));
	marker->inode_ext.lfs = pool_get(&lfs_inoext_pool, PR_WAITOK);
	memset(marker->inode_ext.lfs, 0, sizeof(*marker->inode_ext.lfs));
	marker->i_state |= IN_MARKER;

	mutex_enter(&lfs_lock);
	TAILQ_INSERT_HEAD(&fs->lfs_dchainhd, marker, i_lfs_dchain);
	while ((ip = TAILQ_NEXT(marker, i_lfs_dchain)) != NULL) {
		TAILQ_REMOVE(&fs->lfs_dchainhd, marker, i_lfs_dchain);
		TAILQ_INSERT_AFTER(&fs->lfs_dchainhd, ip, marker,
		    i_lfs_dchain);
		if (ip->i_state & IN_MARKER) {
			lfs_dchain_marker_pass_dirop.ev_count++;
			continue;
		}
		vp = ITOV(ip);
		if ((ip->i_state & (IN_ADIROP | IN_CDIROP)) == IN_CDIROP) {
			--lfs_dirvcount;
			--fs->lfs_dirvcount;
			vp->v_uflag &= ~VU_DIROP;
			TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			wakeup(&lfs_dirvcount);
			fs->lfs_unlockvp = vp;
			mutex_exit(&lfs_lock);
			vrele(vp);
			mutex_enter(&lfs_lock);
			fs->lfs_unlockvp = NULL;
			ip->i_state &= ~IN_CDIROP;
		}
	}
	TAILQ_REMOVE(&fs->lfs_dchainhd, marker, i_lfs_dchain);
	fs->lfs_flags &= ~LFS_UNDIROP;
	wakeup(&fs->lfs_flags);
	mutex_exit(&lfs_lock);

	pool_put(&lfs_inoext_pool, marker->inode_ext.lfs);
	pool_put(&lfs_inode_pool, marker);
}

/*
 * Mark as clean those segments that both superblock views agree are
 * dirty but empty and not active.
 */
static void
lfs_auto_segclean(struct lfs *fs)
{
	int i, error, waited;

	ASSERT_SEGLOCK(fs);
	/*
	 * Now that we've swapped lfs_activesb, but while we still
	 * hold the segment lock, run through the segment list marking
	 * the empty ones clean.
	 * XXX - do we really need to do them all at once?
	 */
	waited = 0;
	for (i = 0; i < lfs_sb_getnseg(fs); i++) {
		if ((fs->lfs_suflags[0][i] &
		     (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) ==
		    (SEGUSE_DIRTY | SEGUSE_EMPTY) &&
		    (fs->lfs_suflags[1][i] &
		     (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) ==
		    (SEGUSE_DIRTY | SEGUSE_EMPTY)) {

			/* Make sure the sb is written before we clean */
			mutex_enter(&lfs_lock);
			while (waited == 0 && fs->lfs_sbactive)
				mtsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs asb",
				    0, &lfs_lock);
			mutex_exit(&lfs_lock);
			waited = 1;

			if ((error = lfs_do_segclean(fs, i)) != 0) {
				DLOG((DLOG_CLEAN, "lfs_auto_segclean: lfs_do_segclean returned %d for seg %d\n", error, i));
			}
		}
		fs->lfs_suflags[1 - fs->lfs_activesb][i] =
			fs->lfs_suflags[fs->lfs_activesb][i];
	}
}

/*
 * lfs_segunlock --
 *	Release the segment lock.
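 *
 *	On the final release, free the segment structures, wait for any
 *	pending writes (for a synchronous checkpoint), write both
 *	superblocks on a checkpoint, and wake up anyone waiting on the
 *	lock.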
 */
void
lfs_segunlock(struct lfs *fs)
{
	struct segment *sp;
	unsigned long sync, ckp;
	struct buf *bp;
	int do_unmark_dirop = 0;

	sp = fs->lfs_sp;

	mutex_enter(&lfs_lock);

	if (!LFS_SEGLOCK_HELD(fs))
		panic("lfs seglock not held");

	if (fs->lfs_seglock == 1) {
		if ((sp->seg_flags & (SEGM_PROT | SEGM_CLEAN)) == 0)
			do_unmark_dirop = 1;
		mutex_exit(&lfs_lock);
		sync = sp->seg_flags & SEGM_SYNC;
		ckp = sp->seg_flags & SEGM_CKP;

		/* We should have a segment summary, and nothing else */
		KASSERT(sp->cbpp == sp->bpp + 1);

		/* Free allocated segment summary */
		lfs_sb_suboffset(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs)));
		bp = *sp->bpp;
		lfs_freebuf(fs, bp);

		pool_put(&fs->lfs_bpppool, sp->bpp);
		sp->bpp = NULL;

		/*
		 * If we're not sync, we're done with sp, get rid of it.
		 * Otherwise, we keep a local copy around but free
		 * fs->lfs_sp so another process can use it (we have to
		 * wait but they don't have to wait for us).
		 */
		if (!sync)
			pool_put(&fs->lfs_segpool, sp);
		fs->lfs_sp = NULL;

		/*
		 * If the I/O count is non-zero, sleep until it reaches zero.
		 * At the moment, the user's process hangs around so we can
		 * sleep.
		 */
		mutex_enter(&lfs_lock);
		if (--fs->lfs_iocount <= 1)
			wakeup(&fs->lfs_iocount);
		mutex_exit(&lfs_lock);

		/*
		 * If we're not checkpointing, we don't have to block
		 * other processes to wait for a synchronous write
		 * to complete.
		 */
		if (!ckp) {
			LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__, 0, 0, curproc->p_pid);

			mutex_enter(&lfs_lock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			fs->lfs_locklwp = 0;
			mutex_exit(&lfs_lock);
			wakeup(&fs->lfs_seglock);
		}
		/*
		 * We let checkpoints happen asynchronously.  That means
		 * that during recovery, we have to roll forward between
		 * the two segments described by the first and second
		 * superblocks to make sure that the checkpoint described
		 * by a superblock completed.
		 */
		mutex_enter(&lfs_lock);
		while (ckp && sync && fs->lfs_iocount) {
			(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
			    "lfs_iocount", 0, &lfs_lock);
			DLOG((DLOG_SEG, "sleeping on iocount %p == %d\n", fs, fs->lfs_iocount));
		}
		while (sync && sp->seg_iocount) {
			(void)mtsleep(&sp->seg_iocount, PRIBIO + 1,
			    "seg_iocount", 0, &lfs_lock);
			DLOG((DLOG_SEG, "sleeping on iocount %p == %d\n", sp, sp->seg_iocount));
		}
		mutex_exit(&lfs_lock);
		if (sync)
			pool_put(&fs->lfs_segpool, sp);

		if (ckp) {
			fs->lfs_nactive = 0;
			/* If we *know* everything's on disk, write both sbs */
			/* XXX should wait for this one */
			if (sync)
				lfs_writesuper(fs, lfs_sb_getsboff(fs, fs->lfs_activesb));
			lfs_writesuper(fs, lfs_sb_getsboff(fs, 1 - fs->lfs_activesb));
			if (!(fs->lfs_ivnode->v_mount->mnt_iflag & IMNT_UNMOUNT)) {
				lfs_auto_segclean(fs);
				/* If sync, we can clean the remainder too */
				if (sync)
					lfs_auto_segclean(fs);
			}
			fs->lfs_activesb = 1 - fs->lfs_activesb;

			LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__, 0, 0, curproc->p_pid);

			mutex_enter(&lfs_lock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			fs->lfs_locklwp = 0;
			mutex_exit(&lfs_lock);
			wakeup(&fs->lfs_seglock);
		}
		/* Reenable fragment size changes */
		rw_exit(&fs->lfs_fraglock);
		if (do_unmark_dirop)
			lfs_unmark_dirop(fs);
	} else {
		--fs->lfs_seglock;
		KASSERT(fs->lfs_seglock != 0);
		mutex_exit(&lfs_lock);
	}
}

/*
 * Drain dirops and start writer.
 *
 * No simple_locks are held when we enter and none are held when we return.
 */
void
lfs_writer_enter(struct lfs *fs, const char *wmesg)
{
	int error;

	ASSERT_NO_SEGLOCK(fs);
	mutex_enter(&lfs_lock);

	/* disallow dirops during flush */
	fs->lfs_writer++;

	while (fs->lfs_dirops > 0) {
		++fs->lfs_diropwait;
		error = mtsleep(&fs->lfs_writer, PRIBIO+1, wmesg, 0,
		    &lfs_lock);
		KASSERT(error == 0);
		--fs->lfs_diropwait;
	}

	mutex_exit(&lfs_lock);
}

/*
 * Like lfs_writer_enter(), but fail instead of waiting if directory
 * operations are in progress.  Returns nonzero if the writer was set.
 */
int
lfs_writer_tryenter(struct lfs *fs)
{
	int writer_set;

	ASSERT_MAYBE_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	writer_set = (fs->lfs_dirops == 0);
	if (writer_set)
		fs->lfs_writer++;
	mutex_exit(&lfs_lock);

	return writer_set;
}

/*
 * Release the writer; when the last writer leaves, allow dirops to
 * proceed again.
 */
void
lfs_writer_leave(struct lfs *fs)
{
	bool dowakeup;

	ASSERT_MAYBE_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	dowakeup = !(--fs->lfs_writer);
	if (dowakeup)
		cv_broadcast(&fs->lfs_diropscv);
	mutex_exit(&lfs_lock);
}

/*
 * Unlock, wait for the cleaner, then relock to where we were before.
 * To be used only at a fairly high level, to address a paucity of free
 * segments propagated back from lfs_gop_write().
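 *
 * The lock is dropped and retaken fs->lfs_seglock times with the saved
 * segment flags, so the caller's lock depth is unchanged on return.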
 */
void
lfs_segunlock_relock(struct lfs *fs)
{
	int n = fs->lfs_seglock;
	u_int16_t seg_flags;
	CLEANERINFO *cip;
	struct buf *bp;

	if (n == 0)
		return;

	/* Write anything we've already gathered to disk */
	lfs_writeseg(fs, fs->lfs_sp);

	/* Tell cleaner */
	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_setflags(fs, cip,
	    lfs_ci_getflags(fs, cip) | LFS_CLEANER_MUST_CLEAN);
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	/* Save segment flags for later */
	seg_flags = fs->lfs_sp->seg_flags;

	fs->lfs_sp->seg_flags |= SEGM_PROT; /* Don't unmark dirop nodes */
	while (fs->lfs_seglock)
		lfs_segunlock(fs);

	/* Wait for the cleaner */
	lfs_wakeup_cleaner(fs);
	mutex_enter(&lfs_lock);
	while (LFS_STARVED_FOR_SEGS(fs))
		mtsleep(&fs->lfs_availsleep, PRIBIO, "relock", 0,
		    &lfs_lock);
	mutex_exit(&lfs_lock);

	/* Put the segment lock back the way it was. */
	while (n--)
		lfs_seglock(fs, seg_flags);

	/* Cleaner can relax now */
	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_setflags(fs, cip,
	    lfs_ci_getflags(fs, cip) & ~LFS_CLEANER_MUST_CLEAN);
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	return;
}

/*
 * Wake up the cleaner, provided that nowrap is not set.
 */
void
lfs_wakeup_cleaner(struct lfs *fs)
{
	if (fs->lfs_nowrap > 0)
		return;

	cv_broadcast(&fs->lfs_nextsegsleep);
	cv_broadcast(&lfs_allclean_wakeup);
}
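
A minimal sketch of how a caller typically pairs these routines (the
function example_flush is hypothetical; the lfs_* calls are the routines
in this file):

	/*
	 * Take the segment lock, write what has been gathered, and
	 * release.  lfs_seglock() nests for the same LWP, so every
	 * lfs_seglock() must be matched by one lfs_segunlock().
	 * SEGM_PROT prevents dirop vnodes from being unmarked on the
	 * final unlock.
	 */
	static int
	example_flush(struct lfs *fs)
	{
		int error;

		/* Fails with EWOULDBLOCK only for SEGM_PAGEDAEMON. */
		error = lfs_seglock(fs, SEGM_PROT);
		if (error)
			return error;
		lfs_writeseg(fs, fs->lfs_sp);
		lfs_segunlock(fs);
		return 0;
	}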