lfs_subr.c revision 1.61
1/* $NetBSD: lfs_subr.c,v 1.61 2006/09/01 19:41:28 perseant Exp $ */ 2 3/*- 4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Konrad E. Schroder <perseant@hhhh.org>. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38/* 39 * Copyright (c) 1991, 1993 40 * The Regents of the University of California. All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 3. Neither the name of the University nor the names of its contributors 51 * may be used to endorse or promote products derived from this software 52 * without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 * @(#)lfs_subr.c 8.4 (Berkeley) 5/8/95 67 */ 68 69#include <sys/cdefs.h> 70__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.61 2006/09/01 19:41:28 perseant Exp $"); 71 72#include <sys/param.h> 73#include <sys/systm.h> 74#include <sys/namei.h> 75#include <sys/vnode.h> 76#include <sys/buf.h> 77#include <sys/mount.h> 78#include <sys/malloc.h> 79#include <sys/proc.h> 80#include <sys/kauth.h> 81 82#include <ufs/ufs/inode.h> 83#include <ufs/lfs/lfs.h> 84#include <ufs/lfs/lfs_extern.h> 85 86#include <uvm/uvm.h> 87 88#ifdef DEBUG 89const char *lfs_res_names[LFS_NB_COUNT] = { 90 "summary", 91 "superblock", 92 "file block", 93 "cluster", 94 "clean", 95 "blkiov", 96}; 97#endif 98 99int lfs_res_qty[LFS_NB_COUNT] = { 100 LFS_N_SUMMARIES, 101 LFS_N_SBLOCKS, 102 LFS_N_IBLOCKS, 103 LFS_N_CLUSTERS, 104 LFS_N_CLEAN, 105 LFS_N_BLKIOV, 106}; 107 108void 109lfs_setup_resblks(struct lfs *fs) 110{ 111 int i, j; 112 int maxbpp; 113 114 ASSERT_NO_SEGLOCK(fs); 115 fs->lfs_resblk = (res_t *)malloc(LFS_N_TOTAL * sizeof(res_t), M_SEGMENT, 116 M_WAITOK); 117 for (i = 0; i < LFS_N_TOTAL; i++) { 118 fs->lfs_resblk[i].inuse = 0; 119 fs->lfs_resblk[i].p = NULL; 120 } 121 for (i = 0; i < LFS_RESHASH_WIDTH; i++) 122 LIST_INIT(fs->lfs_reshash + i); 123 124 /* 125 * These types of allocations can be larger than a page, 126 * so we can't use the pool subsystem for them. 127 */ 128 for (i = 0, j = 0; j < LFS_N_SUMMARIES; j++, i++) 129 fs->lfs_resblk[i].size = fs->lfs_sumsize; 130 for (j = 0; j < LFS_N_SBLOCKS; j++, i++) 131 fs->lfs_resblk[i].size = LFS_SBPAD; 132 for (j = 0; j < LFS_N_IBLOCKS; j++, i++) 133 fs->lfs_resblk[i].size = fs->lfs_bsize; 134 for (j = 0; j < LFS_N_CLUSTERS; j++, i++) 135 fs->lfs_resblk[i].size = MAXPHYS; 136 for (j = 0; j < LFS_N_CLEAN; j++, i++) 137 fs->lfs_resblk[i].size = MAXPHYS; 138 for (j = 0; j < LFS_N_BLKIOV; j++, i++) 139 fs->lfs_resblk[i].size = LFS_MARKV_MAXBLKCNT * sizeof(BLOCK_INFO); 140 141 for (i = 0; i < LFS_N_TOTAL; i++) { 142 fs->lfs_resblk[i].p = malloc(fs->lfs_resblk[i].size, 143 M_SEGMENT, M_WAITOK); 144 } 145 146 /* 147 * Initialize pools for small types (XXX is BPP small?) 148 */ 149 pool_init(&fs->lfs_clpool, sizeof(struct lfs_cluster), 0, 0, 0, 150 "lfsclpl", &pool_allocator_nointr); 151 pool_init(&fs->lfs_segpool, sizeof(struct segment), 0, 0, 0, 152 "lfssegpool", &pool_allocator_nointr); 153 maxbpp = ((fs->lfs_sumsize - SEGSUM_SIZE(fs)) / sizeof(int32_t) + 2); 154 maxbpp = MIN(maxbpp, segsize(fs) / fs->lfs_fsize + 2); 155 pool_init(&fs->lfs_bpppool, maxbpp * sizeof(struct buf *), 0, 0, 0, 156 "lfsbpppl", &pool_allocator_nointr); 157} 158 159void 160lfs_free_resblks(struct lfs *fs) 161{ 162 int i; 163 164 pool_destroy(&fs->lfs_bpppool); 165 pool_destroy(&fs->lfs_segpool); 166 pool_destroy(&fs->lfs_clpool); 167 168 simple_lock(&fs->lfs_interlock); 169 for (i = 0; i < LFS_N_TOTAL; i++) { 170 while (fs->lfs_resblk[i].inuse) 171 ltsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0, 172 &fs->lfs_interlock); 173 if (fs->lfs_resblk[i].p != NULL) 174 free(fs->lfs_resblk[i].p, M_SEGMENT); 175 } 176 free(fs->lfs_resblk, M_SEGMENT); 177 simple_unlock(&fs->lfs_interlock); 178} 179 180static unsigned int 181lfs_mhash(void *vp) 182{ 183 return (unsigned int)(((unsigned long)vp) >> 2) % LFS_RESHASH_WIDTH; 184} 185 186/* 187 * Return memory of the given size for the given purpose, or use one of a 188 * number of spare last-resort buffers, if malloc returns NULL. 189 */ 190void * 191lfs_malloc(struct lfs *fs, size_t size, int type) 192{ 193 struct lfs_res_blk *re; 194 void *r; 195 int i, s, start; 196 unsigned int h; 197 198 ASSERT_MAYBE_SEGLOCK(fs); 199 r = NULL; 200 201 /* If no mem allocated for this type, it just waits */ 202 if (lfs_res_qty[type] == 0) { 203 r = malloc(size, M_SEGMENT, M_WAITOK); 204 return r; 205 } 206 207 /* Otherwise try a quick malloc, and if it works, great */ 208 if ((r = malloc(size, M_SEGMENT, M_NOWAIT)) != NULL) { 209 return r; 210 } 211 212 /* 213 * If malloc returned NULL, we are forced to use one of our 214 * reserve blocks. We have on hand at least one summary block, 215 * at least one cluster block, at least one superblock, 216 * and several indirect blocks. 217 */ 218 219 simple_lock(&fs->lfs_interlock); 220 /* skip over blocks of other types */ 221 for (i = 0, start = 0; i < type; i++) 222 start += lfs_res_qty[i]; 223 while (r == NULL) { 224 for (i = 0; i < lfs_res_qty[type]; i++) { 225 if (fs->lfs_resblk[start + i].inuse == 0) { 226 re = fs->lfs_resblk + start + i; 227 re->inuse = 1; 228 r = re->p; 229 KASSERT(re->size >= size); 230 h = lfs_mhash(r); 231 s = splbio(); 232 LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res); 233 splx(s); 234 simple_unlock(&fs->lfs_interlock); 235 return r; 236 } 237 } 238 DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n", 239 lfs_res_names[type], lfs_res_qty[type])); 240 ltsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0, 241 &fs->lfs_interlock); 242 DLOG((DLOG_MALLOC, "done sleeping on %s\n", 243 lfs_res_names[type])); 244 } 245 /* NOTREACHED */ 246 simple_unlock(&fs->lfs_interlock); 247 return r; 248} 249 250void 251lfs_free(struct lfs *fs, void *p, int type) 252{ 253 int s; 254 unsigned int h; 255 res_t *re; 256#ifdef DEBUG 257 int i; 258#endif 259 260 ASSERT_MAYBE_SEGLOCK(fs); 261 h = lfs_mhash(p); 262 simple_lock(&fs->lfs_interlock); 263 s = splbio(); 264 LIST_FOREACH(re, &fs->lfs_reshash[h], res) { 265 if (re->p == p) { 266 KASSERT(re->inuse == 1); 267 LIST_REMOVE(re, res); 268 re->inuse = 0; 269 wakeup(&fs->lfs_resblk); 270 splx(s); 271 simple_unlock(&fs->lfs_interlock); 272 return; 273 } 274 } 275#ifdef DEBUG 276 for (i = 0; i < LFS_N_TOTAL; i++) { 277 if (fs->lfs_resblk[i].p == p) 278 panic("lfs_free: inconsistent reserved block"); 279 } 280#endif 281 splx(s); 282 simple_unlock(&fs->lfs_interlock); 283 284 /* 285 * If we didn't find it, free it. 286 */ 287 free(p, M_SEGMENT); 288} 289 290/* 291 * lfs_seglock -- 292 * Single thread the segment writer. 293 */ 294int 295lfs_seglock(struct lfs *fs, unsigned long flags) 296{ 297 struct segment *sp; 298 299 simple_lock(&fs->lfs_interlock); 300 if (fs->lfs_seglock) { 301 if (fs->lfs_lockpid == curproc->p_pid && 302 fs->lfs_locklwp == curlwp->l_lid) { 303 simple_unlock(&fs->lfs_interlock); 304 ++fs->lfs_seglock; 305 fs->lfs_sp->seg_flags |= flags; 306 return 0; 307 } else if (flags & SEGM_PAGEDAEMON) { 308 simple_unlock(&fs->lfs_interlock); 309 return EWOULDBLOCK; 310 } else { 311 while (fs->lfs_seglock) { 312 (void)ltsleep(&fs->lfs_seglock, PRIBIO + 1, 313 "lfs seglock", 0, &fs->lfs_interlock); 314 } 315 } 316 } 317 318 fs->lfs_seglock = 1; 319 fs->lfs_lockpid = curproc->p_pid; 320 fs->lfs_locklwp = curlwp->l_lid; 321 simple_unlock(&fs->lfs_interlock); 322 fs->lfs_cleanind = 0; 323 324#ifdef DEBUG 325 LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid); 326#endif 327 /* Drain fragment size changes out */ 328 lockmgr(&fs->lfs_fraglock, LK_EXCLUSIVE, 0); 329 330 sp = fs->lfs_sp = pool_get(&fs->lfs_segpool, PR_WAITOK); 331 sp->bpp = pool_get(&fs->lfs_bpppool, PR_WAITOK); 332 sp->seg_flags = flags; 333 sp->vp = NULL; 334 sp->seg_iocount = 0; 335 (void) lfs_initseg(fs); 336 337 /* 338 * Keep a cumulative count of the outstanding I/O operations. If the 339 * disk drive catches up with us it could go to zero before we finish, 340 * so we artificially increment it by one until we've scheduled all of 341 * the writes we intend to do. 342 */ 343 simple_lock(&fs->lfs_interlock); 344 ++fs->lfs_iocount; 345 simple_unlock(&fs->lfs_interlock); 346 return 0; 347} 348 349static void lfs_unmark_dirop(struct lfs *); 350 351static void 352lfs_unmark_dirop(struct lfs *fs) 353{ 354 struct inode *ip, *nip; 355 struct vnode *vp; 356 int doit; 357 358 ASSERT_NO_SEGLOCK(fs); 359 simple_lock(&fs->lfs_interlock); 360 doit = !(fs->lfs_flags & LFS_UNDIROP); 361 if (doit) 362 fs->lfs_flags |= LFS_UNDIROP; 363 if (!doit) { 364 simple_unlock(&fs->lfs_interlock); 365 return; 366 } 367 368 for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) { 369 nip = TAILQ_NEXT(ip, i_lfs_dchain); 370 simple_unlock(&fs->lfs_interlock); 371 vp = ITOV(ip); 372 373 simple_lock(&vp->v_interlock); 374 if (VOP_ISLOCKED(vp) && 375 vp->v_lock.lk_lockholder != curproc->p_pid) { 376 simple_lock(&fs->lfs_interlock); 377 simple_unlock(&vp->v_interlock); 378 continue; 379 } 380 if ((VTOI(vp)->i_flag & (IN_ADIROP | IN_ALLMOD)) == 0) { 381 simple_lock(&fs->lfs_interlock); 382 simple_lock(&lfs_subsys_lock); 383 --lfs_dirvcount; 384 simple_unlock(&lfs_subsys_lock); 385 --fs->lfs_dirvcount; 386 vp->v_flag &= ~VDIROP; 387 TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain); 388 simple_unlock(&fs->lfs_interlock); 389 wakeup(&lfs_dirvcount); 390 simple_unlock(&vp->v_interlock); 391 simple_lock(&fs->lfs_interlock); 392 fs->lfs_unlockvp = vp; 393 simple_unlock(&fs->lfs_interlock); 394 vrele(vp); 395 simple_lock(&fs->lfs_interlock); 396 fs->lfs_unlockvp = NULL; 397 simple_unlock(&fs->lfs_interlock); 398 } else 399 simple_unlock(&vp->v_interlock); 400 simple_lock(&fs->lfs_interlock); 401 } 402 403 fs->lfs_flags &= ~LFS_UNDIROP; 404 simple_unlock(&fs->lfs_interlock); 405 wakeup(&fs->lfs_flags); 406} 407 408static void 409lfs_auto_segclean(struct lfs *fs) 410{ 411 int i, error, s, waited; 412 413 ASSERT_SEGLOCK(fs); 414 /* 415 * Now that we've swapped lfs_activesb, but while we still 416 * hold the segment lock, run through the segment list marking 417 * the empty ones clean. 418 * XXX - do we really need to do them all at once? 419 */ 420 waited = 0; 421 for (i = 0; i < fs->lfs_nseg; i++) { 422 if ((fs->lfs_suflags[0][i] & 423 (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) == 424 (SEGUSE_DIRTY | SEGUSE_EMPTY) && 425 (fs->lfs_suflags[1][i] & 426 (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) == 427 (SEGUSE_DIRTY | SEGUSE_EMPTY)) { 428 429 /* Make sure the sb is written before we clean */ 430 simple_lock(&fs->lfs_interlock); 431 s = splbio(); 432 while (waited == 0 && fs->lfs_sbactive) 433 ltsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs asb", 434 0, &fs->lfs_interlock); 435 splx(s); 436 simple_unlock(&fs->lfs_interlock); 437 waited = 1; 438 439 if ((error = lfs_do_segclean(fs, i)) != 0) { 440 DLOG((DLOG_CLEAN, "lfs_auto_segclean: lfs_do_segclean returned %d for seg %d\n", error, i)); 441 } 442 } 443 fs->lfs_suflags[1 - fs->lfs_activesb][i] = 444 fs->lfs_suflags[fs->lfs_activesb][i]; 445 } 446} 447 448/* 449 * lfs_segunlock -- 450 * Single thread the segment writer. 451 */ 452void 453lfs_segunlock(struct lfs *fs) 454{ 455 struct segment *sp; 456 unsigned long sync, ckp; 457 struct buf *bp; 458 int do_unmark_dirop = 0; 459 460 sp = fs->lfs_sp; 461 462 simple_lock(&fs->lfs_interlock); 463 LOCK_ASSERT(LFS_SEGLOCK_HELD(fs)); 464 if (fs->lfs_seglock == 1) { 465 if ((sp->seg_flags & (SEGM_PROT | SEGM_CLEAN)) == 0 && 466 LFS_STARVED_FOR_SEGS(fs) == 0) 467 do_unmark_dirop = 1; 468 simple_unlock(&fs->lfs_interlock); 469 sync = sp->seg_flags & SEGM_SYNC; 470 ckp = sp->seg_flags & SEGM_CKP; 471 472 /* We should have a segment summary, and nothing else */ 473 KASSERT(sp->cbpp == sp->bpp + 1); 474 475 /* Free allocated segment summary */ 476 fs->lfs_offset -= btofsb(fs, fs->lfs_sumsize); 477 bp = *sp->bpp; 478 lfs_freebuf(fs, bp); 479 480 pool_put(&fs->lfs_bpppool, sp->bpp); 481 sp->bpp = NULL; 482 483 /* 484 * If we're not sync, we're done with sp, get rid of it. 485 * Otherwise, we keep a local copy around but free 486 * fs->lfs_sp so another process can use it (we have to 487 * wait but they don't have to wait for us). 488 */ 489 if (!sync) 490 pool_put(&fs->lfs_segpool, sp); 491 fs->lfs_sp = NULL; 492 493 /* 494 * If the I/O count is non-zero, sleep until it reaches zero. 495 * At the moment, the user's process hangs around so we can 496 * sleep. 497 */ 498 simple_lock(&fs->lfs_interlock); 499 if (--fs->lfs_iocount == 0) 500 LFS_DEBUG_COUNTLOCKED("lfs_segunlock"); 501 if (fs->lfs_iocount <= 1) 502 wakeup(&fs->lfs_iocount); 503 simple_unlock(&fs->lfs_interlock); 504 /* 505 * If we're not checkpointing, we don't have to block 506 * other processes to wait for a synchronous write 507 * to complete. 508 */ 509 if (!ckp) { 510#ifdef DEBUG 511 LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__, 0, 0, curproc->p_pid); 512#endif 513 simple_lock(&fs->lfs_interlock); 514 --fs->lfs_seglock; 515 fs->lfs_lockpid = 0; 516 fs->lfs_locklwp = 0; 517 simple_unlock(&fs->lfs_interlock); 518 wakeup(&fs->lfs_seglock); 519 } 520 /* 521 * We let checkpoints happen asynchronously. That means 522 * that during recovery, we have to roll forward between 523 * the two segments described by the first and second 524 * superblocks to make sure that the checkpoint described 525 * by a superblock completed. 526 */ 527 simple_lock(&fs->lfs_interlock); 528 while (ckp && sync && fs->lfs_iocount) 529 (void)ltsleep(&fs->lfs_iocount, PRIBIO + 1, 530 "lfs_iocount", 0, &fs->lfs_interlock); 531 while (sync && sp->seg_iocount) { 532 (void)ltsleep(&sp->seg_iocount, PRIBIO + 1, 533 "seg_iocount", 0, &fs->lfs_interlock); 534 DLOG((DLOG_SEG, "sleeping on iocount %x == %d\n", sp, sp->seg_iocount)); 535 } 536 simple_unlock(&fs->lfs_interlock); 537 if (sync) 538 pool_put(&fs->lfs_segpool, sp); 539 540 if (ckp) { 541 fs->lfs_nactive = 0; 542 /* If we *know* everything's on disk, write both sbs */ 543 /* XXX should wait for this one */ 544 if (sync) 545 lfs_writesuper(fs, fs->lfs_sboffs[fs->lfs_activesb]); 546 lfs_writesuper(fs, fs->lfs_sboffs[1 - fs->lfs_activesb]); 547 if (!(fs->lfs_ivnode->v_mount->mnt_iflag & IMNT_UNMOUNT)) { 548 lfs_auto_segclean(fs); 549 /* If sync, we can clean the remainder too */ 550 if (sync) 551 lfs_auto_segclean(fs); 552 } 553 fs->lfs_activesb = 1 - fs->lfs_activesb; 554#ifdef DEBUG 555 LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__, 0, 0, curproc->p_pid); 556#endif 557 simple_lock(&fs->lfs_interlock); 558 --fs->lfs_seglock; 559 fs->lfs_lockpid = 0; 560 fs->lfs_locklwp = 0; 561 simple_unlock(&fs->lfs_interlock); 562 wakeup(&fs->lfs_seglock); 563 } 564 /* Reenable fragment size changes */ 565 lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0); 566 if (do_unmark_dirop) 567 lfs_unmark_dirop(fs); 568 } else if (fs->lfs_seglock == 0) { 569 simple_unlock(&fs->lfs_interlock); 570 panic ("Seglock not held"); 571 } else { 572 --fs->lfs_seglock; 573 simple_unlock(&fs->lfs_interlock); 574 } 575} 576 577/* 578 * drain dirops and start writer. 579 */ 580int 581lfs_writer_enter(struct lfs *fs, const char *wmesg) 582{ 583 int error = 0; 584 585 ASSERT_MAYBE_SEGLOCK(fs); 586 simple_lock(&fs->lfs_interlock); 587 588 /* disallow dirops during flush */ 589 fs->lfs_writer++; 590 591 while (fs->lfs_dirops > 0) { 592 ++fs->lfs_diropwait; 593 error = ltsleep(&fs->lfs_writer, PRIBIO+1, wmesg, 0, 594 &fs->lfs_interlock); 595 --fs->lfs_diropwait; 596 } 597 598 if (error) 599 fs->lfs_writer--; 600 601 simple_unlock(&fs->lfs_interlock); 602 603 return error; 604} 605 606void 607lfs_writer_leave(struct lfs *fs) 608{ 609 boolean_t dowakeup; 610 611 ASSERT_MAYBE_SEGLOCK(fs); 612 simple_lock(&fs->lfs_interlock); 613 dowakeup = !(--fs->lfs_writer); 614 simple_unlock(&fs->lfs_interlock); 615 if (dowakeup) 616 wakeup(&fs->lfs_dirops); 617} 618 619/* 620 * Unlock, wait for the cleaner, then relock to where we were before. 621 * To be used only at a fairly high level, to address a paucity of free 622 * segments propagated back from lfs_gop_write(). 623 */ 624void 625lfs_segunlock_relock(struct lfs *fs) 626{ 627 int n = fs->lfs_seglock; 628 u_int16_t seg_flags; 629 CLEANERINFO *cip; 630 struct buf *bp; 631 632 if (n == 0) 633 return; 634 635 /* Write anything we've already gathered to disk */ 636 lfs_writeseg(fs, fs->lfs_sp); 637 638 /* Tell cleaner */ 639 LFS_CLEANERINFO(cip, fs, bp); 640 cip->flags |= LFS_CLEANER_MUST_CLEAN; 641 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1); 642 643 /* Save segment flags for later */ 644 seg_flags = fs->lfs_sp->seg_flags; 645 646 fs->lfs_sp->seg_flags |= SEGM_PROT; /* Don't unmark dirop nodes */ 647 while(fs->lfs_seglock) 648 lfs_segunlock(fs); 649 650 /* Wait for the cleaner */ 651 lfs_wakeup_cleaner(fs); 652 simple_lock(&fs->lfs_interlock); 653 while (LFS_STARVED_FOR_SEGS(fs)) 654 ltsleep(&fs->lfs_avail, PRIBIO, "relock", 0, 655 &fs->lfs_interlock); 656 simple_unlock(&fs->lfs_interlock); 657 658 /* Put the segment lock back the way it was. */ 659 while(n--) 660 lfs_seglock(fs, seg_flags); 661 662 /* Cleaner can relax now */ 663 LFS_CLEANERINFO(cip, fs, bp); 664 cip->flags &= ~LFS_CLEANER_MUST_CLEAN; 665 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1); 666 667 return; 668} 669 670/* 671 * Wake up the cleaner, provided that nowrap is not set. 672 */ 673void 674lfs_wakeup_cleaner(struct lfs *fs) 675{ 676 if (fs->lfs_nowrap > 0) 677 return; 678 679 wakeup(&fs->lfs_nextseg); 680 wakeup(&lfs_allclean_wakeup); 681} 682