ffs_softdep.c revision 280760
/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick	http://www.mckusick.com/softdep/
 *	1614 Oxford Street	mckusick@mckusick.com
 *	Berkeley, CA 94709-1608	+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/ufs/ffs/ffs_softdep.c 280760 2015-03-27 13:55:56Z kib $");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

/*
 * For now we want the safety net that the DEBUG flag provides.
 */
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0 /* Define to KTR_SPARE. */

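/*
 * When the kernel is configured without the SOFTUPDATES option, the stubs
 * below stand in for the real implementation (which begins at the #else
 * branch).  Entry points that may legitimately be reached simply return;
 * the remainder panic, since reaching them implies an inconsistent kernel
 * configuration.
 */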
#ifndef SOFTUPDATES

int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

	panic("softdep_unmount called");
}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{

	panic("softdep_setup_sbupdate called");
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

panic("softdep_change_directoryentry_offset called"); 294} 295 296void 297softdep_setup_remove(bp, dp, ip, isrmdir) 298 struct buf *bp; 299 struct inode *dp; 300 struct inode *ip; 301 int isrmdir; 302{ 303 304 panic("softdep_setup_remove called"); 305} 306 307void 308softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 309 struct buf *bp; 310 struct inode *dp; 311 struct inode *ip; 312 ino_t newinum; 313 int isrmdir; 314{ 315 316 panic("softdep_setup_directory_change called"); 317} 318 319void 320softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 321 struct mount *mp; 322 struct buf *bp; 323 ufs2_daddr_t blkno; 324 int frags; 325 struct workhead *wkhd; 326{ 327 328 panic("%s called", __FUNCTION__); 329} 330 331void 332softdep_setup_inofree(mp, bp, ino, wkhd) 333 struct mount *mp; 334 struct buf *bp; 335 ino_t ino; 336 struct workhead *wkhd; 337{ 338 339 panic("%s called", __FUNCTION__); 340} 341 342void 343softdep_setup_unlink(dp, ip) 344 struct inode *dp; 345 struct inode *ip; 346{ 347 348 panic("%s called", __FUNCTION__); 349} 350 351void 352softdep_setup_link(dp, ip) 353 struct inode *dp; 354 struct inode *ip; 355{ 356 357 panic("%s called", __FUNCTION__); 358} 359 360void 361softdep_revert_link(dp, ip) 362 struct inode *dp; 363 struct inode *ip; 364{ 365 366 panic("%s called", __FUNCTION__); 367} 368 369void 370softdep_setup_rmdir(dp, ip) 371 struct inode *dp; 372 struct inode *ip; 373{ 374 375 panic("%s called", __FUNCTION__); 376} 377 378void 379softdep_revert_rmdir(dp, ip) 380 struct inode *dp; 381 struct inode *ip; 382{ 383 384 panic("%s called", __FUNCTION__); 385} 386 387void 388softdep_setup_create(dp, ip) 389 struct inode *dp; 390 struct inode *ip; 391{ 392 393 panic("%s called", __FUNCTION__); 394} 395 396void 397softdep_revert_create(dp, ip) 398 struct inode *dp; 399 struct inode *ip; 400{ 401 402 panic("%s called", __FUNCTION__); 403} 404 405void 406softdep_setup_mkdir(dp, ip) 407 struct inode *dp; 408 struct inode *ip; 409{ 410 411 panic("%s called", __FUNCTION__); 412} 413 414void 415softdep_revert_mkdir(dp, ip) 416 struct inode *dp; 417 struct inode *ip; 418{ 419 420 panic("%s called", __FUNCTION__); 421} 422 423void 424softdep_setup_dotdot_link(dp, ip) 425 struct inode *dp; 426 struct inode *ip; 427{ 428 429 panic("%s called", __FUNCTION__); 430} 431 432int 433softdep_prealloc(vp, waitok) 434 struct vnode *vp; 435 int waitok; 436{ 437 438 panic("%s called", __FUNCTION__); 439} 440 441int 442softdep_journal_lookup(mp, vpp) 443 struct mount *mp; 444 struct vnode **vpp; 445{ 446 447 return (ENOENT); 448} 449 450void 451softdep_change_linkcnt(ip) 452 struct inode *ip; 453{ 454 455 panic("softdep_change_linkcnt called"); 456} 457 458void 459softdep_load_inodeblock(ip) 460 struct inode *ip; 461{ 462 463 panic("softdep_load_inodeblock called"); 464} 465 466void 467softdep_update_inodeblock(ip, bp, waitfor) 468 struct inode *ip; 469 struct buf *bp; 470 int waitfor; 471{ 472 473 panic("softdep_update_inodeblock called"); 474} 475 476int 477softdep_fsync(vp) 478 struct vnode *vp; /* the "in_core" copy of the inode */ 479{ 480 481 return (0); 482} 483 484void 485softdep_fsync_mountdev(vp) 486 struct vnode *vp; 487{ 488 489 return; 490} 491 492int 493softdep_flushworklist(oldmnt, countp, td) 494 struct mount *oldmnt; 495 int *countp; 496 struct thread *td; 497{ 498 499 *countp = 0; 500 return (0); 501} 502 503int 504softdep_sync_metadata(struct vnode *vp) 505{ 506 507 panic("softdep_sync_metadata called"); 508} 509 510int 511softdep_sync_buf(struct vnode *vp, struct buf *bp, 
int
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{

	panic("softdep_sync_buf called");
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_depcnt,
		      int softdep_accdepcnt,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_depcnt;
	(void) softdep_accdepcnt;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_append called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_append called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");

static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
    "high use dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
    "current dependencies written");

unsigned long dep_current[D_LAST + 1];
unsigned long dep_highuse[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];

#define	SOFTDEP_TYPE(type, str, long)					\
    static MALLOC_DEFINE(M_ ## type, #str, long);			\
    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_total[D_ ## type], 0, "");					\
    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_current[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_highuse[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_write[D_ ## type], 0, "");

SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
    "Block or frag allocated from cyl group map");
dependency"); 651SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode"); 652SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies"); 653SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block"); 654SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode"); 655SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode"); 656SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated"); 657SOFTDEP_TYPE(DIRADD, diradd, "New directory entry"); 658SOFTDEP_TYPE(MKDIR, mkdir, "New directory"); 659SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted"); 660SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block"); 661SOFTDEP_TYPE(FREEWORK, freework, "free an inode block"); 662SOFTDEP_TYPE(FREEDEP, freedep, "track a block free"); 663SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add"); 664SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove"); 665SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move"); 666SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block"); 667SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block"); 668SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag"); 669SOFTDEP_TYPE(JSEG, jseg, "Journal segment"); 670SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete"); 671SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency"); 672SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation"); 673SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete"); 674 675static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel"); 676 677static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes"); 678static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations"); 679static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data"); 680 681#define M_SOFTDEP_FLAGS (M_WAITOK) 682 683/* 684 * translate from workitem type to memory type 685 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 686 */ 687static struct malloc_type *memtype[] = { 688 M_PAGEDEP, 689 M_INODEDEP, 690 M_BMSAFEMAP, 691 M_NEWBLK, 692 M_ALLOCDIRECT, 693 M_INDIRDEP, 694 M_ALLOCINDIR, 695 M_FREEFRAG, 696 M_FREEBLKS, 697 M_FREEFILE, 698 M_DIRADD, 699 M_MKDIR, 700 M_DIRREM, 701 M_NEWDIRBLK, 702 M_FREEWORK, 703 M_FREEDEP, 704 M_JADDREF, 705 M_JREMREF, 706 M_JMVREF, 707 M_JNEWBLK, 708 M_JFREEBLK, 709 M_JFREEFRAG, 710 M_JSEG, 711 M_JSEGDEP, 712 M_SBDEP, 713 M_JTRUNC, 714 M_JFSYNC, 715 M_SENTINEL 716}; 717 718#define DtoM(type) (memtype[type]) 719 720/* 721 * Names of malloc types. 722 */ 723#define TYPENAME(type) \ 724 ((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???") 725/* 726 * End system adaptation definitions. 727 */ 728 729#define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino) 730#define DOT_OFFSET offsetof(struct dirtemplate, dot_ino) 731 732/* 733 * Internal function prototypes. 
static	void check_clear_deps(struct mount *);
static	void softdep_error(char *, int);
static	int softdep_process_worklist(struct mount *, int);
static	int softdep_waitidle(struct mount *, int);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
static	void clear_remove(struct mount *);
static	void clear_inodedeps(struct mount *);
static	void unlinked_inodedep(struct mount *, struct inodedep *);
static	void clear_unlinked_inodedep(struct inodedep *);
static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int free_pagedep(struct pagedep *);
static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int sync_cgs(struct mount *, int);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	int handle_written_sbdep(struct sbdep *, struct buf *);
static	void initiate_write_sbdep(struct sbdep *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_indirdep(struct indirdep *, struct buf *,
	    struct buf **);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
static	void handle_written_jaddref(struct jaddref *);
static	void handle_written_jremref(struct jremref *);
static	void handle_written_jseg(struct jseg *, struct buf *);
static	void handle_written_jnewblk(struct jnewblk *);
static	void handle_written_jblkdep(struct jblkdep *);
static	void handle_written_jfreefrag(struct jfreefrag *);
static	void complete_jseg(struct jseg *);
static	void complete_jsegs(struct jseg *);
static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static	inline void inoref_write(struct inoref *, struct jseg *,
	    struct jrefrec *);
static	void handle_allocdirect_partdone(struct allocdirect *,
	    struct workhead *);
static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
	    struct workhead *);
static	void indirdep_complete(struct indirdep *);
static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
static	void indirblk_insert(struct freework *);
static	void indirblk_remove(struct freework *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void initiate_write_indirdep(struct indirdep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	    struct buf *);
static	void cancel_indirdep(struct indirdep *, struct buf *,
	    struct freeblks *);
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *,
	    struct buf *, int);
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
	    struct inodedep *);
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
	    struct workhead *);
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
	    int, int);
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
	    ufs_lbn_t, int);
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	    int, int);
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk *);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *,
	    struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	    struct workhead *);
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	    ufs_lbn_t);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
	    struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void *);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(struct ufsmount *);
static	void worklist_speedup(struct mount *);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct ufsmount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	    uint16_t);
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	    uint16_t);
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	    ufs2_daddr_t, int);
static	void adjust_newfreework(struct freeblks *, int);
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	    struct mkdir **);
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

/*
 * Global lock over all of soft updates.
 */
static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF);

#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)

/*
 * Per-filesystem soft-updates locking.
 */
#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
				    RA_WLOCKED)

#define BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
#define BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)
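
/*
 * Two levels of locking are in play here: the global mutex "lk" above
 * serializes updates to the cross-filesystem dependency statistics and
 * to the softdepmounts list, while the per-filesystem rwlock reached
 * through the LOCK_PTR()/ACQUIRE_LOCK() macros protects that
 * filesystem's dependency structures and work queues.
 */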

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *, int);
static	void worklist_remove(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)

static void
worklist_insert(head, item, locked)
	struct workhead *head;
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item, locked)
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}
#endif /* DEBUG */

/*
 * Merge two jsegdeps keeping only the oldest one as newer references
 * can't be discarded until after older references.
 */
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
{
	struct jsegdep *swp;

	if (two == NULL)
		return (one);

	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
		swp = one;
		one = two;
		two = swp;
	}
	WORKLIST_REMOVE(&two->jd_list);
	free_jsegdep(two);

	return (one);
}

/*
 * If two freedeps are compatible free one to reduce list size.
 */
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
{
	if (two == NULL)
		return (one);

	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
		free_freedep(two);
	}
	return (one);
}

/*
 * Move journal work from one list to another.  Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
 */
static void
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
{
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;

	KASSERT(dst != src,
	    ("jwork_move: dst == src"));
	freedep = NULL;
	jsegdep = NULL;
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		else if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}

	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
			continue;
		}
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}
}

static void
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
{
	struct jsegdep *jsegdepn;
	struct worklist *wk;

	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
			break;
	if (wk == NULL) {
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		return;
	}
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	} else
		free_jsegdep(jsegdep);
}

/*
 * Routines for tracking and managing workitems.
 */
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);
static	void workitem_reassign(struct worklist *, int);

#define	WORKITEM_FREE(item, type) \
	workitem_free((struct worklist *)(item), (type))
#define	WORKITEM_REASSIGN(item, type) \
	workitem_reassign((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;

#ifdef DEBUG
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list",
		    TYPENAME(item->wk_type), item->wk_state);
	if (item->wk_type != type && type != D_NEWBLK)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
#endif
	if (item->wk_state & IOWAITING)
		wakeup(item);
	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_deps > 0,
	    ("workitem_free: %s: softdep_deps going negative",
	    ump->um_fs->fs_fsmnt));
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_free: %s: dep_current[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	atomic_subtract_long(&dep_current[item->wk_type], 1);
	ump->softdep_curdeps[item->wk_type] -= 1;
	free(item, DtoM(type));
}
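
/*
 * Initialize a newly allocated workitem and charge it against the
 * global per-type statistics and the per-mount dependency counts.
 */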
static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	struct ufsmount *ump;

	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;

	ump = VFSTOUFS(mp);
	ACQUIRE_GBLLOCK(&lk);
	dep_current[type]++;
	if (dep_current[type] > dep_highuse[type])
		dep_highuse[type] = dep_current[type];
	dep_total[type]++;
	FREE_GBLLOCK(&lk);
	ACQUIRE_LOCK(ump);
	ump->softdep_curdeps[type] += 1;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
	FREE_LOCK(ump);
}
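
/*
 * Convert an existing workitem from one dependency type to another,
 * keeping both the global and the per-mount type accounting consistent.
 */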
static void
workitem_reassign(item, newtype)
	struct worklist *item;
	int newtype;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ump->softdep_curdeps[item->wk_type] -= 1;
	ump->softdep_curdeps[newtype] += 1;
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_reassign: %s: dep_current[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ACQUIRE_GBLLOCK(&lk);
	dep_current[newtype]++;
	dep_current[item->wk_type]--;
	if (dep_current[newtype] > dep_highuse[newtype])
		dep_highuse[newtype] = dep_current[newtype];
	dep_total[newtype]++;
	FREE_GBLLOCK(&lk);
	item->wk_type = newtype;
}

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_flush_threads;	/* number of softdep flushing threads */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */
static int stat_emptyjblocks; /* Number of potentially empty journal blocks */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
    &stat_flush_threads, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
    &stat_emptyjblocks, 0, "");

SYSCTL_DECL(_vfs_ffs);
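
/*
 * Note that the tunables and counters declared above are exported under
 * the debug.softdep sysctl tree (e.g. "sysctl debug.softdep.tickdelay");
 * the summary-recomputation knob below sits under vfs.ffs instead.
 */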
/* Whether to recompute the summary at mount time */
static int compute_summary_at_mount = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");
static int print_threads = 0;
SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
    &print_threads, 0, "Notify flusher thread start/stop");

/* List of all filesystems mounted with soft updates */
static TAILQ_HEAD(, mount_softdeps) softdepmounts;

/*
 * This function cleans the worklist for a filesystem.
 * Each filesystem running with soft dependencies gets its own
 * thread to run in this function. The thread is started up in
 * softdep_mount and shut down in softdep_unmount. These threads
 * show up as part of the kernel "bufdaemon" process, whose process
 * entry is available in bufdaemonproc.
 */
static int searchfailed;
extern struct proc *bufdaemonproc;
static void
softdep_flush(addr)
	void *addr;
{
	struct mount *mp;
	struct thread *td;
	struct ufsmount *ump;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;
	mp = (struct mount *)addr;
	ump = VFSTOUFS(mp);
	atomic_add_int(&stat_flush_threads, 1);
	ACQUIRE_LOCK(ump);
	ump->softdep_flags &= ~FLUSH_STARTING;
	wakeup(&ump->softdep_flushtd);
	FREE_LOCK(ump);
	if (print_threads) {
		if (stat_flush_threads == 1)
			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
			    bufdaemonproc->p_pid);
		printf("Start thread %s\n", td->td_name);
	}
	for (;;) {
		while (softdep_process_worklist(mp, 0) > 0 ||
		    (MOUNTEDSUJ(mp) &&
		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
			kthread_suspend_check();
		ACQUIRE_LOCK(ump);
		if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
			    "sdflush", hz / 2);
		ump->softdep_flags &= ~FLUSH_CLEANUP;
		/*
		 * Check to see if we are done and need to exit.
		 */
		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
			FREE_LOCK(ump);
			continue;
		}
		ump->softdep_flags &= ~FLUSH_EXIT;
		FREE_LOCK(ump);
		wakeup(&ump->softdep_flags);
		if (print_threads)
			printf("Stop thread %s: searchfailed %d, "
			    "did cleanups %d\n", td->td_name, searchfailed,
			    ump->um_softdep->sd_cleanups);
		atomic_subtract_int(&stat_flush_threads, 1);
		kthread_exit();
		panic("kthread_exit failed\n");
	}
}

/*
 * Wake the flusher thread for this filesystem, asking it to start a
 * cleanup pass if one is not already pending.
 */
static void
worklist_speedup(mp)
	struct mount *mp;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
		ump->softdep_flags |= FLUSH_CLEANUP;
	wakeup(&ump->softdep_flushtd);
}
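
/*
 * Speed up dependency processing: wake this filesystem's flusher thread
 * and the buf daemon and, when a global resource shortage has been
 * flagged, also kick the flusher of another filesystem that is holding
 * more than its proportional share of dependencies.
 */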
static int
softdep_speedup(ump)
	struct ufsmount *ump;
{
	struct ufsmount *altump;
	struct mount_softdeps *sdp;

	LOCK_OWNED(ump);
	worklist_speedup(ump->um_mountp);
	bd_speedup();
	/*
	 * If we have global shortages, then we need other
	 * filesystems to help with the cleanup. Here we wakeup a
	 * flusher thread for a filesystem that is over its fair
	 * share of resources.
	 */
	if (req_clear_inodedeps || req_clear_remove) {
		ACQUIRE_GBLLOCK(&lk);
		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
			if ((altump = sdp->sd_ump) == ump)
				continue;
			if (((req_clear_inodedeps &&
			    altump->softdep_curdeps[D_INODEDEP] >
			    max_softdeps / stat_flush_threads) ||
			    (req_clear_remove &&
			    altump->softdep_curdeps[D_DIRREM] >
			    (max_softdeps / 2) / stat_flush_threads)) &&
			    TRY_ACQUIRE_LOCK(altump))
				break;
		}
		if (sdp == NULL) {
			searchfailed++;
			FREE_GBLLOCK(&lk);
		} else {
			/*
			 * Move to the end of the list so we pick a
			 * different one on our next try.
			 */
			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
			FREE_GBLLOCK(&lk);
			if ((altump->softdep_flags &
			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
				altump->softdep_flags |= FLUSH_CLEANUP;
			altump->um_softdep->sd_cleanups++;
			wakeup(&altump->softdep_flushtd);
			FREE_LOCK(altump);
		}
	}
	return (speedup_syncer());
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */

#define	WK_HEAD		0x0001	/* Add to HEAD. */
#define	WK_NODELAY	0x0002	/* Process immediately. */

static void
add_to_worklist(wk, flags)
	struct worklist *wk;
	int flags;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	} else {
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	}
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
		worklist_speedup(wk->wk_mp);
}

/*
 * Remove the item to be processed. If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
 */
static void
remove_from_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	WORKLIST_REMOVE(wk);
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	ump->softdep_on_worklist -= 1;
}
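
/*
 * The two routines below implement a simple sleep/wakeup handshake on an
 * individual work item: wait_worklist() marks the item IOWAITING and
 * sleeps; wake_worklist() (and workitem_free()) wake any such waiter.
 */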
static void
wake_worklist(wk)
	struct worklist *wk;
{
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
		wakeup(wk);
	}
}

static void
wait_worklist(wk, wmesg)
	struct worklist *wk;
	char *wmesg;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	wk->wk_state |= IOWAITING;
	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which the
 * items appear in the queue. The code below depends on this property to
 * ensure that blocks of a file are freed before the inode itself is freed.
 * This ordering ensures that no new <vfsid, inum, lbn> triples will be
 * generated until all the old ones have been purged from the dependency
 * lists.
 */
static int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	int cnt, matchcnt;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	if (MOUNTEDSOFTDEP(mp) == 0)
		return (0);
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	check_clear_deps(mp);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
			break;
		else
			matchcnt += cnt;
		check_clear_deps(mp);
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (should_yield()) {
			FREE_LOCK(ump);
			kern_yield(PRI_USER);
			bwillwrite();
			ACQUIRE_LOCK(ump);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. This gives the syncer thread the opportunity
		 * to pause if appropriate.
		 */
		if (!full && starttime != time_second)
			break;
	}
	if (full == 0)
		journal_unsuspend(ump);
	FREE_LOCK(ump);
	return (matchcnt);
}

/*
 * Process all removes associated with a vnode if we are running out of
 * journal space.  Any other process that attempts to flush these will
 * fail, since we hold the vnodes locked.
 */
static void
process_removes(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
top:
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			/*
			 * If another thread is trying to lock this vnode
			 * it will fail but we must wait for it to do so
			 * before we can proceed.
			 */
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
				goto top;
			}
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
				break;
		}
		if (dirrem == NULL)
			return;
		remove_from_worklist(&dirrem->dm_list);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
	}
}

/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space.  This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(ump);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(ump);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(ump);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(ump);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(ump);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(ump);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	PHOLD(curproc);		/* Don't let the stack go away. */
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later.  Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	LIST_REMOVE(&sentinel, wk_list);
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
	PRELE(curproc);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
int
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;
	struct ufsmount *ump;
	int dirty;

	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
		return (0);
	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
	    ("softdep_move_dependencies called on non-softdep filesystem"));
	dirty = 0;
	wktail = NULL;
	ump = VFSTOUFS(wk->wk_mp);
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			dirty = 1;
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(ump);

	return (dirty);
}
1880 */ 1881int 1882softdep_flushworklist(oldmnt, countp, td) 1883 struct mount *oldmnt; 1884 int *countp; 1885 struct thread *td; 1886{ 1887 struct vnode *devvp; 1888 struct ufsmount *ump; 1889 int count, error; 1890 1891 /* 1892 * Alternately flush the block device associated with the mount 1893 * point and process any dependencies that the flushing 1894 * creates. We continue until no more worklist dependencies 1895 * are found. 1896 */ 1897 *countp = 0; 1898 error = 0; 1899 ump = VFSTOUFS(oldmnt); 1900 devvp = ump->um_devvp; 1901 while ((count = softdep_process_worklist(oldmnt, 1)) > 0) { 1902 *countp += count; 1903 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1904 error = VOP_FSYNC(devvp, MNT_WAIT, td); 1905 VOP_UNLOCK(devvp, 0); 1906 if (error != 0) 1907 break; 1908 } 1909 return (error); 1910} 1911 1912#define SU_WAITIDLE_RETRIES 20 1913static int 1914softdep_waitidle(struct mount *mp, int flags __unused) 1915{ 1916 struct ufsmount *ump; 1917 struct vnode *devvp; 1918 struct thread *td; 1919 int error, i; 1920 1921 ump = VFSTOUFS(mp); 1922 devvp = ump->um_devvp; 1923 td = curthread; 1924 error = 0; 1925 ACQUIRE_LOCK(ump); 1926 for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) { 1927 ump->softdep_req = 1; 1928 KASSERT((flags & FORCECLOSE) == 0 || 1929 ump->softdep_on_worklist == 0, 1930 ("softdep_waitidle: work added after flush")); 1931 msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP, 1932 "softdeps", 10 * hz); 1933 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1934 error = VOP_FSYNC(devvp, MNT_WAIT, td); 1935 VOP_UNLOCK(devvp, 0); 1936 if (error != 0) 1937 break; 1938 ACQUIRE_LOCK(ump); 1939 } 1940 ump->softdep_req = 0; 1941 if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) { 1942 error = EBUSY; 1943 printf("softdep_waitidle: Failed to flush worklist for %p\n", 1944 mp); 1945 } 1946 FREE_LOCK(ump); 1947 return (error); 1948} 1949 1950/* 1951 * Flush all vnodes and worklist items associated with a specified mount point. 1952 */ 1953int 1954softdep_flushfiles(oldmnt, flags, td) 1955 struct mount *oldmnt; 1956 int flags; 1957 struct thread *td; 1958{ 1959#ifdef QUOTA 1960 struct ufsmount *ump; 1961 int i; 1962#endif 1963 int error, early, depcount, loopcnt, retry_flush_count, retry; 1964 int morework; 1965 1966 KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0, 1967 ("softdep_flushfiles called on non-softdep filesystem")); 1968 loopcnt = 10; 1969 retry_flush_count = 3; 1970retry_flush: 1971 error = 0; 1972 1973 /* 1974 * Alternately flush the vnodes associated with the mount 1975 * point and process any dependencies that the flushing 1976 * creates. In theory, this loop can happen at most twice, 1977 * but we give it a few extra just to be sure. 1978 */ 1979 for (; loopcnt > 0; loopcnt--) { 1980 /* 1981 * Do another flush in case any vnodes were brought in 1982 * as part of the cleanup operations. 1983 */ 1984 early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag & 1985 MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH; 1986 if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0) 1987 break; 1988 if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 || 1989 depcount == 0) 1990 break; 1991 } 1992 /* 1993 * If we are unmounting then it is an error to fail. If we 1994 * are simply trying to downgrade to read-only, then filesystem 1995 * activity can keep us busy forever, so we just fail with EBUSY. 
1996 */ 1997 if (loopcnt == 0) { 1998 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 1999 panic("softdep_flushfiles: looping"); 2000 error = EBUSY; 2001 } 2002 if (!error) 2003 error = softdep_waitidle(oldmnt, flags); 2004 if (!error) { 2005 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { 2006 retry = 0; 2007 MNT_ILOCK(oldmnt); 2008 KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0, 2009 ("softdep_flushfiles: !MNTK_NOINSMNTQ")); 2010 morework = oldmnt->mnt_nvnodelistsize > 0; 2011#ifdef QUOTA 2012 ump = VFSTOUFS(oldmnt); 2013 UFS_LOCK(ump); 2014 for (i = 0; i < MAXQUOTAS; i++) { 2015 if (ump->um_quotas[i] != NULLVP) 2016 morework = 1; 2017 } 2018 UFS_UNLOCK(ump); 2019#endif 2020 if (morework) { 2021 if (--retry_flush_count > 0) { 2022 retry = 1; 2023 loopcnt = 3; 2024 } else 2025 error = EBUSY; 2026 } 2027 MNT_IUNLOCK(oldmnt); 2028 if (retry) 2029 goto retry_flush; 2030 } 2031 } 2032 return (error); 2033} 2034 2035/* 2036 * Structure hashing. 2037 * 2038 * There are four types of structures that can be looked up: 2039 * 1) pagedep structures identified by mount point, inode number, 2040 * and logical block. 2041 * 2) inodedep structures identified by mount point and inode number. 2042 * 3) newblk structures identified by mount point and 2043 * physical block number. 2044 * 4) bmsafemap structures identified by mount point and 2045 * cylinder group number. 2046 * 2047 * The "pagedep" and "inodedep" dependency structures are hashed 2048 * separately from the file blocks and inodes to which they correspond. 2049 * This separation helps when the in-memory copy of an inode or 2050 * file block must be replaced. It also obviates the need to access 2051 * an inode or file page when simply updating (or de-allocating) 2052 * dependency structures. Lookup of newblk structures is needed to 2053 * find newly allocated blocks when trying to associate them with 2054 * their allocdirect or allocindir structure. 2055 * 2056 * The lookup routines optionally create and hash a new instance when 2057 * an existing entry is not found. The bmsafemap lookup routine always 2058 * allocates a new structure if an existing one is not found. 2059 */ 2060#define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 2061#define NODELAY 0x0002 /* cannot do background work */ 2062 2063/* 2064 * Structures and routines associated with pagedep caching. 2065 */ 2066#define PAGEDEP_HASH(ump, inum, lbn) \ 2067 (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size]) 2068 2069static int 2070pagedep_find(pagedephd, ino, lbn, pagedeppp) 2071 struct pagedep_hashhead *pagedephd; 2072 ino_t ino; 2073 ufs_lbn_t lbn; 2074 struct pagedep **pagedeppp; 2075{ 2076 struct pagedep *pagedep; 2077 2078 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 2079 if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) { 2080 *pagedeppp = pagedep; 2081 return (1); 2082 } 2083 } 2084 *pagedeppp = NULL; 2085 return (0); 2086} 2087/* 2088 * Look up a pagedep. Return 1 if found, 0 otherwise. 2089 * If not found, allocate if DEPALLOC flag is passed. 2090 * Found or allocated entry is returned in pagedeppp. 2091 * This routine must be called with splbio interrupts blocked. 
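 *
 * An illustrative sketch (not from the original source) of the
 * lookup-or-create pattern, assuming "ump", "bp", "ip", and "lbn"
 * from a directory write path:
 *
 *	struct pagedep *pagedep;
 *
 *	ACQUIRE_LOCK(ump);
 *	(void) pagedep_lookup(UFSTOVFS(ump), bp, ip->i_number, lbn,
 *	    DEPALLOC, &pagedep);
 *	FREE_LOCK(ump);
 *
 * On return pagedep is hashed and, when "bp" is given, attached to
 * bp->b_dep.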
2092 */ 2093static int 2094pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) 2095 struct mount *mp; 2096 struct buf *bp; 2097 ino_t ino; 2098 ufs_lbn_t lbn; 2099 int flags; 2100 struct pagedep **pagedeppp; 2101{ 2102 struct pagedep *pagedep; 2103 struct pagedep_hashhead *pagedephd; 2104 struct worklist *wk; 2105 struct ufsmount *ump; 2106 int ret; 2107 int i; 2108 2109 ump = VFSTOUFS(mp); 2110 LOCK_OWNED(ump); 2111 if (bp) { 2112 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 2113 if (wk->wk_type == D_PAGEDEP) { 2114 *pagedeppp = WK_PAGEDEP(wk); 2115 return (1); 2116 } 2117 } 2118 } 2119 pagedephd = PAGEDEP_HASH(ump, ino, lbn); 2120 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2121 if (ret) { 2122 if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) 2123 WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); 2124 return (1); 2125 } 2126 if ((flags & DEPALLOC) == 0) 2127 return (0); 2128 FREE_LOCK(ump); 2129 pagedep = malloc(sizeof(struct pagedep), 2130 M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); 2131 workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); 2132 ACQUIRE_LOCK(ump); 2133 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2134 if (*pagedeppp) { 2135 /* 2136 * This should never happen since we only create pagedeps 2137 * with the vnode lock held. Could be an assert. 2138 */ 2139 WORKITEM_FREE(pagedep, D_PAGEDEP); 2140 return (ret); 2141 } 2142 pagedep->pd_ino = ino; 2143 pagedep->pd_lbn = lbn; 2144 LIST_INIT(&pagedep->pd_dirremhd); 2145 LIST_INIT(&pagedep->pd_pendinghd); 2146 for (i = 0; i < DAHASHSZ; i++) 2147 LIST_INIT(&pagedep->pd_diraddhd[i]); 2148 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 2149 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2150 *pagedeppp = pagedep; 2151 return (0); 2152} 2153 2154/* 2155 * Structures and routines associated with inodedep caching. 2156 */ 2157#define INODEDEP_HASH(ump, inum) \ 2158 (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size]) 2159 2160static int 2161inodedep_find(inodedephd, inum, inodedeppp) 2162 struct inodedep_hashhead *inodedephd; 2163 ino_t inum; 2164 struct inodedep **inodedeppp; 2165{ 2166 struct inodedep *inodedep; 2167 2168 LIST_FOREACH(inodedep, inodedephd, id_hash) 2169 if (inum == inodedep->id_ino) 2170 break; 2171 if (inodedep) { 2172 *inodedeppp = inodedep; 2173 return (1); 2174 } 2175 *inodedeppp = NULL; 2176 2177 return (0); 2178} 2179/* 2180 * Look up an inodedep. Return 1 if found, 0 if not found. 2181 * If not found, allocate if DEPALLOC flag is passed. 2182 * Found or allocated entry is returned in inodedeppp. 2183 * This routine must be called with splbio interrupts blocked. 2184 */ 2185static int 2186inodedep_lookup(mp, inum, flags, inodedeppp) 2187 struct mount *mp; 2188 ino_t inum; 2189 int flags; 2190 struct inodedep **inodedeppp; 2191{ 2192 struct inodedep *inodedep; 2193 struct inodedep_hashhead *inodedephd; 2194 struct ufsmount *ump; 2195 struct fs *fs; 2196 2197 ump = VFSTOUFS(mp); 2198 LOCK_OWNED(ump); 2199 fs = ump->um_fs; 2200 inodedephd = INODEDEP_HASH(ump, inum); 2201 2202 if (inodedep_find(inodedephd, inum, inodedeppp)) 2203 return (1); 2204 if ((flags & DEPALLOC) == 0) 2205 return (0); 2206 /* 2207 * If the system is over its limit and our filesystem is 2208 * responsible for more than our share of that usage and 2209 * we are not in a rush, request some inodedep cleanup. 
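 * The loop below re-evaluates both limits after each cleanup pass;
 * NODELAY callers skip the throttle entirely.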
2210 */ 2211 while (dep_current[D_INODEDEP] > max_softdeps && 2212 (flags & NODELAY) == 0 && 2213 ump->softdep_curdeps[D_INODEDEP] > 2214 max_softdeps / stat_flush_threads) 2215 request_cleanup(mp, FLUSH_INODES); 2216 FREE_LOCK(ump); 2217 inodedep = malloc(sizeof(struct inodedep), 2218 M_INODEDEP, M_SOFTDEP_FLAGS); 2219 workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); 2220 ACQUIRE_LOCK(ump); 2221 if (inodedep_find(inodedephd, inum, inodedeppp)) { 2222 WORKITEM_FREE(inodedep, D_INODEDEP); 2223 return (1); 2224 } 2225 inodedep->id_fs = fs; 2226 inodedep->id_ino = inum; 2227 inodedep->id_state = ALLCOMPLETE; 2228 inodedep->id_nlinkdelta = 0; 2229 inodedep->id_savedino1 = NULL; 2230 inodedep->id_savedsize = -1; 2231 inodedep->id_savedextsize = -1; 2232 inodedep->id_savednlink = -1; 2233 inodedep->id_bmsafemap = NULL; 2234 inodedep->id_mkdiradd = NULL; 2235 LIST_INIT(&inodedep->id_dirremhd); 2236 LIST_INIT(&inodedep->id_pendinghd); 2237 LIST_INIT(&inodedep->id_inowait); 2238 LIST_INIT(&inodedep->id_bufwait); 2239 TAILQ_INIT(&inodedep->id_inoreflst); 2240 TAILQ_INIT(&inodedep->id_inoupdt); 2241 TAILQ_INIT(&inodedep->id_newinoupdt); 2242 TAILQ_INIT(&inodedep->id_extupdt); 2243 TAILQ_INIT(&inodedep->id_newextupdt); 2244 TAILQ_INIT(&inodedep->id_freeblklst); 2245 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 2246 *inodedeppp = inodedep; 2247 return (0); 2248} 2249 2250/* 2251 * Structures and routines associated with newblk caching. 2252 */ 2253#define NEWBLK_HASH(ump, inum) \ 2254 (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size]) 2255 2256static int 2257newblk_find(newblkhd, newblkno, flags, newblkpp) 2258 struct newblk_hashhead *newblkhd; 2259 ufs2_daddr_t newblkno; 2260 int flags; 2261 struct newblk **newblkpp; 2262{ 2263 struct newblk *newblk; 2264 2265 LIST_FOREACH(newblk, newblkhd, nb_hash) { 2266 if (newblkno != newblk->nb_newblkno) 2267 continue; 2268 /* 2269 * If we're creating a new dependency don't match those that 2270 * have already been converted to allocdirects. This is for 2271 * a frag extend. 2272 */ 2273 if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) 2274 continue; 2275 break; 2276 } 2277 if (newblk) { 2278 *newblkpp = newblk; 2279 return (1); 2280 } 2281 *newblkpp = NULL; 2282 return (0); 2283} 2284 2285/* 2286 * Look up a newblk. Return 1 if found, 0 if not found. 2287 * If not found, allocate if DEPALLOC flag is passed. 2288 * Found or allocated entry is returned in newblkpp. 
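 * As in the other lookup routines, the per-filesystem lock is
 * dropped around the allocation and the hash chain is searched
 * again afterward to catch a racing insertion.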
2289 */ 2290static int 2291newblk_lookup(mp, newblkno, flags, newblkpp) 2292 struct mount *mp; 2293 ufs2_daddr_t newblkno; 2294 int flags; 2295 struct newblk **newblkpp; 2296{ 2297 struct newblk *newblk; 2298 struct newblk_hashhead *newblkhd; 2299 struct ufsmount *ump; 2300 2301 ump = VFSTOUFS(mp); 2302 LOCK_OWNED(ump); 2303 newblkhd = NEWBLK_HASH(ump, newblkno); 2304 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) 2305 return (1); 2306 if ((flags & DEPALLOC) == 0) 2307 return (0); 2308 FREE_LOCK(ump); 2309 newblk = malloc(sizeof(union allblk), M_NEWBLK, 2310 M_SOFTDEP_FLAGS | M_ZERO); 2311 workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); 2312 ACQUIRE_LOCK(ump); 2313 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) { 2314 WORKITEM_FREE(newblk, D_NEWBLK); 2315 return (1); 2316 } 2317 newblk->nb_freefrag = NULL; 2318 LIST_INIT(&newblk->nb_indirdeps); 2319 LIST_INIT(&newblk->nb_newdirblk); 2320 LIST_INIT(&newblk->nb_jwork); 2321 newblk->nb_state = ATTACHED; 2322 newblk->nb_newblkno = newblkno; 2323 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 2324 *newblkpp = newblk; 2325 return (0); 2326} 2327 2328/* 2329 * Structures and routines associated with freed indirect block caching. 2330 */ 2331#define INDIR_HASH(ump, blkno) \ 2332 (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size]) 2333 2334/* 2335 * Lookup an indirect block in the indir hash table. The freework is 2336 * removed and potentially freed. The caller must do a blocking journal 2337 * write before writing to the blkno. 2338 */ 2339static int 2340indirblk_lookup(mp, blkno) 2341 struct mount *mp; 2342 ufs2_daddr_t blkno; 2343{ 2344 struct freework *freework; 2345 struct indir_hashhead *wkhd; 2346 struct ufsmount *ump; 2347 2348 ump = VFSTOUFS(mp); 2349 wkhd = INDIR_HASH(ump, blkno); 2350 TAILQ_FOREACH(freework, wkhd, fw_next) { 2351 if (freework->fw_blkno != blkno) 2352 continue; 2353 indirblk_remove(freework); 2354 return (1); 2355 } 2356 return (0); 2357} 2358 2359/* 2360 * Insert an indirect block represented by freework into the indirblk 2361 * hash table so that it may prevent the block from being re-used prior 2362 * to the journal being written. 2363 */ 2364static void 2365indirblk_insert(freework) 2366 struct freework *freework; 2367{ 2368 struct jblocks *jblocks; 2369 struct jseg *jseg; 2370 struct ufsmount *ump; 2371 2372 ump = VFSTOUFS(freework->fw_list.wk_mp); 2373 jblocks = ump->softdep_jblocks; 2374 jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); 2375 if (jseg == NULL) 2376 return; 2377 2378 LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); 2379 TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework, 2380 fw_next); 2381 freework->fw_state &= ~DEPCOMPLETE; 2382} 2383 2384static void 2385indirblk_remove(freework) 2386 struct freework *freework; 2387{ 2388 struct ufsmount *ump; 2389 2390 ump = VFSTOUFS(freework->fw_list.wk_mp); 2391 LIST_REMOVE(freework, fw_segs); 2392 TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); 2393 freework->fw_state |= DEPCOMPLETE; 2394 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 2395 WORKITEM_FREE(freework, D_FREEWORK); 2396} 2397 2398/* 2399 * Executed during filesystem initialization before 2400 * mounting any filesystems.
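 * It sizes the global dependency limit from desiredvnodes and
 * installs the bioops hooks through which the buffer cache calls
 * back into the soft updates code.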
2401 */ 2402void 2403softdep_initialize() 2404{ 2405 2406 TAILQ_INIT(&softdepmounts); 2407 max_softdeps = desiredvnodes * 4; 2408 2409 /* initialise bioops hack */ 2410 bioops.io_start = softdep_disk_io_initiation; 2411 bioops.io_complete = softdep_disk_write_complete; 2412 bioops.io_deallocate = softdep_deallocate_dependencies; 2413 bioops.io_countdeps = softdep_count_dependencies; 2414 2415 /* Initialize the callout with an mtx. */ 2416 callout_init_mtx(&softdep_callout, &lk, 0); 2417} 2418 2419/* 2420 * Executed after all filesystems have been unmounted during 2421 * filesystem module unload. 2422 */ 2423void 2424softdep_uninitialize() 2425{ 2426 2427 /* clear bioops hack */ 2428 bioops.io_start = NULL; 2429 bioops.io_complete = NULL; 2430 bioops.io_deallocate = NULL; 2431 bioops.io_countdeps = NULL; 2432 2433 callout_drain(&softdep_callout); 2434} 2435 2436/* 2437 * Called at mount time to notify the dependency code that a 2438 * filesystem wishes to use it. 2439 */ 2440int 2441softdep_mount(devvp, mp, fs, cred) 2442 struct vnode *devvp; 2443 struct mount *mp; 2444 struct fs *fs; 2445 struct ucred *cred; 2446{ 2447 struct csum_total cstotal; 2448 struct mount_softdeps *sdp; 2449 struct ufsmount *ump; 2450 struct cg *cgp; 2451 struct buf *bp; 2452 int i, error, cyl; 2453 2454 sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA, 2455 M_WAITOK | M_ZERO); 2456 MNT_ILOCK(mp); 2457 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; 2458 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { 2459 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | 2460 MNTK_SOFTDEP | MNTK_NOASYNC; 2461 } 2462 ump = VFSTOUFS(mp); 2463 ump->um_softdep = sdp; 2464 MNT_IUNLOCK(mp); 2465 rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock"); 2466 sdp->sd_ump = ump; 2467 LIST_INIT(&ump->softdep_workitem_pending); 2468 LIST_INIT(&ump->softdep_journal_pending); 2469 TAILQ_INIT(&ump->softdep_unlinked); 2470 LIST_INIT(&ump->softdep_dirtycg); 2471 ump->softdep_worklist_tail = NULL; 2472 ump->softdep_on_worklist = 0; 2473 ump->softdep_deps = 0; 2474 LIST_INIT(&ump->softdep_mkdirlisthd); 2475 ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 2476 &ump->pagedep_hash_size); 2477 ump->pagedep_nextclean = 0; 2478 ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, 2479 &ump->inodedep_hash_size); 2480 ump->inodedep_nextclean = 0; 2481 ump->newblk_hashtbl = hashinit(max_softdeps / 2, M_NEWBLK, 2482 &ump->newblk_hash_size); 2483 ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, 2484 &ump->bmsafemap_hash_size); 2485 i = 1 << (ffs(desiredvnodes / 10) - 1); 2486 ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead), 2487 M_FREEWORK, M_WAITOK); 2488 ump->indir_hash_size = i - 1; 2489 for (i = 0; i <= ump->indir_hash_size; i++) 2490 TAILQ_INIT(&ump->indir_hashtbl[i]); 2491 ACQUIRE_GBLLOCK(&lk); 2492 TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); 2493 FREE_GBLLOCK(&lk); 2494 if ((fs->fs_flags & FS_SUJ) && 2495 (error = journal_mount(mp, fs, cred)) != 0) { 2496 printf("Failed to start journal: %d\n", error); 2497 softdep_unmount(mp); 2498 return (error); 2499 } 2500 /* 2501 * Start our flushing thread in the bufdaemon process. 
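 * The thread is expected to clear FLUSH_STARTING once it is
 * running; the msleep() loop below waits for that handshake before
 * the mount proceeds.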
2502 */ 2503 ACQUIRE_LOCK(ump); 2504 ump->softdep_flags |= FLUSH_STARTING; 2505 FREE_LOCK(ump); 2506 kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc, 2507 &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker", 2508 mp->mnt_stat.f_mntonname); 2509 ACQUIRE_LOCK(ump); 2510 while ((ump->softdep_flags & FLUSH_STARTING) != 0) { 2511 msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart", 2512 hz / 2); 2513 } 2514 FREE_LOCK(ump); 2515 /* 2516 * When doing soft updates, the counters in the 2517 * superblock may have gotten out of sync. Recomputation 2518 * can take a long time and can be deferred for background 2519 * fsck. However, the old behavior of scanning the cylinder 2520 * groups and recalculating them at mount time is available 2521 * by setting vfs.ffs.compute_summary_at_mount to one. 2522 */ 2523 if (compute_summary_at_mount == 0 || fs->fs_clean != 0) 2524 return (0); 2525 bzero(&cstotal, sizeof cstotal); 2526 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 2527 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 2528 fs->fs_cgsize, cred, &bp)) != 0) { 2529 brelse(bp); 2530 softdep_unmount(mp); 2531 return (error); 2532 } 2533 cgp = (struct cg *)bp->b_data; 2534 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 2535 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 2536 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 2537 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 2538 fs->fs_cs(fs, cyl) = cgp->cg_cs; 2539 brelse(bp); 2540 } 2541#ifdef DEBUG 2542 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 2543 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 2544#endif 2545 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 2546 return (0); 2547} 2548 2549void 2550softdep_unmount(mp) 2551 struct mount *mp; 2552{ 2553 struct ufsmount *ump; 2554#ifdef INVARIANTS 2555 int i; 2556#endif 2557 2558 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 2559 ("softdep_unmount called on non-softdep filesystem")); 2560 ump = VFSTOUFS(mp); 2561 MNT_ILOCK(mp); 2562 mp->mnt_flag &= ~MNT_SOFTDEP; 2563 if (MOUNTEDSUJ(mp) == 0) { 2564 MNT_IUNLOCK(mp); 2565 } else { 2566 mp->mnt_flag &= ~MNT_SUJ; 2567 MNT_IUNLOCK(mp); 2568 journal_unmount(ump); 2569 } 2570 /* 2571 * Shut down our flushing thread. The check for NULL is needed 2572 * in case softdep_mount errored out before the thread was created. 2573 */ 2574 if (ump->softdep_flushtd != NULL) { 2575 ACQUIRE_LOCK(ump); 2576 ump->softdep_flags |= FLUSH_EXIT; 2577 wakeup(&ump->softdep_flushtd); 2578 msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP, 2579 "sdwait", 0); 2580 KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0, 2581 ("Thread shutdown failed")); 2582 } 2583 /* 2584 * Free up our resources.
2585 */ 2586 ACQUIRE_GBLLOCK(&lk); 2587 TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next); 2588 FREE_GBLLOCK(&lk); 2589 rw_destroy(LOCK_PTR(ump)); 2590 hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size); 2591 hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size); 2592 hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size); 2593 hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP, 2594 ump->bmsafemap_hash_size); 2595 free(ump->indir_hashtbl, M_FREEWORK); 2596#ifdef INVARIANTS 2597 for (i = 0; i <= D_LAST; i++) 2598 KASSERT(ump->softdep_curdeps[i] == 0, 2599 ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt, 2600 TYPENAME(i), ump->softdep_curdeps[i])); 2601#endif 2602 free(ump->um_softdep, M_MOUNTDATA); 2603} 2604 2605static struct jblocks * 2606jblocks_create(void) 2607{ 2608 struct jblocks *jblocks; 2609 2610 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); 2611 TAILQ_INIT(&jblocks->jb_segs); 2612 jblocks->jb_avail = 10; 2613 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2614 M_JBLOCKS, M_WAITOK | M_ZERO); 2615 2616 return (jblocks); 2617} 2618 2619static ufs2_daddr_t 2620jblocks_alloc(jblocks, bytes, actual) 2621 struct jblocks *jblocks; 2622 int bytes; 2623 int *actual; 2624{ 2625 ufs2_daddr_t daddr; 2626 struct jextent *jext; 2627 int freecnt; 2628 int blocks; 2629 2630 blocks = bytes / DEV_BSIZE; 2631 jext = &jblocks->jb_extent[jblocks->jb_head]; 2632 freecnt = jext->je_blocks - jblocks->jb_off; 2633 if (freecnt == 0) { 2634 jblocks->jb_off = 0; 2635 if (++jblocks->jb_head > jblocks->jb_used) 2636 jblocks->jb_head = 0; 2637 jext = &jblocks->jb_extent[jblocks->jb_head]; 2638 freecnt = jext->je_blocks; 2639 } 2640 if (freecnt > blocks) 2641 freecnt = blocks; 2642 *actual = freecnt * DEV_BSIZE; 2643 daddr = jext->je_daddr + jblocks->jb_off; 2644 jblocks->jb_off += freecnt; 2645 jblocks->jb_free -= freecnt; 2646 2647 return (daddr); 2648} 2649 2650static void 2651jblocks_free(jblocks, mp, bytes) 2652 struct jblocks *jblocks; 2653 struct mount *mp; 2654 int bytes; 2655{ 2656 2657 LOCK_OWNED(VFSTOUFS(mp)); 2658 jblocks->jb_free += bytes / DEV_BSIZE; 2659 if (jblocks->jb_suspended) 2660 worklist_speedup(mp); 2661 wakeup(jblocks); 2662} 2663 2664static void 2665jblocks_destroy(jblocks) 2666 struct jblocks *jblocks; 2667{ 2668 2669 if (jblocks->jb_extent) 2670 free(jblocks->jb_extent, M_JBLOCKS); 2671 free(jblocks, M_JBLOCKS); 2672} 2673 2674static void 2675jblocks_add(jblocks, daddr, blocks) 2676 struct jblocks *jblocks; 2677 ufs2_daddr_t daddr; 2678 int blocks; 2679{ 2680 struct jextent *jext; 2681 2682 jblocks->jb_blocks += blocks; 2683 jblocks->jb_free += blocks; 2684 jext = &jblocks->jb_extent[jblocks->jb_used]; 2685 /* Adding the first block. */ 2686 if (jext->je_daddr == 0) { 2687 jext->je_daddr = daddr; 2688 jext->je_blocks = blocks; 2689 return; 2690 } 2691 /* Extending the last extent. */ 2692 if (jext->je_daddr + jext->je_blocks == daddr) { 2693 jext->je_blocks += blocks; 2694 return; 2695 } 2696 /* Adding a new extent. 
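 * The extent array grows by doubling when it fills: a new array is
 * allocated, the old entries are copied over, and the old array is
 * freed.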
*/ 2697 if (++jblocks->jb_used == jblocks->jb_avail) { 2698 jblocks->jb_avail *= 2; 2699 jext = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2700 M_JBLOCKS, M_WAITOK | M_ZERO); 2701 memcpy(jext, jblocks->jb_extent, 2702 sizeof(struct jextent) * jblocks->jb_used); 2703 free(jblocks->jb_extent, M_JBLOCKS); 2704 jblocks->jb_extent = jext; 2705 } 2706 jext = &jblocks->jb_extent[jblocks->jb_used]; 2707 jext->je_daddr = daddr; 2708 jext->je_blocks = blocks; 2709 return; 2710} 2711 2712int 2713softdep_journal_lookup(mp, vpp) 2714 struct mount *mp; 2715 struct vnode **vpp; 2716{ 2717 struct componentname cnp; 2718 struct vnode *dvp; 2719 ino_t sujournal; 2720 int error; 2721 2722 error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp); 2723 if (error) 2724 return (error); 2725 bzero(&cnp, sizeof(cnp)); 2726 cnp.cn_nameiop = LOOKUP; 2727 cnp.cn_flags = ISLASTCN; 2728 cnp.cn_thread = curthread; 2729 cnp.cn_cred = curthread->td_ucred; 2730 cnp.cn_pnbuf = SUJ_FILE; 2731 cnp.cn_nameptr = SUJ_FILE; 2732 cnp.cn_namelen = strlen(SUJ_FILE); 2733 error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal); 2734 vput(dvp); 2735 if (error != 0) 2736 return (error); 2737 error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp); 2738 return (error); 2739} 2740 2741/* 2742 * Open and verify the journal file. 2743 */ 2744static int 2745journal_mount(mp, fs, cred) 2746 struct mount *mp; 2747 struct fs *fs; 2748 struct ucred *cred; 2749{ 2750 struct jblocks *jblocks; 2751 struct ufsmount *ump; 2752 struct vnode *vp; 2753 struct inode *ip; 2754 ufs2_daddr_t blkno; 2755 int bcount; 2756 int error; 2757 int i; 2758 2759 ump = VFSTOUFS(mp); 2760 ump->softdep_journal_tail = NULL; 2761 ump->softdep_on_journal = 0; 2762 ump->softdep_accdeps = 0; 2763 ump->softdep_req = 0; 2764 ump->softdep_jblocks = NULL; 2765 error = softdep_journal_lookup(mp, &vp); 2766 if (error != 0) { 2767 printf("Failed to find journal. Use tunefs to create one\n"); 2768 return (error); 2769 } 2770 ip = VTOI(vp); 2771 if (ip->i_size < SUJ_MIN) { 2772 error = ENOSPC; 2773 goto out; 2774 } 2775 bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */ 2776 jblocks = jblocks_create(); 2777 for (i = 0; i < bcount; i++) { 2778 error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL); 2779 if (error) 2780 break; 2781 jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag)); 2782 } 2783 if (error) { 2784 jblocks_destroy(jblocks); 2785 goto out; 2786 } 2787 jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */ 2788 jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */ 2789 ump->softdep_jblocks = jblocks; 2790out: 2791 if (error == 0) { 2792 MNT_ILOCK(mp); 2793 mp->mnt_flag |= MNT_SUJ; 2794 mp->mnt_flag &= ~MNT_SOFTDEP; 2795 MNT_IUNLOCK(mp); 2796 /* 2797 * Only validate the journal contents if the 2798 * filesystem is clean, otherwise we write the logs 2799 * but they'll never be used. If the filesystem was 2800 * still dirty when we mounted it the journal is 2801 * invalid and a new journal can only be valid if it 2802 * starts from a clean mount. 2803 */ 2804 if (fs->fs_clean) { 2805 DIP_SET(ip, i_modrev, fs->fs_mtime); 2806 ip->i_flags |= IN_MODIFIED; 2807 ffs_update(vp, 1); 2808 } 2809 } 2810 vput(vp); 2811 return (error); 2812} 2813 2814static void 2815journal_unmount(ump) 2816 struct ufsmount *ump; 2817{ 2818 2819 if (ump->softdep_jblocks) 2820 jblocks_destroy(ump->softdep_jblocks); 2821 ump->softdep_jblocks = NULL; 2822} 2823 2824/* 2825 * Called when a journal record is ready to be written. 
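 * (The caller holds the per-filesystem lock; the record is only
 * queued here.)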
Space is allocated 2826 * and the journal entry is created when the journal is flushed to stable 2827 * store. 2828 */ 2829static void 2830add_to_journal(wk) 2831 struct worklist *wk; 2832{ 2833 struct ufsmount *ump; 2834 2835 ump = VFSTOUFS(wk->wk_mp); 2836 LOCK_OWNED(ump); 2837 if (wk->wk_state & ONWORKLIST) 2838 panic("add_to_journal: %s(0x%X) already on list", 2839 TYPENAME(wk->wk_type), wk->wk_state); 2840 wk->wk_state |= ONWORKLIST | DEPCOMPLETE; 2841 if (LIST_EMPTY(&ump->softdep_journal_pending)) { 2842 ump->softdep_jblocks->jb_age = ticks; 2843 LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list); 2844 } else 2845 LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list); 2846 ump->softdep_journal_tail = wk; 2847 ump->softdep_on_journal += 1; 2848} 2849 2850/* 2851 * Remove an arbitrary item from the journal worklist, maintaining the tail 2852 * pointer. This happens when a new operation obviates the need to 2853 * journal an old operation. 2854 */ 2855static void 2856remove_from_journal(wk) 2857 struct worklist *wk; 2858{ 2859 struct ufsmount *ump; 2860 2861 ump = VFSTOUFS(wk->wk_mp); 2862 LOCK_OWNED(ump); 2863#ifdef SUJ_DEBUG 2864 { 2865 struct worklist *wkn; 2866 2867 LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list) 2868 if (wkn == wk) 2869 break; 2870 if (wkn == NULL) 2871 panic("remove_from_journal: %p is not in journal", wk); 2872 } 2873#endif 2874 /* 2875 * We emulate a TAILQ to save space in most structures which do not 2876 * require TAILQ semantics. Here we must update the tail position 2877 * when removing the tail which is not the final entry. This works 2878 * only if the worklist linkage is at the beginning of the structure. 2879 */ 2880 if (ump->softdep_journal_tail == wk) 2881 ump->softdep_journal_tail = 2882 (struct worklist *)wk->wk_list.le_prev; 2883 2884 WORKLIST_REMOVE(wk); 2885 ump->softdep_on_journal -= 1; 2886} 2887 2888/* 2889 * Check for journal space as well as dependency limits so the prelink 2890 * code can throttle both journaled and non-journaled filesystems. 2891 * Threshold is 0 for low and 1 for min. 2892 */ 2893static int 2894journal_space(ump, thresh) 2895 struct ufsmount *ump; 2896 int thresh; 2897{ 2898 struct jblocks *jblocks; 2899 int limit, avail; 2900 2901 jblocks = ump->softdep_jblocks; 2902 if (jblocks == NULL) 2903 return (1); 2904 /* 2905 * We use a tighter restriction here to prevent request_cleanup(), 2906 * running in other threads, from running into locks we currently hold. 2907 * We have to be over the limit and our filesystem has to be 2908 * responsible for more than our share of that usage.
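 * Concretely, the cap computed below is nine-tenths of max_softdeps,
 * with the per-filesystem share scaled down by the number of flush
 * threads.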
2909 */ 2910 limit = (max_softdeps / 10) * 9; 2911 if (dep_current[D_INODEDEP] > limit && 2912 ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads) 2913 return (0); 2914 if (thresh) 2915 thresh = jblocks->jb_min; 2916 else 2917 thresh = jblocks->jb_low; 2918 avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE; 2919 avail = jblocks->jb_free - avail; 2920 2921 return (avail > thresh); 2922} 2923 2924static void 2925journal_suspend(ump) 2926 struct ufsmount *ump; 2927{ 2928 struct jblocks *jblocks; 2929 struct mount *mp; 2930 2931 mp = UFSTOVFS(ump); 2932 jblocks = ump->softdep_jblocks; 2933 MNT_ILOCK(mp); 2934 if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 2935 stat_journal_min++; 2936 mp->mnt_kern_flag |= MNTK_SUSPEND; 2937 mp->mnt_susp_owner = ump->softdep_flushtd; 2938 } 2939 jblocks->jb_suspended = 1; 2940 MNT_IUNLOCK(mp); 2941} 2942 2943static int 2944journal_unsuspend(struct ufsmount *ump) 2945{ 2946 struct jblocks *jblocks; 2947 struct mount *mp; 2948 2949 mp = UFSTOVFS(ump); 2950 jblocks = ump->softdep_jblocks; 2951 2952 if (jblocks != NULL && jblocks->jb_suspended && 2953 journal_space(ump, jblocks->jb_min)) { 2954 jblocks->jb_suspended = 0; 2955 FREE_LOCK(ump); 2956 mp->mnt_susp_owner = curthread; 2957 vfs_write_resume(mp, 0); 2958 ACQUIRE_LOCK(ump); 2959 return (1); 2960 } 2961 return (0); 2962} 2963 2964/* 2965 * Called before any allocation function to be certain that there is 2966 * sufficient space in the journal prior to creating any new records. 2967 * Since in the case of block allocation we may have multiple locked 2968 * buffers at the time of the actual allocation we can not block 2969 * when the journal records are created. Doing so would create a deadlock 2970 * if any of these buffers needed to be flushed to reclaim space. Instead 2971 * we require a sufficiently large amount of available space such that 2972 * each thread in the system could have passed this allocation check and 2973 * still have sufficient free space. With 20% of a minimum journal size 2974 * of 1MB we have 6553 records available. 2975 */ 2976int 2977softdep_prealloc(vp, waitok) 2978 struct vnode *vp; 2979 int waitok; 2980{ 2981 struct ufsmount *ump; 2982 2983 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 2984 ("softdep_prealloc called on non-softdep filesystem")); 2985 /* 2986 * Nothing to do if we are not running journaled soft updates. 2987 * If we currently hold the snapshot lock, we must avoid handling 2988 * other resources that could cause deadlock. 2989 */ 2990 if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp))) 2991 return (0); 2992 ump = VFSTOUFS(vp->v_mount); 2993 ACQUIRE_LOCK(ump); 2994 if (journal_space(ump, 0)) { 2995 FREE_LOCK(ump); 2996 return (0); 2997 } 2998 stat_journal_low++; 2999 FREE_LOCK(ump); 3000 if (waitok == MNT_NOWAIT) 3001 return (ENOSPC); 3002 /* 3003 * Attempt to sync this vnode once to flush any journal 3004 * work attached to it. 3005 */ 3006 if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0) 3007 ffs_syncvnode(vp, waitok, 0); 3008 ACQUIRE_LOCK(ump); 3009 process_removes(vp); 3010 process_truncates(vp); 3011 if (journal_space(ump, 0) == 0) { 3012 softdep_speedup(ump); 3013 if (journal_space(ump, 1) == 0) 3014 journal_suspend(ump); 3015 } 3016 FREE_LOCK(ump); 3017 3018 return (0); 3019} 3020 3021/* 3022 * Before adjusting a link count on a vnode verify that we have sufficient 3023 * journal space. 
If not, process operations that depend on the currently 3024 * locked pair of vnodes to try to flush space as the syncer, buf daemon, 3025 * and softdep flush threads can not acquire these locks to reclaim space. 3026 */ 3027static void 3028softdep_prelink(dvp, vp) 3029 struct vnode *dvp; 3030 struct vnode *vp; 3031{ 3032 struct ufsmount *ump; 3033 3034 ump = VFSTOUFS(dvp->v_mount); 3035 LOCK_OWNED(ump); 3036 /* 3037 * Nothing to do if we have sufficient journal space. 3038 * If we currently hold the snapshot lock, we must avoid 3039 * handling other resources that could cause deadlock. 3040 */ 3041 if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp)))) 3042 return; 3043 stat_journal_low++; 3044 FREE_LOCK(ump); 3045 if (vp) 3046 ffs_syncvnode(vp, MNT_NOWAIT, 0); 3047 ffs_syncvnode(dvp, MNT_WAIT, 0); 3048 ACQUIRE_LOCK(ump); 3049 /* Process vp before dvp as it may create .. removes. */ 3050 if (vp) { 3051 process_removes(vp); 3052 process_truncates(vp); 3053 } 3054 process_removes(dvp); 3055 process_truncates(dvp); 3056 softdep_speedup(ump); 3057 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT); 3058 if (journal_space(ump, 0) == 0) { 3059 softdep_speedup(ump); 3060 if (journal_space(ump, 1) == 0) 3061 journal_suspend(ump); 3062 } 3063} 3064 3065static void 3066jseg_write(ump, jseg, data) 3067 struct ufsmount *ump; 3068 struct jseg *jseg; 3069 uint8_t *data; 3070{ 3071 struct jsegrec *rec; 3072 3073 rec = (struct jsegrec *)data; 3074 rec->jsr_seq = jseg->js_seq; 3075 rec->jsr_oldest = jseg->js_oldseq; 3076 rec->jsr_cnt = jseg->js_cnt; 3077 rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize; 3078 rec->jsr_crc = 0; 3079 rec->jsr_time = ump->um_fs->fs_mtime; 3080} 3081 3082static inline void 3083inoref_write(inoref, jseg, rec) 3084 struct inoref *inoref; 3085 struct jseg *jseg; 3086 struct jrefrec *rec; 3087{ 3088 3089 inoref->if_jsegdep->jd_seg = jseg; 3090 rec->jr_ino = inoref->if_ino; 3091 rec->jr_parent = inoref->if_parent; 3092 rec->jr_nlink = inoref->if_nlink; 3093 rec->jr_mode = inoref->if_mode; 3094 rec->jr_diroff = inoref->if_diroff; 3095} 3096 3097static void 3098jaddref_write(jaddref, jseg, data) 3099 struct jaddref *jaddref; 3100 struct jseg *jseg; 3101 uint8_t *data; 3102{ 3103 struct jrefrec *rec; 3104 3105 rec = (struct jrefrec *)data; 3106 rec->jr_op = JOP_ADDREF; 3107 inoref_write(&jaddref->ja_ref, jseg, rec); 3108} 3109 3110static void 3111jremref_write(jremref, jseg, data) 3112 struct jremref *jremref; 3113 struct jseg *jseg; 3114 uint8_t *data; 3115{ 3116 struct jrefrec *rec; 3117 3118 rec = (struct jrefrec *)data; 3119 rec->jr_op = JOP_REMREF; 3120 inoref_write(&jremref->jr_ref, jseg, rec); 3121} 3122 3123static void 3124jmvref_write(jmvref, jseg, data) 3125 struct jmvref *jmvref; 3126 struct jseg *jseg; 3127 uint8_t *data; 3128{ 3129 struct jmvrec *rec; 3130 3131 rec = (struct jmvrec *)data; 3132 rec->jm_op = JOP_MVREF; 3133 rec->jm_ino = jmvref->jm_ino; 3134 rec->jm_parent = jmvref->jm_parent; 3135 rec->jm_oldoff = jmvref->jm_oldoff; 3136 rec->jm_newoff = jmvref->jm_newoff; 3137} 3138 3139static void 3140jnewblk_write(jnewblk, jseg, data) 3141 struct jnewblk *jnewblk; 3142 struct jseg *jseg; 3143 uint8_t *data; 3144{ 3145 struct jblkrec *rec; 3146 3147 jnewblk->jn_jsegdep->jd_seg = jseg; 3148 rec = (struct jblkrec *)data; 3149 rec->jb_op = JOP_NEWBLK; 3150 rec->jb_ino = jnewblk->jn_ino; 3151 rec->jb_blkno = jnewblk->jn_blkno; 3152 rec->jb_lbn = jnewblk->jn_lbn; 3153 rec->jb_frags = jnewblk->jn_frags; 3154 rec->jb_oldfrags = jnewblk->jn_oldfrags; 
3155} 3156 3157static void 3158jfreeblk_write(jfreeblk, jseg, data) 3159 struct jfreeblk *jfreeblk; 3160 struct jseg *jseg; 3161 uint8_t *data; 3162{ 3163 struct jblkrec *rec; 3164 3165 jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg; 3166 rec = (struct jblkrec *)data; 3167 rec->jb_op = JOP_FREEBLK; 3168 rec->jb_ino = jfreeblk->jf_ino; 3169 rec->jb_blkno = jfreeblk->jf_blkno; 3170 rec->jb_lbn = jfreeblk->jf_lbn; 3171 rec->jb_frags = jfreeblk->jf_frags; 3172 rec->jb_oldfrags = 0; 3173} 3174 3175static void 3176jfreefrag_write(jfreefrag, jseg, data) 3177 struct jfreefrag *jfreefrag; 3178 struct jseg *jseg; 3179 uint8_t *data; 3180{ 3181 struct jblkrec *rec; 3182 3183 jfreefrag->fr_jsegdep->jd_seg = jseg; 3184 rec = (struct jblkrec *)data; 3185 rec->jb_op = JOP_FREEBLK; 3186 rec->jb_ino = jfreefrag->fr_ino; 3187 rec->jb_blkno = jfreefrag->fr_blkno; 3188 rec->jb_lbn = jfreefrag->fr_lbn; 3189 rec->jb_frags = jfreefrag->fr_frags; 3190 rec->jb_oldfrags = 0; 3191} 3192 3193static void 3194jtrunc_write(jtrunc, jseg, data) 3195 struct jtrunc *jtrunc; 3196 struct jseg *jseg; 3197 uint8_t *data; 3198{ 3199 struct jtrncrec *rec; 3200 3201 jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg; 3202 rec = (struct jtrncrec *)data; 3203 rec->jt_op = JOP_TRUNC; 3204 rec->jt_ino = jtrunc->jt_ino; 3205 rec->jt_size = jtrunc->jt_size; 3206 rec->jt_extsize = jtrunc->jt_extsize; 3207} 3208 3209static void 3210jfsync_write(jfsync, jseg, data) 3211 struct jfsync *jfsync; 3212 struct jseg *jseg; 3213 uint8_t *data; 3214{ 3215 struct jtrncrec *rec; 3216 3217 rec = (struct jtrncrec *)data; 3218 rec->jt_op = JOP_SYNC; 3219 rec->jt_ino = jfsync->jfs_ino; 3220 rec->jt_size = jfsync->jfs_size; 3221 rec->jt_extsize = jfsync->jfs_extsize; 3222} 3223 3224static void 3225softdep_flushjournal(mp) 3226 struct mount *mp; 3227{ 3228 struct jblocks *jblocks; 3229 struct ufsmount *ump; 3230 3231 if (MOUNTEDSUJ(mp) == 0) 3232 return; 3233 ump = VFSTOUFS(mp); 3234 jblocks = ump->softdep_jblocks; 3235 ACQUIRE_LOCK(ump); 3236 while (ump->softdep_on_journal) { 3237 jblocks->jb_needseg = 1; 3238 softdep_process_journal(mp, NULL, MNT_WAIT); 3239 } 3240 FREE_LOCK(ump); 3241} 3242 3243static void softdep_synchronize_completed(struct bio *); 3244static void softdep_synchronize(struct bio *, struct ufsmount *, void *); 3245 3246static void 3247softdep_synchronize_completed(bp) 3248 struct bio *bp; 3249{ 3250 struct jseg *oldest; 3251 struct jseg *jseg; 3252 struct ufsmount *ump; 3253 3254 /* 3255 * caller1 marks the last segment written before we issued the 3256 * synchronize cache. 3257 */ 3258 jseg = bp->bio_caller1; 3259 if (jseg == NULL) { 3260 g_destroy_bio(bp); 3261 return; 3262 } 3263 ump = VFSTOUFS(jseg->js_list.wk_mp); 3264 ACQUIRE_LOCK(ump); 3265 oldest = NULL; 3266 /* 3267 * Mark all the journal entries waiting on the synchronize cache 3268 * as completed so they may continue on. 3269 */ 3270 while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) { 3271 jseg->js_state |= COMPLETE; 3272 oldest = jseg; 3273 jseg = TAILQ_PREV(jseg, jseglst, js_next); 3274 } 3275 /* 3276 * Restart deferred journal entry processing from the oldest 3277 * completed jseg. 3278 */ 3279 if (oldest) 3280 complete_jsegs(oldest); 3281 3282 FREE_LOCK(ump); 3283 g_destroy_bio(bp); 3284} 3285 3286/* 3287 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering 3288 * barriers. The journal must be written prior to any blocks that depend 3289 * on it and the journal cannot be released until the blocks have been 3290 * written.
This code handles both barriers simultaneously. 3291 */ 3292static void 3293softdep_synchronize(bp, ump, caller1) 3294 struct bio *bp; 3295 struct ufsmount *ump; 3296 void *caller1; 3297{ 3298 3299 bp->bio_cmd = BIO_FLUSH; 3300 bp->bio_flags |= BIO_ORDERED; 3301 bp->bio_data = NULL; 3302 bp->bio_offset = ump->um_cp->provider->mediasize; 3303 bp->bio_length = 0; 3304 bp->bio_done = softdep_synchronize_completed; 3305 bp->bio_caller1 = caller1; 3306 g_io_request(bp, 3307 (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private); 3308} 3309 3310/* 3311 * Flush some journal records to disk. 3312 */ 3313static void 3314softdep_process_journal(mp, needwk, flags) 3315 struct mount *mp; 3316 struct worklist *needwk; 3317 int flags; 3318{ 3319 struct jblocks *jblocks; 3320 struct ufsmount *ump; 3321 struct worklist *wk; 3322 struct jseg *jseg; 3323 struct buf *bp; 3324 struct bio *bio; 3325 uint8_t *data; 3326 struct fs *fs; 3327 int shouldflush; 3328 int segwritten; 3329 int jrecmin; /* Minimum records per block. */ 3330 int jrecmax; /* Maximum records per block. */ 3331 int size; 3332 int cnt; 3333 int off; 3334 int devbsize; 3335 3336 if (MOUNTEDSUJ(mp) == 0) 3337 return; 3338 shouldflush = softdep_flushcache; 3339 bio = NULL; 3340 jseg = NULL; 3341 ump = VFSTOUFS(mp); 3342 LOCK_OWNED(ump); 3343 fs = ump->um_fs; 3344 jblocks = ump->softdep_jblocks; 3345 devbsize = ump->um_devvp->v_bufobj.bo_bsize; 3346 /* 3347 * We write anywhere between a disk block and fs block. The upper 3348 * bound is picked to prevent buffer cache fragmentation and limit 3349 * processing time per I/O. 3350 */ 3351 jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */ 3352 jrecmax = (fs->fs_bsize / devbsize) * jrecmin; 3353 segwritten = 0; 3354 for (;;) { 3355 cnt = ump->softdep_on_journal; 3356 /* 3357 * Criteria for writing a segment: 3358 * 1) We have a full block. 3359 * 2) We're called from jwait() and haven't found the 3360 * journal item yet. 3361 * 3) Always write if needseg is set. 3362 * 4) If we are called from process_worklist and have 3363 * not yet written anything we write a partial block 3364 * to enforce a 1 second maximum latency on journal 3365 * entries. 3366 */ 3367 if (cnt < (jrecmax - 1) && needwk == NULL && 3368 jblocks->jb_needseg == 0 && (segwritten || cnt == 0)) 3369 break; 3370 cnt++; 3371 /* 3372 * Verify some free journal space. softdep_prealloc() should 3373 * guarantee that we don't run out so this is indicative of 3374 * a problem with the flow control. Try to recover 3375 * gracefully in any event. 3376 */ 3377 while (jblocks->jb_free == 0) { 3378 if (flags != MNT_WAIT) 3379 break; 3380 printf("softdep: Out of journal space!\n"); 3381 softdep_speedup(ump); 3382 msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz); 3383 } 3384 FREE_LOCK(ump); 3385 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS); 3386 workitem_alloc(&jseg->js_list, D_JSEG, mp); 3387 LIST_INIT(&jseg->js_entries); 3388 LIST_INIT(&jseg->js_indirs); 3389 jseg->js_state = ATTACHED; 3390 if (shouldflush == 0) 3391 jseg->js_state |= COMPLETE; 3392 else if (bio == NULL) 3393 bio = g_alloc_bio(); 3394 jseg->js_jblocks = jblocks; 3395 bp = geteblk(fs->fs_bsize, 0); 3396 ACQUIRE_LOCK(ump); 3397 /* 3398 * If there was a race while we were allocating the block 3399 * and jseg the entry we care about was likely written. 3400 * We bail out in both the WAIT and NOWAIT case and assume 3401 * the caller will loop if the entry it cares about is 3402 * not written. 
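 * The re-check below inspects the pending record count, the needseg
 * flag, and the free space, all with the lock re-held.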
3403 */ 3404 cnt = ump->softdep_on_journal; 3405 if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) { 3406 bp->b_flags |= B_INVAL | B_NOCACHE; 3407 WORKITEM_FREE(jseg, D_JSEG); 3408 FREE_LOCK(ump); 3409 brelse(bp); 3410 ACQUIRE_LOCK(ump); 3411 break; 3412 } 3413 /* 3414 * Calculate the disk block size required for the available 3415 * records rounded to the min size. 3416 */ 3417 if (cnt == 0) 3418 size = devbsize; 3419 else if (cnt < jrecmax) 3420 size = howmany(cnt, jrecmin) * devbsize; 3421 else 3422 size = fs->fs_bsize; 3423 /* 3424 * Allocate a disk block for this journal data and account 3425 * for truncation of the requested size if enough contiguous 3426 * space was not available. 3427 */ 3428 bp->b_blkno = jblocks_alloc(jblocks, size, &size); 3429 bp->b_lblkno = bp->b_blkno; 3430 bp->b_offset = bp->b_blkno * DEV_BSIZE; 3431 bp->b_bcount = size; 3432 bp->b_flags &= ~B_INVAL; 3433 bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY; 3434 /* 3435 * Initialize our jseg with cnt records. Assign the next 3436 * sequence number to it and link it in-order. 3437 */ 3438 cnt = MIN(cnt, (size / devbsize) * jrecmin); 3439 jseg->js_buf = bp; 3440 jseg->js_cnt = cnt; 3441 jseg->js_refs = cnt + 1; /* Self ref. */ 3442 jseg->js_size = size; 3443 jseg->js_seq = jblocks->jb_nextseq++; 3444 if (jblocks->jb_oldestseg == NULL) 3445 jblocks->jb_oldestseg = jseg; 3446 jseg->js_oldseq = jblocks->jb_oldestseg->js_seq; 3447 TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next); 3448 if (jblocks->jb_writeseg == NULL) 3449 jblocks->jb_writeseg = jseg; 3450 /* 3451 * Start filling in records from the pending list. 3452 */ 3453 data = bp->b_data; 3454 off = 0; 3455 3456 /* 3457 * Always put a header on the first block. 3458 * XXX As with below, there might not be a chance to get 3459 * into the loop. Ensure that something valid is written. 3460 */ 3461 jseg_write(ump, jseg, data); 3462 off += JREC_SIZE; 3463 data = bp->b_data + off; 3464 3465 /* 3466 * XXX Something is wrong here. There's no work to do, 3467 * but we need to perform an I/O and allow it to complete 3468 * anyway. 3469 */ 3470 if (LIST_EMPTY(&ump->softdep_journal_pending)) 3471 stat_emptyjblocks++; 3472 3473 while ((wk = LIST_FIRST(&ump->softdep_journal_pending)) 3474 != NULL) { 3475 if (cnt == 0) 3476 break; 3477 /* Place a segment header on every device block.
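 * Each header repeats the segment's sequence information, so the
 * recovery code can validate the segment one device block at a
 * time.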
*/ 3478 if ((off % devbsize) == 0) { 3479 jseg_write(ump, jseg, data); 3480 off += JREC_SIZE; 3481 data = bp->b_data + off; 3482 } 3483 if (wk == needwk) 3484 needwk = NULL; 3485 remove_from_journal(wk); 3486 wk->wk_state |= INPROGRESS; 3487 WORKLIST_INSERT(&jseg->js_entries, wk); 3488 switch (wk->wk_type) { 3489 case D_JADDREF: 3490 jaddref_write(WK_JADDREF(wk), jseg, data); 3491 break; 3492 case D_JREMREF: 3493 jremref_write(WK_JREMREF(wk), jseg, data); 3494 break; 3495 case D_JMVREF: 3496 jmvref_write(WK_JMVREF(wk), jseg, data); 3497 break; 3498 case D_JNEWBLK: 3499 jnewblk_write(WK_JNEWBLK(wk), jseg, data); 3500 break; 3501 case D_JFREEBLK: 3502 jfreeblk_write(WK_JFREEBLK(wk), jseg, data); 3503 break; 3504 case D_JFREEFRAG: 3505 jfreefrag_write(WK_JFREEFRAG(wk), jseg, data); 3506 break; 3507 case D_JTRUNC: 3508 jtrunc_write(WK_JTRUNC(wk), jseg, data); 3509 break; 3510 case D_JFSYNC: 3511 jfsync_write(WK_JFSYNC(wk), jseg, data); 3512 break; 3513 default: 3514 panic("process_journal: Unknown type %s", 3515 TYPENAME(wk->wk_type)); 3516 /* NOTREACHED */ 3517 } 3518 off += JREC_SIZE; 3519 data = bp->b_data + off; 3520 cnt--; 3521 } 3522 3523 /* Clear any remaining space so we don't leak kernel data */ 3524 if (size > off) 3525 bzero(data, size - off); 3526 3527 /* 3528 * Write this one buffer and continue. 3529 */ 3530 segwritten = 1; 3531 jblocks->jb_needseg = 0; 3532 WORKLIST_INSERT(&bp->b_dep, &jseg->js_list); 3533 FREE_LOCK(ump); 3534 pbgetvp(ump->um_devvp, bp); 3535 /* 3536 * We only do the blocking wait once we find the journal 3537 * entry we're looking for. 3538 */ 3539 if (needwk == NULL && flags == MNT_WAIT) 3540 bwrite(bp); 3541 else 3542 bawrite(bp); 3543 ACQUIRE_LOCK(ump); 3544 } 3545 /* 3546 * If we wrote a segment issue a synchronize cache so the journal 3547 * is reflected on disk before the data is written. Since reclaiming 3548 * journal space also requires writing a journal record this 3549 * process also enforces a barrier before reclamation. 3550 */ 3551 if (segwritten && shouldflush) { 3552 softdep_synchronize(bio, ump, 3553 TAILQ_LAST(&jblocks->jb_segs, jseglst)); 3554 } else if (bio) 3555 g_destroy_bio(bio); 3556 /* 3557 * If we've suspended the filesystem because we ran out of journal 3558 * space either try to sync it here to make some progress or 3559 * unsuspend it if we already have. 3560 */ 3561 if (flags == 0 && jblocks->jb_suspended) { 3562 if (journal_unsuspend(ump)) 3563 return; 3564 FREE_LOCK(ump); 3565 VFS_SYNC(mp, MNT_NOWAIT); 3566 ffs_sbupdate(ump, MNT_WAIT, 0); 3567 ACQUIRE_LOCK(ump); 3568 } 3569} 3570 3571/* 3572 * Complete a jseg, allowing all dependencies awaiting journal writes 3573 * to proceed. Each journal dependency also attaches a jsegdep to dependent 3574 * structures so that the journal segment can be freed to reclaim space. 
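 * The self reference taken when the segment was created keeps the
 * jseg valid while the loop below dispatches its entries; it is
 * released at the end of the routine.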
3575 */ 3576static void 3577complete_jseg(jseg) 3578 struct jseg *jseg; 3579{ 3580 struct worklist *wk; 3581 struct jmvref *jmvref; 3582 int waiting; 3583#ifdef INVARIANTS 3584 int i = 0; 3585#endif 3586 3587 while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) { 3588 WORKLIST_REMOVE(wk); 3589 waiting = wk->wk_state & IOWAITING; 3590 wk->wk_state &= ~(INPROGRESS | IOWAITING); 3591 wk->wk_state |= COMPLETE; 3592 KASSERT(i++ < jseg->js_cnt, 3593 ("handle_written_jseg: overflow %d >= %d", 3594 i - 1, jseg->js_cnt)); 3595 switch (wk->wk_type) { 3596 case D_JADDREF: 3597 handle_written_jaddref(WK_JADDREF(wk)); 3598 break; 3599 case D_JREMREF: 3600 handle_written_jremref(WK_JREMREF(wk)); 3601 break; 3602 case D_JMVREF: 3603 rele_jseg(jseg); /* No jsegdep. */ 3604 jmvref = WK_JMVREF(wk); 3605 LIST_REMOVE(jmvref, jm_deps); 3606 if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0) 3607 free_pagedep(jmvref->jm_pagedep); 3608 WORKITEM_FREE(jmvref, D_JMVREF); 3609 break; 3610 case D_JNEWBLK: 3611 handle_written_jnewblk(WK_JNEWBLK(wk)); 3612 break; 3613 case D_JFREEBLK: 3614 handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep); 3615 break; 3616 case D_JTRUNC: 3617 handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep); 3618 break; 3619 case D_JFSYNC: 3620 rele_jseg(jseg); /* No jsegdep. */ 3621 WORKITEM_FREE(wk, D_JFSYNC); 3622 break; 3623 case D_JFREEFRAG: 3624 handle_written_jfreefrag(WK_JFREEFRAG(wk)); 3625 break; 3626 default: 3627 panic("handle_written_jseg: Unknown type %s", 3628 TYPENAME(wk->wk_type)); 3629 /* NOTREACHED */ 3630 } 3631 if (waiting) 3632 wakeup(wk); 3633 } 3634 /* Release the self reference so the structure may be freed. */ 3635 rele_jseg(jseg); 3636} 3637 3638/* 3639 * Determine which jsegs are ready for completion processing. Waits for 3640 * synchronize cache to complete as well as forcing in-order completion 3641 * of journal entries. 3642 */ 3643static void 3644complete_jsegs(jseg) 3645 struct jseg *jseg; 3646{ 3647 struct jblocks *jblocks; 3648 struct jseg *jsegn; 3649 3650 jblocks = jseg->js_jblocks; 3651 /* 3652 * Don't allow out of order completions. If this isn't the first 3653 * block wait for it to write before we're done. 3654 */ 3655 if (jseg != jblocks->jb_writeseg) 3656 return; 3657 /* Iterate through available jsegs processing their entries. */ 3658 while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) { 3659 jblocks->jb_oldestwrseq = jseg->js_oldseq; 3660 jsegn = TAILQ_NEXT(jseg, js_next); 3661 complete_jseg(jseg); 3662 jseg = jsegn; 3663 } 3664 jblocks->jb_writeseg = jseg; 3665 /* 3666 * Attempt to free jsegs now that oldestwrseq may have advanced. 3667 */ 3668 free_jsegs(jblocks); 3669} 3670 3671/* 3672 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle 3673 * the final completions. 3674 */ 3675static void 3676handle_written_jseg(jseg, bp) 3677 struct jseg *jseg; 3678 struct buf *bp; 3679{ 3680 3681 if (jseg->js_refs == 0) 3682 panic("handle_written_jseg: No self-reference on %p", jseg); 3683 jseg->js_state |= DEPCOMPLETE; 3684 /* 3685 * We'll never need this buffer again, set flags so it will be 3686 * discarded. 3687 */ 3688 bp->b_flags |= B_INVAL | B_NOCACHE; 3689 pbrelvp(bp); 3690 complete_jsegs(jseg); 3691} 3692 3693static inline struct jsegdep * 3694inoref_jseg(inoref) 3695 struct inoref *inoref; 3696{ 3697 struct jsegdep *jsegdep; 3698 3699 jsegdep = inoref->if_jsegdep; 3700 inoref->if_jsegdep = NULL; 3701 3702 return (jsegdep); 3703} 3704 3705/* 3706 * Called once a jremref has made it to stable store. 
The jremref is marked 3707 * complete and we attempt to free it. Any pagedep writes sleeping while 3708 * waiting for the jremref to complete will be awoken by free_jremref. 3709 */ 3710static void 3711handle_written_jremref(jremref) 3712 struct jremref *jremref; 3713{ 3714 struct inodedep *inodedep; 3715 struct jsegdep *jsegdep; 3716 struct dirrem *dirrem; 3717 3718 /* Grab the jsegdep. */ 3719 jsegdep = inoref_jseg(&jremref->jr_ref); 3720 /* 3721 * Remove us from the inoref list. 3722 */ 3723 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 3724 0, &inodedep) == 0) 3725 panic("handle_written_jremref: Lost inodedep"); 3726 TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 3727 /* 3728 * Complete the dirrem. 3729 */ 3730 dirrem = jremref->jr_dirrem; 3731 jremref->jr_dirrem = NULL; 3732 LIST_REMOVE(jremref, jr_deps); 3733 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; 3734 jwork_insert(&dirrem->dm_jwork, jsegdep); 3735 if (LIST_EMPTY(&dirrem->dm_jremrefhd) && 3736 (dirrem->dm_state & COMPLETE) != 0) 3737 add_to_worklist(&dirrem->dm_list, 0); 3738 free_jremref(jremref); 3739} 3740 3741/* 3742 * Called once a jaddref has made it to stable store. The dependency is 3743 * marked complete and any dependent structures are added to the inode 3744 * bufwait list to be completed as soon as it is written. If a bitmap write 3745 * depends on this entry we move the inode into the inodedephd of the 3746 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. 3747 */ 3748static void 3749handle_written_jaddref(jaddref) 3750 struct jaddref *jaddref; 3751{ 3752 struct jsegdep *jsegdep; 3753 struct inodedep *inodedep; 3754 struct diradd *diradd; 3755 struct mkdir *mkdir; 3756 3757 /* Grab the jsegdep. */ 3758 jsegdep = inoref_jseg(&jaddref->ja_ref); 3759 mkdir = NULL; 3760 diradd = NULL; 3761 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 3762 0, &inodedep) == 0) 3763 panic("handle_written_jaddref: Lost inodedep."); 3764 if (jaddref->ja_diradd == NULL) 3765 panic("handle_written_jaddref: No dependency"); 3766 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { 3767 diradd = jaddref->ja_diradd; 3768 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); 3769 } else if (jaddref->ja_state & MKDIR_PARENT) { 3770 mkdir = jaddref->ja_mkdir; 3771 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); 3772 } else if (jaddref->ja_state & MKDIR_BODY) 3773 mkdir = jaddref->ja_mkdir; 3774 else 3775 panic("handle_written_jaddref: Unknown dependency %p", 3776 jaddref->ja_diradd); 3777 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ 3778 /* 3779 * Remove us from the inode list. 3780 */ 3781 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); 3782 /* 3783 * The mkdir may be waiting on the jaddref to clear before freeing. 3784 */ 3785 if (mkdir) { 3786 KASSERT(mkdir->md_list.wk_type == D_MKDIR, 3787 ("handle_written_jaddref: Incorrect type for mkdir %s", 3788 TYPENAME(mkdir->md_list.wk_type))); 3789 mkdir->md_jaddref = NULL; 3790 diradd = mkdir->md_diradd; 3791 mkdir->md_state |= DEPCOMPLETE; 3792 complete_mkdir(mkdir); 3793 } 3794 jwork_insert(&diradd->da_jwork, jsegdep); 3795 if (jaddref->ja_state & NEWBLOCK) { 3796 inodedep->id_state |= ONDEPLIST; 3797 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, 3798 inodedep, id_deps); 3799 } 3800 free_jaddref(jaddref); 3801} 3802 3803/* 3804 * Called once a jnewblk journal record is written.
The allocdirect or allocindir 3805 * is placed in the bmsafemap to await notification of a written bitmap. If 3806 * the operation was canceled we add the segdep to the appropriate 3807 * dependency to free the journal space once the canceling operation 3808 * completes. 3809 */ 3810static void 3811handle_written_jnewblk(jnewblk) 3812 struct jnewblk *jnewblk; 3813{ 3814 struct bmsafemap *bmsafemap; 3815 struct freefrag *freefrag; 3816 struct freework *freework; 3817 struct jsegdep *jsegdep; 3818 struct newblk *newblk; 3819 3820 /* Grab the jsegdep. */ 3821 jsegdep = jnewblk->jn_jsegdep; 3822 jnewblk->jn_jsegdep = NULL; 3823 if (jnewblk->jn_dep == NULL) 3824 panic("handle_written_jnewblk: No dependency for the segdep."); 3825 switch (jnewblk->jn_dep->wk_type) { 3826 case D_NEWBLK: 3827 case D_ALLOCDIRECT: 3828 case D_ALLOCINDIR: 3829 /* 3830 * Add the written block to the bmsafemap so it can 3831 * be notified when the bitmap is on disk. 3832 */ 3833 newblk = WK_NEWBLK(jnewblk->jn_dep); 3834 newblk->nb_jnewblk = NULL; 3835 if ((newblk->nb_state & GOINGAWAY) == 0) { 3836 bmsafemap = newblk->nb_bmsafemap; 3837 newblk->nb_state |= ONDEPLIST; 3838 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, 3839 nb_deps); 3840 } 3841 jwork_insert(&newblk->nb_jwork, jsegdep); 3842 break; 3843 case D_FREEFRAG: 3844 /* 3845 * A newblock being removed by a freefrag when replaced by 3846 * frag extension. 3847 */ 3848 freefrag = WK_FREEFRAG(jnewblk->jn_dep); 3849 freefrag->ff_jdep = NULL; 3850 jwork_insert(&freefrag->ff_jwork, jsegdep); 3851 break; 3852 case D_FREEWORK: 3853 /* 3854 * A direct block was removed by truncate. 3855 */ 3856 freework = WK_FREEWORK(jnewblk->jn_dep); 3857 freework->fw_jnewblk = NULL; 3858 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); 3859 break; 3860 default: 3861 panic("handle_written_jnewblk: Unknown type %d.", 3862 jnewblk->jn_dep->wk_type); 3863 } 3864 jnewblk->jn_dep = NULL; 3865 free_jnewblk(jnewblk); 3866} 3867 3868/* 3869 * Cancel a jfreefrag that won't be needed, probably due to colliding with 3870 * an in-flight allocation that has not yet been committed. Divorce us 3871 * from the freefrag and mark it DEPCOMPLETE so that it may be added 3872 * to the worklist. 3873 */ 3874static void 3875cancel_jfreefrag(jfreefrag) 3876 struct jfreefrag *jfreefrag; 3877{ 3878 struct freefrag *freefrag; 3879 3880 if (jfreefrag->fr_jsegdep) { 3881 free_jsegdep(jfreefrag->fr_jsegdep); 3882 jfreefrag->fr_jsegdep = NULL; 3883 } 3884 freefrag = jfreefrag->fr_freefrag; 3885 jfreefrag->fr_freefrag = NULL; 3886 free_jfreefrag(jfreefrag); 3887 freefrag->ff_state |= DEPCOMPLETE; 3888 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); 3889} 3890 3891/* 3892 * Free a jfreefrag when the parent freefrag is rendered obsolete. 3893 */ 3894static void 3895free_jfreefrag(jfreefrag) 3896 struct jfreefrag *jfreefrag; 3897{ 3898 3899 if (jfreefrag->fr_state & INPROGRESS) 3900 WORKLIST_REMOVE(&jfreefrag->fr_list); 3901 else if (jfreefrag->fr_state & ONWORKLIST) 3902 remove_from_journal(&jfreefrag->fr_list); 3903 if (jfreefrag->fr_freefrag != NULL) 3904 panic("free_jfreefrag: Still attached to a freefrag."); 3905 WORKITEM_FREE(jfreefrag, D_JFREEFRAG); 3906} 3907 3908/* 3909 * Called when the journal write for a jfreefrag completes. The parent 3910 * freefrag is added to the worklist if this completes its dependencies. 
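 * The jsegdep is handed off to the freefrag's jwork list, so the
 * journal space is not reclaimed until the fragment itself has been
 * freed.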
3911 */ 3912static void 3913handle_written_jfreefrag(jfreefrag) 3914 struct jfreefrag *jfreefrag; 3915{ 3916 struct jsegdep *jsegdep; 3917 struct freefrag *freefrag; 3918 3919 /* Grab the jsegdep. */ 3920 jsegdep = jfreefrag->fr_jsegdep; 3921 jfreefrag->fr_jsegdep = NULL; 3922 freefrag = jfreefrag->fr_freefrag; 3923 if (freefrag == NULL) 3924 panic("handle_written_jfreefrag: No freefrag."); 3925 freefrag->ff_state |= DEPCOMPLETE; 3926 freefrag->ff_jdep = NULL; 3927 jwork_insert(&freefrag->ff_jwork, jsegdep); 3928 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 3929 add_to_worklist(&freefrag->ff_list, 0); 3930 jfreefrag->fr_freefrag = NULL; 3931 free_jfreefrag(jfreefrag); 3932} 3933 3934/* 3935 * Called when the journal write for a jblkdep (a jfreeblk or jtrunc) 3936 * completes. The jblkdep is removed from the freeblks list of pending 3937 * journal writes and the jsegdep is moved to the freeblks jwork to be 3938 * completed when all blocks have been reclaimed. 3939 */ 3940static void 3941handle_written_jblkdep(jblkdep) 3942 struct jblkdep *jblkdep; 3943{ 3944 struct freeblks *freeblks; 3945 struct jsegdep *jsegdep; 3946 3947 /* Grab the jsegdep. */ 3948 jsegdep = jblkdep->jb_jsegdep; 3949 jblkdep->jb_jsegdep = NULL; 3950 freeblks = jblkdep->jb_freeblks; 3951 LIST_REMOVE(jblkdep, jb_deps); 3952 jwork_insert(&freeblks->fb_jwork, jsegdep); 3953 /* 3954 * If the freeblks is all journaled, we can add it to the worklist. 3955 */ 3956 if (LIST_EMPTY(&freeblks->fb_jblkdephd) && 3957 (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 3958 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 3959 3960 free_jblkdep(jblkdep); 3961} 3962 3963static struct jsegdep * 3964newjsegdep(struct worklist *wk) 3965{ 3966 struct jsegdep *jsegdep; 3967 3968 jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS); 3969 workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp); 3970 jsegdep->jd_seg = NULL; 3971 3972 return (jsegdep); 3973} 3974 3975static struct jmvref * 3976newjmvref(dp, ino, oldoff, newoff) 3977 struct inode *dp; 3978 ino_t ino; 3979 off_t oldoff; 3980 off_t newoff; 3981{ 3982 struct jmvref *jmvref; 3983 3984 jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS); 3985 workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump)); 3986 jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE; 3987 jmvref->jm_parent = dp->i_number; 3988 jmvref->jm_ino = ino; 3989 jmvref->jm_oldoff = oldoff; 3990 jmvref->jm_newoff = newoff; 3991 3992 return (jmvref); 3993} 3994 3995/* 3996 * Allocate a new jremref that tracks the removal of ip from dp with the 3997 * directory entry offset of diroff. Mark the entry as ATTACHED and 3998 * DEPCOMPLETE as we have all the information required for the journal write 3999 * and the directory entry has already been removed from the buffer. The caller 4000 * is responsible for linking the jremref into the pagedep and adding it 4001 * to the journal to write. The MKDIR_PARENT flag is set if we're doing 4002 * a DOTDOT addition so handle_workitem_remove() can properly assign 4003 * the jsegdep when we're done.
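 *
 * A minimal call sketch, assuming the conventions of the directory
 * removal code that drives this (the argument values are illustrative,
 * not a definitive call site):
 *
 *	jremref = newjremref(dirrem, dp, ip, dp->i_offset,
 *	    ip->i_effnlink - 1);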
4004 */ 4005static struct jremref * 4006newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip, 4007 off_t diroff, nlink_t nlink) 4008{ 4009 struct jremref *jremref; 4010 4011 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS); 4012 workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump)); 4013 jremref->jr_state = ATTACHED; 4014 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff, 4015 nlink, ip->i_mode); 4016 jremref->jr_dirrem = dirrem; 4017 4018 return (jremref); 4019} 4020 4021static inline void 4022newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff, 4023 nlink_t nlink, uint16_t mode) 4024{ 4025 4026 inoref->if_jsegdep = newjsegdep(&inoref->if_list); 4027 inoref->if_diroff = diroff; 4028 inoref->if_ino = ino; 4029 inoref->if_parent = parent; 4030 inoref->if_nlink = nlink; 4031 inoref->if_mode = mode; 4032} 4033 4034/* 4035 * Allocate a new jaddref to track the addition of ino to dp at diroff. The 4036 * directory offset may not be known until later. The caller is responsible 4037 * for adding the entry to the journal when this information is available. nlink 4038 * should be the link count prior to the addition and mode is only required 4039 * to have the correct file type (IFMT) bits. 4040 */ 4041static struct jaddref * 4042newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink, 4043 uint16_t mode) 4044{ 4045 struct jaddref *jaddref; 4046 4047 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS); 4048 workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump)); 4049 jaddref->ja_state = ATTACHED; 4050 jaddref->ja_mkdir = NULL; 4051 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode); 4052 4053 return (jaddref); 4054} 4055 4056/* 4057 * Create a new free dependency for a freework. The caller is responsible 4058 * for adjusting the reference count while it has the lock held. The freedep 4059 * tracks an outstanding bitmap write that must complete before the 4060 * freework can continue. 4061 */ 4062static struct freedep * 4063newfreedep(struct freework *freework) 4064{ 4065 struct freedep *freedep; 4066 4067 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS); 4068 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp); 4069 freedep->fd_freework = freework; 4070 4071 return (freedep); 4072} 4073 4074/* 4075 * Free a freedep structure once the buffer it is linked to is written. If 4076 * this is the last reference to the freework, schedule it for completion. 4077 */ 4078static void 4079free_freedep(freedep) 4080 struct freedep *freedep; 4081{ 4082 struct freework *freework; 4083 4084 freework = freedep->fd_freework; 4085 freework->fw_freeblks->fb_cgwait--; 4086 if (--freework->fw_ref == 0) 4087 freework_enqueue(freework); 4088 WORKITEM_FREE(freedep, D_FREEDEP); 4089} 4090 4091/* 4092 * Allocate a new freework structure that may be a level in an indirect 4093 * when parent is not NULL or a top level block when it is. The top level 4094 * freework structures are allocated without the per-filesystem lock held 4095 * and before the freeblks is visible outside of softdep_setup_freeblocks().
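 *
 * Illustrative call shapes under those rules (the argument values here
 * are assumptions for the sketch, not exact call sites):
 *
 *	newfreework(ump, freeblks, NULL, lbn, nb, frags, 0, needj);
 *		(a top-level block; the routine takes the lock itself)
 *	newfreework(ump, freeblks, parent, -lbn, nb, fs->fs_frag, off, 0);
 *		(one full-size block within an indirect owned by parent)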
4096 */ 4097static struct freework * 4098newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) 4099 struct ufsmount *ump; 4100 struct freeblks *freeblks; 4101 struct freework *parent; 4102 ufs_lbn_t lbn; 4103 ufs2_daddr_t nb; 4104 int frags; 4105 int off; 4106 int journal; 4107{ 4108 struct freework *freework; 4109 4110 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); 4111 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); 4112 freework->fw_state = ATTACHED; 4113 freework->fw_jnewblk = NULL; 4114 freework->fw_freeblks = freeblks; 4115 freework->fw_parent = parent; 4116 freework->fw_lbn = lbn; 4117 freework->fw_blkno = nb; 4118 freework->fw_frags = frags; 4119 freework->fw_indir = NULL; 4120 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR) 4121 ? 0 : NINDIR(ump->um_fs) + 1; 4122 freework->fw_start = freework->fw_off = off; 4123 if (journal) 4124 newjfreeblk(freeblks, lbn, nb, frags); 4125 if (parent == NULL) { 4126 ACQUIRE_LOCK(ump); 4127 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 4128 freeblks->fb_ref++; 4129 FREE_LOCK(ump); 4130 } 4131 4132 return (freework); 4133} 4134 4135/* 4136 * Eliminate a jfreeblk for a block that does not need journaling. 4137 */ 4138static void 4139cancel_jfreeblk(freeblks, blkno) 4140 struct freeblks *freeblks; 4141 ufs2_daddr_t blkno; 4142{ 4143 struct jfreeblk *jfreeblk; 4144 struct jblkdep *jblkdep; 4145 4146 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { 4147 if (jblkdep->jb_list.wk_type != D_JFREEBLK) 4148 continue; 4149 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); 4150 if (jfreeblk->jf_blkno == blkno) 4151 break; 4152 } 4153 if (jblkdep == NULL) 4154 return; 4155 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); 4156 free_jsegdep(jblkdep->jb_jsegdep); 4157 LIST_REMOVE(jblkdep, jb_deps); 4158 WORKITEM_FREE(jfreeblk, D_JFREEBLK); 4159} 4160 4161/* 4162 * Allocate a new jfreeblk to journal a top-level block pointer freed when 4163 * truncating a file. The caller must add this to the worklist while the 4164 * per-filesystem lock is held. 4165 */ 4166static struct jfreeblk * 4167newjfreeblk(freeblks, lbn, blkno, frags) 4168 struct freeblks *freeblks; 4169 ufs_lbn_t lbn; 4170 ufs2_daddr_t blkno; 4171 int frags; 4172{ 4173 struct jfreeblk *jfreeblk; 4174 4175 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); 4176 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, 4177 freeblks->fb_list.wk_mp); 4178 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); 4179 jfreeblk->jf_dep.jb_freeblks = freeblks; 4180 jfreeblk->jf_ino = freeblks->fb_inum; 4181 jfreeblk->jf_lbn = lbn; 4182 jfreeblk->jf_blkno = blkno; 4183 jfreeblk->jf_frags = frags; 4184 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); 4185 4186 return (jfreeblk); 4187} 4188 4189/* 4190 * The journal is only prepared to handle full-size block numbers, so we 4191 * have to adjust the record to reflect the change to a full-size block. 4192 * For example, suppose we have a block made up of fragments 8-15 and 4193 * want to free its last two fragments. We are given a request that says: 4194 * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0 4195 * where frags are the number of fragments to free and oldfrags are the 4196 * number of fragments to keep.
To block align it, we have to change it to 4197 * have a valid full-size blkno, so it becomes: 4198 * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6 4199 */ 4200static void 4201adjust_newfreework(freeblks, frag_offset) 4202 struct freeblks *freeblks; 4203 int frag_offset; 4204{ 4205 struct jfreeblk *jfreeblk; 4206 4207 KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL && 4208 LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK), 4209 ("adjust_newfreework: Missing freeblks dependency")); 4210 4211 jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd)); 4212 jfreeblk->jf_blkno -= frag_offset; 4213 jfreeblk->jf_frags += frag_offset; 4214} 4215 4216/* 4217 * Allocate a new jtrunc to track a partial truncation. 4218 */ 4219static struct jtrunc * 4220newjtrunc(freeblks, size, extsize) 4221 struct freeblks *freeblks; 4222 off_t size; 4223 int extsize; 4224{ 4225 struct jtrunc *jtrunc; 4226 4227 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); 4228 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, 4229 freeblks->fb_list.wk_mp); 4230 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); 4231 jtrunc->jt_dep.jb_freeblks = freeblks; 4232 jtrunc->jt_ino = freeblks->fb_inum; 4233 jtrunc->jt_size = size; 4234 jtrunc->jt_extsize = extsize; 4235 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); 4236 4237 return (jtrunc); 4238} 4239 4240/* 4241 * If we're canceling a jaddref on which a new bitmap depends, we have to 4242 * search for another ref to move into the bmsafemap dep. This might be 4243 * better expressed with another structure. 4244 */ 4245static void 4246move_newblock_dep(jaddref, inodedep) 4247 struct jaddref *jaddref; 4248 struct inodedep *inodedep; 4249{ 4250 struct inoref *inoref; 4251 struct jaddref *jaddrefn; 4252 4253 jaddrefn = NULL; 4254 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4255 inoref = TAILQ_NEXT(inoref, if_deps)) { 4256 if ((jaddref->ja_state & NEWBLOCK) && 4257 inoref->if_list.wk_type == D_JADDREF) { 4258 jaddrefn = (struct jaddref *)inoref; 4259 break; 4260 } 4261 } 4262 if (jaddrefn == NULL) 4263 return; 4264 jaddrefn->ja_state &= ~(ATTACHED | UNDONE); 4265 jaddrefn->ja_state |= jaddref->ja_state & 4266 (ATTACHED | UNDONE | NEWBLOCK); 4267 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); 4268 jaddref->ja_state |= ATTACHED; 4269 LIST_REMOVE(jaddref, ja_bmdeps); 4270 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, 4271 ja_bmdeps); 4272} 4273 4274/* 4275 * Cancel a jaddref either before it has been written or while it is being 4276 * written. This happens when a link is removed before the add reaches 4277 * the disk. The jaddref dependency is kept linked into the bmsafemap 4278 * and inode to prevent the link count or bitmap from reaching the disk 4279 * until handle_workitem_remove() re-adjusts the counts and bitmaps as 4280 * required. 4281 * 4282 * Returns 1 if the canceled addref requires journaling of the remove and 4283 * 0 otherwise.
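 *
 * Restated as a sketch of the rule actually applied below: the remove
 * needs its own journal entry exactly when the canceled add has already
 * been handed to the journal, i.e. roughly
 *
 *	needsj = (jaddref->ja_state & (INPROGRESS | COMPLETE)) != 0;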
4284 */ 4285static int 4286cancel_jaddref(jaddref, inodedep, wkhd) 4287 struct jaddref *jaddref; 4288 struct inodedep *inodedep; 4289 struct workhead *wkhd; 4290{ 4291 struct inoref *inoref; 4292 struct jsegdep *jsegdep; 4293 int needsj; 4294 4295 KASSERT((jaddref->ja_state & COMPLETE) == 0, 4296 ("cancel_jaddref: Canceling complete jaddref")); 4297 if (jaddref->ja_state & (INPROGRESS | COMPLETE)) 4298 needsj = 1; 4299 else 4300 needsj = 0; 4301 if (inodedep == NULL) 4302 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 4303 0, &inodedep) == 0) 4304 panic("cancel_jaddref: Lost inodedep"); 4305 /* 4306 * We must adjust the nlink of any reference operation that follows 4307 * us so that it is consistent with the in-memory reference. This 4308 * ensures that inode nlink rollbacks always have the correct link count. 4309 */ 4310 if (needsj == 0) { 4311 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4312 inoref = TAILQ_NEXT(inoref, if_deps)) { 4313 if (inoref->if_state & GOINGAWAY) 4314 break; 4315 inoref->if_nlink--; 4316 } 4317 } 4318 jsegdep = inoref_jseg(&jaddref->ja_ref); 4319 if (jaddref->ja_state & NEWBLOCK) 4320 move_newblock_dep(jaddref, inodedep); 4321 wake_worklist(&jaddref->ja_list); 4322 jaddref->ja_mkdir = NULL; 4323 if (jaddref->ja_state & INPROGRESS) { 4324 jaddref->ja_state &= ~INPROGRESS; 4325 WORKLIST_REMOVE(&jaddref->ja_list); 4326 jwork_insert(wkhd, jsegdep); 4327 } else { 4328 free_jsegdep(jsegdep); 4329 if (jaddref->ja_state & DEPCOMPLETE) 4330 remove_from_journal(&jaddref->ja_list); 4331 } 4332 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); 4333 /* 4334 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove 4335 * can arrange for them to be freed with the bitmap. Otherwise we 4336 * no longer need this addref attached to the inoreflst and it 4337 * will incorrectly adjust nlink if we leave it. 4338 */ 4339 if ((jaddref->ja_state & NEWBLOCK) == 0) { 4340 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 4341 if_deps); 4342 jaddref->ja_state |= COMPLETE; 4343 free_jaddref(jaddref); 4344 return (needsj); 4345 } 4346 /* 4347 * Leave the head of the list for jsegdeps for fast merging. 4348 */ 4349 if (LIST_FIRST(wkhd) != NULL) { 4350 jaddref->ja_state |= ONWORKLIST; 4351 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); 4352 } else 4353 WORKLIST_INSERT(wkhd, &jaddref->ja_list); 4354 4355 return (needsj); 4356} 4357 4358/* 4359 * Attempt to free a jaddref structure when some work completes. This 4360 * should only succeed once the entry is written and all dependencies have 4361 * been notified. 4362 */ 4363static void 4364free_jaddref(jaddref) 4365 struct jaddref *jaddref; 4366{ 4367 4368 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) 4369 return; 4370 if (jaddref->ja_ref.if_jsegdep) 4371 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", 4372 jaddref, jaddref->ja_state); 4373 if (jaddref->ja_state & NEWBLOCK) 4374 LIST_REMOVE(jaddref, ja_bmdeps); 4375 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) 4376 panic("free_jaddref: Bad state %p(0x%X)", 4377 jaddref, jaddref->ja_state); 4378 if (jaddref->ja_mkdir != NULL) 4379 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); 4380 WORKITEM_FREE(jaddref, D_JADDREF); 4381} 4382 4383/* 4384 * Free a jremref structure once it has been written or discarded.
4385 */ 4386static void 4387free_jremref(jremref) 4388 struct jremref *jremref; 4389{ 4390 4391 if (jremref->jr_ref.if_jsegdep) 4392 free_jsegdep(jremref->jr_ref.if_jsegdep); 4393 if (jremref->jr_state & INPROGRESS) 4394 panic("free_jremref: IO still pending"); 4395 WORKITEM_FREE(jremref, D_JREMREF); 4396} 4397 4398/* 4399 * Free a jnewblk structure. 4400 */ 4401static void 4402free_jnewblk(jnewblk) 4403 struct jnewblk *jnewblk; 4404{ 4405 4406 if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE) 4407 return; 4408 LIST_REMOVE(jnewblk, jn_deps); 4409 if (jnewblk->jn_dep != NULL) 4410 panic("free_jnewblk: Dependency still attached."); 4411 WORKITEM_FREE(jnewblk, D_JNEWBLK); 4412} 4413 4414/* 4415 * Cancel a jnewblk which has been made redundant by fragment extension. 4416 */ 4417static void 4418cancel_jnewblk(jnewblk, wkhd) 4419 struct jnewblk *jnewblk; 4420 struct workhead *wkhd; 4421{ 4422 struct jsegdep *jsegdep; 4423 4424 CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno); 4425 jsegdep = jnewblk->jn_jsegdep; 4426 if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL) 4427 panic("cancel_jnewblk: Invalid state"); 4428 jnewblk->jn_jsegdep = NULL; 4429 jnewblk->jn_dep = NULL; 4430 jnewblk->jn_state |= GOINGAWAY; 4431 if (jnewblk->jn_state & INPROGRESS) { 4432 jnewblk->jn_state &= ~INPROGRESS; 4433 WORKLIST_REMOVE(&jnewblk->jn_list); 4434 jwork_insert(wkhd, jsegdep); 4435 } else { 4436 free_jsegdep(jsegdep); 4437 remove_from_journal(&jnewblk->jn_list); 4438 } 4439 wake_worklist(&jnewblk->jn_list); 4440 WORKLIST_INSERT(wkhd, &jnewblk->jn_list); 4441} 4442 4443static void 4444free_jblkdep(jblkdep) 4445 struct jblkdep *jblkdep; 4446{ 4447 4448 if (jblkdep->jb_list.wk_type == D_JFREEBLK) 4449 WORKITEM_FREE(jblkdep, D_JFREEBLK); 4450 else if (jblkdep->jb_list.wk_type == D_JTRUNC) 4451 WORKITEM_FREE(jblkdep, D_JTRUNC); 4452 else 4453 panic("free_jblkdep: Unexpected type %s", 4454 TYPENAME(jblkdep->jb_list.wk_type)); 4455} 4456 4457/* 4458 * Free a single jseg once it is no longer referenced in memory or on 4459 * disk. Reclaim journal blocks and dependencies waiting for the segment 4460 * to disappear. 4461 */ 4462static void 4463free_jseg(jseg, jblocks) 4464 struct jseg *jseg; 4465 struct jblocks *jblocks; 4466{ 4467 struct freework *freework; 4468 4469 /* 4470 * Free freework structures that were lingering to indicate freed 4471 * indirect blocks that forced journal write ordering on reallocate. 4472 */ 4473 while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL) 4474 indirblk_remove(freework); 4475 if (jblocks->jb_oldestseg == jseg) 4476 jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next); 4477 TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next); 4478 jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size); 4479 KASSERT(LIST_EMPTY(&jseg->js_entries), 4480 ("free_jseg: Freed jseg has valid entries.")); 4481 WORKITEM_FREE(jseg, D_JSEG); 4482} 4483 4484/* 4485 * Free all jsegs that meet the criteria for being reclaimed and update 4486 * oldestseg. 4487 */ 4488static void 4489free_jsegs(jblocks) 4490 struct jblocks *jblocks; 4491{ 4492 struct jseg *jseg; 4493 4494 /* 4495 * Free only those jsegs which have none allocated before them to 4496 * preserve the journal space ordering. 4497 */ 4498 while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) { 4499 /* 4500 * Only reclaim space when nothing depends on this journal 4501 * set and another set has written that it is no longer 4502 * valid.
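 *
 * Putting the tests below together, a jseg is reclaimed only when,
 * approximately,
 *
 *	js_refs == 0 &&
 *	(js_state & ALLCOMPLETE) == ALLCOMPLETE &&
 *	(js_seq < jb_oldestwrseq ||
 *	 (js_seq == jb_oldestwrseq && js_cnt == 0))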
4503 */ 4504 if (jseg->js_refs != 0) { 4505 jblocks->jb_oldestseg = jseg; 4506 return; 4507 } 4508 if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE) 4509 break; 4510 if (jseg->js_seq > jblocks->jb_oldestwrseq) 4511 break; 4512 /* 4513 * We can free jsegs that didn't write entries when 4514 * oldestwrseq == js_seq. 4515 */ 4516 if (jseg->js_seq == jblocks->jb_oldestwrseq && 4517 jseg->js_cnt != 0) 4518 break; 4519 free_jseg(jseg, jblocks); 4520 } 4521 /* 4522 * If we exited the loop above, we must still discover the 4523 * oldest valid segment. 4524 */ 4525 if (jseg) 4526 for (jseg = jblocks->jb_oldestseg; jseg != NULL; 4527 jseg = TAILQ_NEXT(jseg, js_next)) 4528 if (jseg->js_refs != 0) 4529 break; 4530 jblocks->jb_oldestseg = jseg; 4531 /* 4532 * The journal has no valid records but some jsegs may still be 4533 * waiting on oldestwrseq to advance. We force a small record 4534 * out to permit these lingering records to be reclaimed. 4535 */ 4536 if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs)) 4537 jblocks->jb_needseg = 1; 4538} 4539 4540/* 4541 * Release one reference to a jseg and free it if the count reaches 0. This 4542 * should eventually reclaim journal space as well. 4543 */ 4544static void 4545rele_jseg(jseg) 4546 struct jseg *jseg; 4547{ 4548 4549 KASSERT(jseg->js_refs > 0, 4550 ("free_jseg: Invalid refcnt %d", jseg->js_refs)); 4551 if (--jseg->js_refs != 0) 4552 return; 4553 free_jsegs(jseg->js_jblocks); 4554} 4555 4556/* 4557 * Release a jsegdep and decrement the jseg count. 4558 */ 4559static void 4560free_jsegdep(jsegdep) 4561 struct jsegdep *jsegdep; 4562{ 4563 4564 if (jsegdep->jd_seg) 4565 rele_jseg(jsegdep->jd_seg); 4566 WORKITEM_FREE(jsegdep, D_JSEGDEP); 4567} 4568 4569/* 4570 * Wait for a journal item to make it to disk. Initiate journal processing 4571 * if required. 4572 */ 4573static int 4574jwait(wk, waitfor) 4575 struct worklist *wk; 4576 int waitfor; 4577{ 4578 4579 LOCK_OWNED(VFSTOUFS(wk->wk_mp)); 4580 /* 4581 * Blocking journal waits cause slow synchronous behavior. Record 4582 * stats on the frequency of these blocking operations. 4583 */ 4584 if (waitfor == MNT_WAIT) { 4585 stat_journal_wait++; 4586 switch (wk->wk_type) { 4587 case D_JREMREF: 4588 case D_JMVREF: 4589 stat_jwait_filepage++; 4590 break; 4591 case D_JTRUNC: 4592 case D_JFREEBLK: 4593 stat_jwait_freeblks++; 4594 break; 4595 case D_JNEWBLK: 4596 stat_jwait_newblk++; 4597 break; 4598 case D_JADDREF: 4599 stat_jwait_inode++; 4600 break; 4601 default: 4602 break; 4603 } 4604 } 4605 /* 4606 * If IO has not started we process the journal. We can't mark the 4607 * worklist item as IOWAITING because we drop the lock while 4608 * processing the journal and the worklist entry may be freed after 4609 * this point. The caller may call back in and re-issue the request. 4610 */ 4611 if ((wk->wk_state & INPROGRESS) == 0) { 4612 softdep_process_journal(wk->wk_mp, wk, waitfor); 4613 if (waitfor != MNT_WAIT) 4614 return (EBUSY); 4615 return (0); 4616 } 4617 if (waitfor != MNT_WAIT) 4618 return (EBUSY); 4619 wait_worklist(wk, "jwait"); 4620 return (0); 4621} 4622 4623/* 4624 * Look up an inodedep based on an inode pointer and set the nlinkdelta as 4625 * appropriate. This is a convenience function to reduce duplicate code 4626 * for the setup and revert functions below.
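 *
 * A worked example of the delta maintained here: an inode with i_nlink 2
 * on disk but one pending remove has i_effnlink 1, so id_nlinkdelta is
 * recorded as 2 - 1 = 1, the amount by which the on-disk link count must
 * still fall once the dependency completes.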
4627 */ 4628static struct inodedep * 4629inodedep_lookup_ip(ip) 4630 struct inode *ip; 4631{ 4632 struct inodedep *inodedep; 4633 int dflags; 4634 4635 KASSERT(ip->i_nlink >= ip->i_effnlink, 4636 ("inodedep_lookup_ip: bad delta")); 4637 dflags = DEPALLOC; 4638 if (IS_SNAPSHOT(ip)) 4639 dflags |= NODELAY; 4640 (void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, 4641 &inodedep); 4642 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 4643 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 4644 4645 return (inodedep); 4646} 4647 4648/* 4649 * Called prior to creating a new inode and linking it to a directory. The 4650 * jaddref structure must already be allocated by softdep_setup_inomapdep 4651 * and it is discovered here so we can initialize the mode and update 4652 * nlinkdelta. 4653 */ 4654void 4655softdep_setup_create(dp, ip) 4656 struct inode *dp; 4657 struct inode *ip; 4658{ 4659 struct inodedep *inodedep; 4660 struct jaddref *jaddref; 4661 struct vnode *dvp; 4662 4663 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4664 ("softdep_setup_create called on non-softdep filesystem")); 4665 KASSERT(ip->i_nlink == 1, 4666 ("softdep_setup_create: Invalid link count.")); 4667 dvp = ITOV(dp); 4668 ACQUIRE_LOCK(dp->i_ump); 4669 inodedep = inodedep_lookup_ip(ip); 4670 if (DOINGSUJ(dvp)) { 4671 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4672 inoreflst); 4673 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 4674 ("softdep_setup_create: No addref structure present.")); 4675 } 4676 softdep_prelink(dvp, NULL); 4677 FREE_LOCK(dp->i_ump); 4678} 4679 4680/* 4681 * Create a jaddref structure to track the addition of a DOTDOT link when 4682 * we are reparenting an inode as part of a rename. This jaddref will be 4683 * found by softdep_setup_directory_change. Adjusts nlinkdelta for 4684 * non-journaling softdep. 4685 */ 4686void 4687softdep_setup_dotdot_link(dp, ip) 4688 struct inode *dp; 4689 struct inode *ip; 4690{ 4691 struct inodedep *inodedep; 4692 struct jaddref *jaddref; 4693 struct vnode *dvp; 4694 struct vnode *vp; 4695 4696 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4697 ("softdep_setup_dotdot_link called on non-softdep filesystem")); 4698 dvp = ITOV(dp); 4699 vp = ITOV(ip); 4700 jaddref = NULL; 4701 /* 4702 * We don't set MKDIR_PARENT as this is not tied to a mkdir and 4703 * is used as a normal link would be. 4704 */ 4705 if (DOINGSUJ(dvp)) 4706 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4707 dp->i_effnlink - 1, dp->i_mode); 4708 ACQUIRE_LOCK(dp->i_ump); 4709 inodedep = inodedep_lookup_ip(dp); 4710 if (jaddref) 4711 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4712 if_deps); 4713 softdep_prelink(dvp, ITOV(ip)); 4714 FREE_LOCK(dp->i_ump); 4715} 4716 4717/* 4718 * Create a jaddref structure to track a new link to an inode. The directory 4719 * offset is not known until softdep_setup_directory_add or 4720 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling 4721 * softdep. 
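 *
 * A sketch of the calling convention this assumes (mirroring the create
 * and mkdir hooks above): the caller first bumps ip->i_effnlink for the
 * new name and only then, before the directory entry is written, calls
 *
 *	softdep_setup_link(dp, ip);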
4722 */ 4723void 4724softdep_setup_link(dp, ip) 4725 struct inode *dp; 4726 struct inode *ip; 4727{ 4728 struct inodedep *inodedep; 4729 struct jaddref *jaddref; 4730 struct vnode *dvp; 4731 4732 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4733 ("softdep_setup_link called on non-softdep filesystem")); 4734 dvp = ITOV(dp); 4735 jaddref = NULL; 4736 if (DOINGSUJ(dvp)) 4737 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, 4738 ip->i_mode); 4739 ACQUIRE_LOCK(dp->i_ump); 4740 inodedep = inodedep_lookup_ip(ip); 4741 if (jaddref) 4742 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4743 if_deps); 4744 softdep_prelink(dvp, ITOV(ip)); 4745 FREE_LOCK(dp->i_ump); 4746} 4747 4748/* 4749 * Called to create the jaddref structures to track . and .. references as 4750 * well as look up and further initialize the incomplete jaddref created 4751 * by softdep_setup_inomapdep when the inode was allocated. Adjusts 4752 * nlinkdelta for non-journaling softdep. 4753 */ 4754void 4755softdep_setup_mkdir(dp, ip) 4756 struct inode *dp; 4757 struct inode *ip; 4758{ 4759 struct inodedep *inodedep; 4760 struct jaddref *dotdotaddref; 4761 struct jaddref *dotaddref; 4762 struct jaddref *jaddref; 4763 struct vnode *dvp; 4764 4765 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4766 ("softdep_setup_mkdir called on non-softdep filesystem")); 4767 dvp = ITOV(dp); 4768 dotaddref = dotdotaddref = NULL; 4769 if (DOINGSUJ(dvp)) { 4770 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, 4771 ip->i_mode); 4772 dotaddref->ja_state |= MKDIR_BODY; 4773 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4774 dp->i_effnlink - 1, dp->i_mode); 4775 dotdotaddref->ja_state |= MKDIR_PARENT; 4776 } 4777 ACQUIRE_LOCK(dp->i_ump); 4778 inodedep = inodedep_lookup_ip(ip); 4779 if (DOINGSUJ(dvp)) { 4780 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4781 inoreflst); 4782 KASSERT(jaddref != NULL, 4783 ("softdep_setup_mkdir: No addref structure present.")); 4784 KASSERT(jaddref->ja_parent == dp->i_number, 4785 ("softdep_setup_mkdir: bad parent %ju", 4786 (uintmax_t)jaddref->ja_parent)); 4787 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, 4788 if_deps); 4789 } 4790 inodedep = inodedep_lookup_ip(dp); 4791 if (DOINGSUJ(dvp)) 4792 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, 4793 &dotdotaddref->ja_ref, if_deps); 4794 softdep_prelink(ITOV(dp), NULL); 4795 FREE_LOCK(dp->i_ump); 4796} 4797 4798/* 4799 * Called to track nlinkdelta of the inode and parent directories prior to 4800 * unlinking a directory. 4801 */ 4802void 4803softdep_setup_rmdir(dp, ip) 4804 struct inode *dp; 4805 struct inode *ip; 4806{ 4807 struct vnode *dvp; 4808 4809 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4810 ("softdep_setup_rmdir called on non-softdep filesystem")); 4811 dvp = ITOV(dp); 4812 ACQUIRE_LOCK(dp->i_ump); 4813 (void) inodedep_lookup_ip(ip); 4814 (void) inodedep_lookup_ip(dp); 4815 softdep_prelink(dvp, ITOV(ip)); 4816 FREE_LOCK(dp->i_ump); 4817} 4818 4819/* 4820 * Called to track nlinkdelta of the inode and parent directories prior to 4821 * unlink.
4822 */ 4823void 4824softdep_setup_unlink(dp, ip) 4825 struct inode *dp; 4826 struct inode *ip; 4827{ 4828 struct vnode *dvp; 4829 4830 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4831 ("softdep_setup_unlink called on non-softdep filesystem")); 4832 dvp = ITOV(dp); 4833 ACQUIRE_LOCK(dp->i_ump); 4834 (void) inodedep_lookup_ip(ip); 4835 (void) inodedep_lookup_ip(dp); 4836 softdep_prelink(dvp, ITOV(ip)); 4837 FREE_LOCK(dp->i_ump); 4838} 4839 4840/* 4841 * Called to release the journal structures created by a failed non-directory 4842 * creation. Adjusts nlinkdelta for non-journaling softdep. 4843 */ 4844void 4845softdep_revert_create(dp, ip) 4846 struct inode *dp; 4847 struct inode *ip; 4848{ 4849 struct inodedep *inodedep; 4850 struct jaddref *jaddref; 4851 struct vnode *dvp; 4852 4853 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4854 ("softdep_revert_create called on non-softdep filesystem")); 4855 dvp = ITOV(dp); 4856 ACQUIRE_LOCK(dp->i_ump); 4857 inodedep = inodedep_lookup_ip(ip); 4858 if (DOINGSUJ(dvp)) { 4859 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4860 inoreflst); 4861 KASSERT(jaddref->ja_parent == dp->i_number, 4862 ("softdep_revert_create: addref parent mismatch")); 4863 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4864 } 4865 FREE_LOCK(dp->i_ump); 4866} 4867 4868/* 4869 * Called to release the journal structures created by a failed link 4870 * addition. Adjusts nlinkdelta for non-journaling softdep. 4871 */ 4872void 4873softdep_revert_link(dp, ip) 4874 struct inode *dp; 4875 struct inode *ip; 4876{ 4877 struct inodedep *inodedep; 4878 struct jaddref *jaddref; 4879 struct vnode *dvp; 4880 4881 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4882 ("softdep_revert_link called on non-softdep filesystem")); 4883 dvp = ITOV(dp); 4884 ACQUIRE_LOCK(dp->i_ump); 4885 inodedep = inodedep_lookup_ip(ip); 4886 if (DOINGSUJ(dvp)) { 4887 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4888 inoreflst); 4889 KASSERT(jaddref->ja_parent == dp->i_number, 4890 ("softdep_revert_link: addref parent mismatch")); 4891 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4892 } 4893 FREE_LOCK(dp->i_ump); 4894} 4895 4896/* 4897 * Called to release the journal structures created by a failed mkdir 4898 * attempt. Adjusts nlinkdelta for non-journaling softdep. 
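 *
 * The cancellations below unwind the jaddrefs queued by
 * softdep_setup_mkdir(): first the ".." reference held on the parent,
 * then the name reference on the new directory and finally its "."
 * reference.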
4899 */ 4900void 4901softdep_revert_mkdir(dp, ip) 4902 struct inode *dp; 4903 struct inode *ip; 4904{ 4905 struct inodedep *inodedep; 4906 struct jaddref *jaddref; 4907 struct jaddref *dotaddref; 4908 struct vnode *dvp; 4909 4910 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4911 ("softdep_revert_mkdir called on non-softdep filesystem")); 4912 dvp = ITOV(dp); 4913 4914 ACQUIRE_LOCK(dp->i_ump); 4915 inodedep = inodedep_lookup_ip(dp); 4916 if (DOINGSUJ(dvp)) { 4917 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4918 inoreflst); 4919 KASSERT(jaddref->ja_parent == ip->i_number, 4920 ("softdep_revert_mkdir: dotdot addref parent mismatch")); 4921 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4922 } 4923 inodedep = inodedep_lookup_ip(ip); 4924 if (DOINGSUJ(dvp)) { 4925 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4926 inoreflst); 4927 KASSERT(jaddref->ja_parent == dp->i_number, 4928 ("softdep_revert_mkdir: addref parent mismatch")); 4929 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 4930 inoreflst, if_deps); 4931 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4932 KASSERT(dotaddref->ja_parent == ip->i_number, 4933 ("softdep_revert_mkdir: dot addref parent mismatch")); 4934 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); 4935 } 4936 FREE_LOCK(dp->i_ump); 4937} 4938 4939/* 4940 * Called to correct nlinkdelta after a failed rmdir. 4941 */ 4942void 4943softdep_revert_rmdir(dp, ip) 4944 struct inode *dp; 4945 struct inode *ip; 4946{ 4947 4948 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4949 ("softdep_revert_rmdir called on non-softdep filesystem")); 4950 ACQUIRE_LOCK(dp->i_ump); 4951 (void) inodedep_lookup_ip(ip); 4952 (void) inodedep_lookup_ip(dp); 4953 FREE_LOCK(dp->i_ump); 4954} 4955 4956/* 4957 * Protecting the freemaps (or bitmaps). 4958 * 4959 * To eliminate the need to execute fsck before mounting a filesystem 4960 * after a power failure, one must (conservatively) guarantee that the 4961 * on-disk copy of the bitmaps never indicates that a live inode or block is 4962 * free. So, when a block or inode is allocated, the bitmap should be 4963 * updated (on disk) before any new pointers. When a block or inode is 4964 * freed, the bitmap should not be updated until all pointers have been 4965 * reset. The latter dependency is handled by the delayed de-allocation 4966 * approach described below for block and inode de-allocation. The former 4967 * dependency is handled by calling the following procedure when a block or 4968 * inode is allocated. When an inode is allocated an "inodedep" is created 4969 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 4970 * Each "inodedep" is also inserted into the hash indexing structure so 4971 * that any additional link additions can be made dependent on the inode 4972 * allocation. 4973 * 4974 * The ufs filesystem maintains a number of free block counts (e.g., per 4975 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 4976 * in addition to the bitmaps. These counts are used to improve efficiency 4977 * during allocation and therefore must be consistent with the bitmaps. 4978 * There is no convenient way to guarantee post-crash consistency of these 4979 * counts with simple update ordering, for two main reasons: (1) The counts 4980 * and bitmaps for a single cylinder group block are not in the same disk 4981 * sector. If a disk write is interrupted (e.g., by power failure), one may 4982 * be written and the other not.
(2) Some of the counts are located in the 4983 * superblock rather than the cylinder group block. So, we focus our soft 4984 * updates implementation on protecting the bitmaps. When mounting a 4985 * filesystem, we recompute the auxiliary counts from the bitmaps. 4986 */ 4987 4988/* 4989 * Called just after updating the cylinder group block to allocate an inode. 4990 */ 4991void 4992softdep_setup_inomapdep(bp, ip, newinum, mode) 4993 struct buf *bp; /* buffer for cylgroup block with inode map */ 4994 struct inode *ip; /* inode related to allocation */ 4995 ino_t newinum; /* new inode number being allocated */ 4996 int mode; 4997{ 4998 struct inodedep *inodedep; 4999 struct bmsafemap *bmsafemap; 5000 struct jaddref *jaddref; 5001 struct mount *mp; 5002 struct fs *fs; 5003 5004 mp = UFSTOVFS(ip->i_ump); 5005 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5006 ("softdep_setup_inomapdep called on non-softdep filesystem")); 5007 fs = ip->i_ump->um_fs; 5008 jaddref = NULL; 5009 5010 /* 5011 * Allocate the journal reference add structure so that the bitmap 5012 * can be dependent on it. 5013 */ 5014 if (MOUNTEDSUJ(mp)) { 5015 jaddref = newjaddref(ip, newinum, 0, 0, mode); 5016 jaddref->ja_state |= NEWBLOCK; 5017 } 5018 5019 /* 5020 * Create a dependency for the newly allocated inode. 5021 * Panic if it already exists as something is seriously wrong. 5022 * Otherwise add it to the dependency list for the buffer holding 5023 * the cylinder group map from which it was allocated. 5024 * 5025 * We have to preallocate a bmsafemap entry in case it is needed 5026 * in bmsafemap_lookup since once we allocate the inodedep, we 5027 * have to finish initializing it before we can FREE_LOCK(). 5028 * By preallocating, we avoid FREE_LOCK() while doing a malloc 5029 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before 5030 * creating the inodedep as it can be freed during the time 5031 * that we FREE_LOCK() while allocating the inodedep. We must 5032 * call workitem_alloc() before entering the locked section as 5033 * it also acquires the lock and we must avoid trying to do so 5034 * recursively. 5035 */ 5036 bmsafemap = malloc(sizeof(struct bmsafemap), 5037 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 5038 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 5039 ACQUIRE_LOCK(ip->i_ump); 5040 if ((inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep))) 5041 panic("softdep_setup_inomapdep: dependency %p for new " 5042 "inode already exists", inodedep); 5043 bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap); 5044 if (jaddref) { 5045 LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps); 5046 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 5047 if_deps); 5048 } else { 5049 inodedep->id_state |= ONDEPLIST; 5050 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); 5051 } 5052 inodedep->id_bmsafemap = bmsafemap; 5053 inodedep->id_state &= ~DEPCOMPLETE; 5054 FREE_LOCK(ip->i_ump); 5055} 5056 5057/* 5058 * Called just after updating the cylinder group block to 5059 * allocate a block or fragment. 5060 */ 5061void 5062softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) 5063 struct buf *bp; /* buffer for cylgroup block with block map */ 5064 struct mount *mp; /* filesystem doing allocation */ 5065 ufs2_daddr_t newblkno; /* number of newly allocated block */ 5066 int frags; /* Number of fragments. */ 5067 int oldfrags; /* Previous number of fragments for extend.
*/ 5068{ 5069 struct newblk *newblk; 5070 struct bmsafemap *bmsafemap; 5071 struct jnewblk *jnewblk; 5072 struct ufsmount *ump; 5073 struct fs *fs; 5074 5075 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5076 ("softdep_setup_blkmapdep called on non-softdep filesystem")); 5077 ump = VFSTOUFS(mp); 5078 fs = ump->um_fs; 5079 jnewblk = NULL; 5080 /* 5081 * Create a dependency for the newly allocated block. 5082 * Add it to the dependency list for the buffer holding 5083 * the cylinder group map from which it was allocated. 5084 */ 5085 if (MOUNTEDSUJ(mp)) { 5086 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); 5087 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); 5088 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); 5089 jnewblk->jn_state = ATTACHED; 5090 jnewblk->jn_blkno = newblkno; 5091 jnewblk->jn_frags = frags; 5092 jnewblk->jn_oldfrags = oldfrags; 5093#ifdef SUJ_DEBUG 5094 { 5095 struct cg *cgp; 5096 uint8_t *blksfree; 5097 long bno; 5098 int i; 5099 5100 cgp = (struct cg *)bp->b_data; 5101 blksfree = cg_blksfree(cgp); 5102 bno = dtogd(fs, jnewblk->jn_blkno); 5103 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; 5104 i++) { 5105 if (isset(blksfree, bno + i)) 5106 panic("softdep_setup_blkmapdep: " 5107 "free fragment %d from %d-%d " 5108 "state 0x%X dep %p", i, 5109 jnewblk->jn_oldfrags, 5110 jnewblk->jn_frags, 5111 jnewblk->jn_state, 5112 jnewblk->jn_dep); 5113 } 5114 } 5115#endif 5116 } 5117 5118 CTR3(KTR_SUJ, 5119 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", 5120 newblkno, frags, oldfrags); 5121 ACQUIRE_LOCK(ump); 5122 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) 5123 panic("softdep_setup_blkmapdep: found block"); 5124 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp, 5125 dtog(fs, newblkno), NULL); 5126 if (jnewblk) { 5127 jnewblk->jn_dep = (struct worklist *)newblk; 5128 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); 5129 } else { 5130 newblk->nb_state |= ONDEPLIST; 5131 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 5132 } 5133 newblk->nb_bmsafemap = bmsafemap; 5134 newblk->nb_jnewblk = jnewblk; 5135 FREE_LOCK(ump); 5136} 5137 5138#define BMSAFEMAP_HASH(ump, cg) \ 5139 (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size]) 5140 5141static int 5142bmsafemap_find(bmsafemaphd, cg, bmsafemapp) 5143 struct bmsafemap_hashhead *bmsafemaphd; 5144 int cg; 5145 struct bmsafemap **bmsafemapp; 5146{ 5147 struct bmsafemap *bmsafemap; 5148 5149 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) 5150 if (bmsafemap->sm_cg == cg) 5151 break; 5152 if (bmsafemap) { 5153 *bmsafemapp = bmsafemap; 5154 return (1); 5155 } 5156 *bmsafemapp = NULL; 5157 5158 return (0); 5159} 5160 5161/* 5162 * Find the bmsafemap associated with a cylinder group buffer. 5163 * If none exists, create one. The buffer must be locked when 5164 * this routine is called and this routine must be called with 5165 * the softdep lock held. To avoid giving up the lock while 5166 * allocating a new bmsafemap, a preallocated bmsafemap may be 5167 * provided. If it is provided but not needed, it is freed. 
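 *
 * The preallocation pattern this supports can be seen in
 * softdep_setup_inomapdep() above; schematically:
 *
 *	bmsafemap = malloc(sizeof(struct bmsafemap), M_BMSAFEMAP,
 *	    M_SOFTDEP_FLAGS);
 *	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
 *	ACQUIRE_LOCK(ump);
 *	...
 *	bmsafemap = bmsafemap_lookup(mp, bp, cg, bmsafemap);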
5168 */ 5169static struct bmsafemap * 5170bmsafemap_lookup(mp, bp, cg, newbmsafemap) 5171 struct mount *mp; 5172 struct buf *bp; 5173 int cg; 5174 struct bmsafemap *newbmsafemap; 5175{ 5176 struct bmsafemap_hashhead *bmsafemaphd; 5177 struct bmsafemap *bmsafemap, *collision; 5178 struct worklist *wk; 5179 struct ufsmount *ump; 5180 5181 ump = VFSTOUFS(mp); 5182 LOCK_OWNED(ump); 5183 KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer")); 5184 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5185 if (wk->wk_type == D_BMSAFEMAP) { 5186 if (newbmsafemap) 5187 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5188 return (WK_BMSAFEMAP(wk)); 5189 } 5190 } 5191 bmsafemaphd = BMSAFEMAP_HASH(ump, cg); 5192 if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) { 5193 if (newbmsafemap) 5194 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5195 return (bmsafemap); 5196 } 5197 if (newbmsafemap) { 5198 bmsafemap = newbmsafemap; 5199 } else { 5200 FREE_LOCK(ump); 5201 bmsafemap = malloc(sizeof(struct bmsafemap), 5202 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 5203 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 5204 ACQUIRE_LOCK(ump); 5205 } 5206 bmsafemap->sm_buf = bp; 5207 LIST_INIT(&bmsafemap->sm_inodedephd); 5208 LIST_INIT(&bmsafemap->sm_inodedepwr); 5209 LIST_INIT(&bmsafemap->sm_newblkhd); 5210 LIST_INIT(&bmsafemap->sm_newblkwr); 5211 LIST_INIT(&bmsafemap->sm_jaddrefhd); 5212 LIST_INIT(&bmsafemap->sm_jnewblkhd); 5213 LIST_INIT(&bmsafemap->sm_freehd); 5214 LIST_INIT(&bmsafemap->sm_freewr); 5215 if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) { 5216 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 5217 return (collision); 5218 } 5219 bmsafemap->sm_cg = cg; 5220 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); 5221 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 5222 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 5223 return (bmsafemap); 5224} 5225 5226/* 5227 * Direct block allocation dependencies. 5228 * 5229 * When a new block is allocated, the corresponding disk locations must be 5230 * initialized (with zeros or new data) before the on-disk inode points to 5231 * them. Also, the freemap from which the block was allocated must be 5232 * updated (on disk) before the inode's pointer. These two dependencies are 5233 * independent of each other and are needed for all file blocks and indirect 5234 * blocks that are pointed to directly by the inode. Just before the 5235 * "in-core" version of the inode is updated with a newly allocated block 5236 * number, a procedure (below) is called to set up allocation dependency 5237 * structures. These structures are removed when the corresponding 5238 * dependencies are satisfied or when the block allocation becomes obsolete 5239 * (i.e., the file is deleted, the block is de-allocated, or the block is a 5240 * fragment that gets upgraded). All of these cases are handled in 5241 * procedures described later. 5242 * 5243 * When a file extension causes a fragment to be upgraded, either to a larger 5244 * fragment or to a full block, the on-disk location may change (if the 5245 * previous fragment could not simply be extended). In this case, the old 5246 * fragment must be de-allocated, but not until after the inode's pointer has 5247 * been updated. In most cases, this is handled by later procedures, which 5248 * will construct a "freefrag" structure to be added to the workitem queue 5249 * when the inode update is complete (or obsolete).
The main exception to 5250 * this is when an allocation occurs while a pending allocation dependency 5251 * (for the same block pointer) remains. This case is handled in the main 5252 * allocation dependency setup procedure by immediately freeing the 5253 * unreferenced fragments. 5254 */ 5255void 5256softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp) 5257 struct inode *ip; /* inode to which block is being added */ 5258 ufs_lbn_t off; /* block pointer within inode */ 5259 ufs2_daddr_t newblkno; /* disk block number being added */ 5260 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ 5261 long newsize; /* size of new block */ 5262 long oldsize; /* size of old block */ 5263 struct buf *bp; /* bp for allocated block */ 5264{ 5265 struct allocdirect *adp, *oldadp; 5266 struct allocdirectlst *adphead; 5267 struct freefrag *freefrag; 5268 struct inodedep *inodedep; 5269 struct pagedep *pagedep; 5270 struct jnewblk *jnewblk; 5271 struct newblk *newblk; 5272 struct mount *mp; 5273 ufs_lbn_t lbn; 5274 5275 lbn = bp->b_lblkno; 5276 mp = UFSTOVFS(ip->i_ump); 5277 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5278 ("softdep_setup_allocdirect called on non-softdep filesystem")); 5279 if (oldblkno && oldblkno != newblkno) 5280 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); 5281 else 5282 freefrag = NULL; 5283 5284 CTR6(KTR_SUJ, 5285 "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd " 5286 "off %jd newsize %ld oldsize %ld", 5287 ip->i_number, newblkno, oldblkno, off, newsize, oldsize); 5288 ACQUIRE_LOCK(ip->i_ump); 5289 if (off >= NDADDR) { 5290 if (lbn > 0) 5291 panic("softdep_setup_allocdirect: bad lbn %jd, off %jd", 5292 lbn, off); 5293 /* allocating an indirect block */ 5294 if (oldblkno != 0) 5295 panic("softdep_setup_allocdirect: non-zero indir"); 5296 } else { 5297 if (off != lbn) 5298 panic("softdep_setup_allocdirect: lbn %jd != off %jd", 5299 lbn, off); 5300 /* 5301 * Allocating a direct block. 5302 * 5303 * If we are allocating a directory block, then we must 5304 * allocate an associated pagedep to track additions and 5305 * deletions. 5306 */ 5307 if ((ip->i_mode & IFMT) == IFDIR) 5308 pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC, 5309 &pagedep); 5310 } 5311 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) 5312 panic("softdep_setup_allocdirect: lost block"); 5313 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5314 ("softdep_setup_allocdirect: newblk already initialized")); 5315 /* 5316 * Convert the newblk to an allocdirect. 5317 */ 5318 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); 5319 adp = (struct allocdirect *)newblk; 5320 newblk->nb_freefrag = freefrag; 5321 adp->ad_offset = off; 5322 adp->ad_oldblkno = oldblkno; 5323 adp->ad_newsize = newsize; 5324 adp->ad_oldsize = oldsize; 5325 5326 /* 5327 * Finish initializing the journal.
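 * The jnewblk was created in softdep_setup_blkmapdep() before the owning
 * inode and logical block were known; they are filled in here and the
 * record is queued for the journal.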
5328 */ 5329 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5330 jnewblk->jn_ino = ip->i_number; 5331 jnewblk->jn_lbn = lbn; 5332 add_to_journal(&jnewblk->jn_list); 5333 } 5334 if (freefrag && freefrag->ff_jdep != NULL && 5335 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5336 add_to_journal(freefrag->ff_jdep); 5337 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep); 5338 adp->ad_inodedep = inodedep; 5339 5340 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5341 /* 5342 * The list of allocdirects must be kept in sorted and ascending 5343 * order so that the rollback routines can quickly determine the 5344 * first uncommitted block (the size of the file stored on disk 5345 * ends at the end of the lowest committed fragment, or if there 5346 * are no fragments, at the end of the highest committed block). 5347 * Since files generally grow, the typical case is that the new 5348 * block is to be added at the end of the list. We speed this 5349 * special case by checking against the last allocdirect in the 5350 * list before laboriously traversing the list looking for the 5351 * insertion point. 5352 */ 5353 adphead = &inodedep->id_newinoupdt; 5354 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5355 if (oldadp == NULL || oldadp->ad_offset <= off) { 5356 /* insert at end of list */ 5357 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5358 if (oldadp != NULL && oldadp->ad_offset == off) 5359 allocdirect_merge(adphead, adp, oldadp); 5360 FREE_LOCK(ip->i_ump); 5361 return; 5362 } 5363 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5364 if (oldadp->ad_offset >= off) 5365 break; 5366 } 5367 if (oldadp == NULL) 5368 panic("softdep_setup_allocdirect: lost entry"); 5369 /* insert in middle of list */ 5370 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5371 if (oldadp->ad_offset == off) 5372 allocdirect_merge(adphead, adp, oldadp); 5373 5374 FREE_LOCK(ip->i_ump); 5375} 5376 5377/* 5378 * Merge a newer and older journal record to be stored either in a 5379 * newblock or freefrag. This handles aggregating journal records for 5380 * fragment allocation into a second record as well as replacing a 5381 * journal free with an aborted journal allocation. A segment for the 5382 * oldest record will be placed on wkhd if it has been written. If not, 5383 * the segment for the newer record will suffice. 5384 */ 5385static struct worklist * 5386jnewblk_merge(new, old, wkhd) 5387 struct worklist *new; 5388 struct worklist *old; 5389 struct workhead *wkhd; 5390{ 5391 struct jnewblk *njnewblk; 5392 struct jnewblk *jnewblk; 5393 5394 /* Handle NULLs to simplify callers. */ 5395 if (new == NULL) 5396 return (old); 5397 if (old == NULL) 5398 return (new); 5399 /* Replace a jfreefrag with a jnewblk. */ 5400 if (new->wk_type == D_JFREEFRAG) { 5401 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) 5402 panic("jnewblk_merge: blkno mismatch: %p, %p", 5403 old, new); 5404 cancel_jfreefrag(WK_JFREEFRAG(new)); 5405 return (old); 5406 } 5407 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) 5408 panic("jnewblk_merge: Bad type: old %d new %d\n", 5409 old->wk_type, new->wk_type); 5410 /* 5411 * Handle merging of two jnewblk records that describe 5412 * different sets of fragments in the same block. 5413 */ 5414 jnewblk = WK_JNEWBLK(old); 5415 njnewblk = WK_JNEWBLK(new); 5416 if (jnewblk->jn_blkno != njnewblk->jn_blkno) 5417 panic("jnewblk_merge: Merging disparate blocks."); 5418 /* 5419 * The record may be rolled back in the cg.
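 * If so, transfer the UNDONE rollback state to the surviving (newer)
 * record so that completion of the cylinder group write repairs the
 * bitmap through the record that remains.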
5420 */ 5421 if (jnewblk->jn_state & UNDONE) { 5422 jnewblk->jn_state &= ~UNDONE; 5423 njnewblk->jn_state |= UNDONE; 5424 njnewblk->jn_state &= ~ATTACHED; 5425 } 5426 /* 5427 * We modify the newer addref and free the older so that if neither 5428 * has been written the most up-to-date copy will be on disk. If 5429 * both have been written but rolled back we only temporarily need 5430 * one of them to fix the bits when the cg write completes. 5431 */ 5432 jnewblk->jn_state |= ATTACHED | COMPLETE; 5433 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; 5434 cancel_jnewblk(jnewblk, wkhd); 5435 WORKLIST_REMOVE(&jnewblk->jn_list); 5436 free_jnewblk(jnewblk); 5437 return (new); 5438} 5439 5440/* 5441 * Replace an old allocdirect dependency with a newer one. 5442 * This routine must be called with the per-filesystem lock held. 5443 */ 5444static void 5445allocdirect_merge(adphead, newadp, oldadp) 5446 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 5447 struct allocdirect *newadp; /* allocdirect being added */ 5448 struct allocdirect *oldadp; /* existing allocdirect being checked */ 5449{ 5450 struct worklist *wk; 5451 struct freefrag *freefrag; 5452 5453 freefrag = NULL; 5454 LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp)); 5455 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 5456 newadp->ad_oldsize != oldadp->ad_newsize || 5457 newadp->ad_offset >= NDADDR) 5458 panic("%s %jd != new %jd || old size %ld != new %ld", 5459 "allocdirect_merge: old blkno", 5460 (intmax_t)newadp->ad_oldblkno, 5461 (intmax_t)oldadp->ad_newblkno, 5462 newadp->ad_oldsize, oldadp->ad_newsize); 5463 newadp->ad_oldblkno = oldadp->ad_oldblkno; 5464 newadp->ad_oldsize = oldadp->ad_oldsize; 5465 /* 5466 * If the old dependency had a fragment to free or had never 5467 * previously had a block allocated, then the new dependency 5468 * can immediately post its freefrag and adopt the old freefrag. 5469 * This action is done by swapping the freefrag dependencies. 5470 * The new dependency gains the old one's freefrag, and the 5471 * old one gets the new one and then immediately puts it on 5472 * the worklist when it is freed by free_newblk. It is 5473 * not possible to do this swap when the old dependency had a 5474 * non-zero size but no previous fragment to free. This condition 5475 * arises when the new block is an extension of the old block. 5476 * Here, the first part of the fragment allocated to the new 5477 * dependency is part of the block currently claimed on disk by 5478 * the old dependency, so cannot legitimately be freed until the 5479 * conditions for the new dependency are fulfilled. 5480 */ 5481 freefrag = newadp->ad_freefrag; 5482 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 5483 newadp->ad_freefrag = oldadp->ad_freefrag; 5484 oldadp->ad_freefrag = freefrag; 5485 } 5486 /* 5487 * If we are tracking a new directory-block allocation, 5488 * move it from the old allocdirect to the new allocdirect. 5489 */ 5490 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 5491 WORKLIST_REMOVE(wk); 5492 if (!LIST_EMPTY(&oldadp->ad_newdirblk)) 5493 panic("allocdirect_merge: extra newdirblk"); 5494 WORKLIST_INSERT(&newadp->ad_newdirblk, wk); 5495 } 5496 TAILQ_REMOVE(adphead, oldadp, ad_next); 5497 /* 5498 * We need to move any journal dependencies over to the freefrag 5499 * that releases this block if it exists.
Otherwise we are 5500 * extending an existing block and we'll wait until that is 5501 * complete to release the journal space and extend the 5502 * new journal to cover this old space as well. 5503 */ 5504 if (freefrag == NULL) { 5505 if (oldadp->ad_newblkno != newadp->ad_newblkno) 5506 panic("allocdirect_merge: %jd != %jd", 5507 oldadp->ad_newblkno, newadp->ad_newblkno); 5508 newadp->ad_block.nb_jnewblk = (struct jnewblk *) 5509 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, 5510 &oldadp->ad_block.nb_jnewblk->jn_list, 5511 &newadp->ad_block.nb_jwork); 5512 oldadp->ad_block.nb_jnewblk = NULL; 5513 cancel_newblk(&oldadp->ad_block, NULL, 5514 &newadp->ad_block.nb_jwork); 5515 } else { 5516 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, 5517 &freefrag->ff_list, &freefrag->ff_jwork); 5518 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, 5519 &freefrag->ff_jwork); 5520 } 5521 free_newblk(&oldadp->ad_block); 5522} 5523 5524/* 5525 * Allocate a jfreefrag structure to journal a single block free. 5526 */ 5527static struct jfreefrag * 5528newjfreefrag(freefrag, ip, blkno, size, lbn) 5529 struct freefrag *freefrag; 5530 struct inode *ip; 5531 ufs2_daddr_t blkno; 5532 long size; 5533 ufs_lbn_t lbn; 5534{ 5535 struct jfreefrag *jfreefrag; 5536 struct fs *fs; 5537 5538 fs = ip->i_fs; 5539 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, 5540 M_SOFTDEP_FLAGS); 5541 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump)); 5542 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); 5543 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; 5544 jfreefrag->fr_ino = ip->i_number; 5545 jfreefrag->fr_lbn = lbn; 5546 jfreefrag->fr_blkno = blkno; 5547 jfreefrag->fr_frags = numfrags(fs, size); 5548 jfreefrag->fr_freefrag = freefrag; 5549 5550 return (jfreefrag); 5551} 5552 5553/* 5554 * Allocate a new freefrag structure. 5555 */ 5556static struct freefrag * 5557newfreefrag(ip, blkno, size, lbn) 5558 struct inode *ip; 5559 ufs2_daddr_t blkno; 5560 long size; 5561 ufs_lbn_t lbn; 5562{ 5563 struct freefrag *freefrag; 5564 struct fs *fs; 5565 5566 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", 5567 ip->i_number, blkno, size, lbn); 5568 fs = ip->i_fs; 5569 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 5570 panic("newfreefrag: frag size"); 5571 freefrag = malloc(sizeof(struct freefrag), 5572 M_FREEFRAG, M_SOFTDEP_FLAGS); 5573 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump)); 5574 freefrag->ff_state = ATTACHED; 5575 LIST_INIT(&freefrag->ff_jwork); 5576 freefrag->ff_inum = ip->i_number; 5577 freefrag->ff_vtype = ITOV(ip)->v_type; 5578 freefrag->ff_blkno = blkno; 5579 freefrag->ff_fragsize = size; 5580 5581 if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) { 5582 freefrag->ff_jdep = (struct worklist *) 5583 newjfreefrag(freefrag, ip, blkno, size, lbn); 5584 } else { 5585 freefrag->ff_state |= DEPCOMPLETE; 5586 freefrag->ff_jdep = NULL; 5587 } 5588 5589 return (freefrag); 5590} 5591 5592/* 5593 * This workitem de-allocates fragments that were replaced during 5594 * file block allocation. 
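 *
 * Lifecycle sketch: the freefrag is created by newfreefrag() when an
 * allocation displaces an old fragment, waits until the updated inode
 * pointer is safely on disk, and is dispatched here to hand the
 * superseded fragment back to ffs_blkfree().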
5595 */ 5596static void 5597handle_workitem_freefrag(freefrag) 5598 struct freefrag *freefrag; 5599{ 5600 struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp); 5601 struct workhead wkhd; 5602 5603 CTR3(KTR_SUJ, 5604 "handle_workitem_freefrag: ino %d blkno %jd size %ld", 5605 freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize); 5606 /* 5607 * It would be illegal to add new completion items to the 5608 * freefrag after it was scheduled to be done so it must be 5609 * safe to modify the list head here. 5610 */ 5611 LIST_INIT(&wkhd); 5612 ACQUIRE_LOCK(ump); 5613 LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list); 5614 /* 5615 * If the journal has not been written we must cancel it here. 5616 */ 5617 if (freefrag->ff_jdep) { 5618 if (freefrag->ff_jdep->wk_type != D_JNEWBLK) 5619 panic("handle_workitem_freefrag: Unexpected type %d\n", 5620 freefrag->ff_jdep->wk_type); 5621 cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd); 5622 } 5623 FREE_LOCK(ump); 5624 ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno, 5625 freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd); 5626 ACQUIRE_LOCK(ump); 5627 WORKITEM_FREE(freefrag, D_FREEFRAG); 5628 FREE_LOCK(ump); 5629} 5630 5631/* 5632 * Set up a dependency structure for an external attributes data block. 5633 * This routine follows much of the structure of softdep_setup_allocdirect. 5634 * See the description of softdep_setup_allocdirect above for details. 5635 */ 5636void 5637softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp) 5638 struct inode *ip; 5639 ufs_lbn_t off; 5640 ufs2_daddr_t newblkno; 5641 ufs2_daddr_t oldblkno; 5642 long newsize; 5643 long oldsize; 5644 struct buf *bp; 5645{ 5646 struct allocdirect *adp, *oldadp; 5647 struct allocdirectlst *adphead; 5648 struct freefrag *freefrag; 5649 struct inodedep *inodedep; 5650 struct jnewblk *jnewblk; 5651 struct newblk *newblk; 5652 struct mount *mp; 5653 ufs_lbn_t lbn; 5654 5655 mp = UFSTOVFS(ip->i_ump); 5656 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5657 ("softdep_setup_allocext called on non-softdep filesystem")); 5658 KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld >= NXADDR", 5659 (long long)off)); 5660 5661 lbn = bp->b_lblkno; 5662 if (oldblkno && oldblkno != newblkno) 5663 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); 5664 else 5665 freefrag = NULL; 5666 5667 ACQUIRE_LOCK(ip->i_ump); 5668 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) 5669 panic("softdep_setup_allocext: lost block"); 5670 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5671 ("softdep_setup_allocext: newblk already initialized")); 5672 /* 5673 * Convert the newblk to an allocdirect. 5674 */ 5675 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); 5676 adp = (struct allocdirect *)newblk; 5677 newblk->nb_freefrag = freefrag; 5678 adp->ad_offset = off; 5679 adp->ad_oldblkno = oldblkno; 5680 adp->ad_newsize = newsize; 5681 adp->ad_oldsize = oldsize; 5682 adp->ad_state |= EXTDATA; 5683 5684 /* 5685 * Finish initializing the journal.
5686 */ 5687 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5688 jnewblk->jn_ino = ip->i_number; 5689 jnewblk->jn_lbn = lbn; 5690 add_to_journal(&jnewblk->jn_list); 5691 } 5692 if (freefrag && freefrag->ff_jdep != NULL && 5693 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5694 add_to_journal(freefrag->ff_jdep); 5695 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep); 5696 adp->ad_inodedep = inodedep; 5697 5698 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5699 /* 5700 * The list of allocdirects must be kept in sorted and ascending 5701 * order so that the rollback routines can quickly determine the 5702 * first uncommitted block (the size of the file stored on disk 5703 * ends at the end of the lowest committed fragment, or if there 5704 * are no fragments, at the end of the highest committed block). 5705 * Since files generally grow, the typical case is that the new 5706 * block is to be added at the end of the list. We speed this 5707 * special case by checking against the last allocdirect in the 5708 * list before laboriously traversing the list looking for the 5709 * insertion point. 5710 */ 5711 adphead = &inodedep->id_newextupdt; 5712 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5713 if (oldadp == NULL || oldadp->ad_offset <= off) { 5714 /* insert at end of list */ 5715 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5716 if (oldadp != NULL && oldadp->ad_offset == off) 5717 allocdirect_merge(adphead, adp, oldadp); 5718 FREE_LOCK(ip->i_ump); 5719 return; 5720 } 5721 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5722 if (oldadp->ad_offset >= off) 5723 break; 5724 } 5725 if (oldadp == NULL) 5726 panic("softdep_setup_allocext: lost entry"); 5727 /* insert in middle of list */ 5728 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5729 if (oldadp->ad_offset == off) 5730 allocdirect_merge(adphead, adp, oldadp); 5731 FREE_LOCK(ip->i_ump); 5732} 5733 5734/* 5735 * Indirect block allocation dependencies. 5736 * 5737 * The same dependencies that exist for a direct block also exist when 5738 * a new block is allocated and pointed to by an entry in a block of 5739 * indirect pointers. The undo/redo states described above are also 5740 * used here. Because an indirect block contains many pointers that 5741 * may have dependencies, a second copy of the entire in-memory indirect 5742 * block is kept. The buffer cache copy is always completely up-to-date. 5743 * The second copy, which is used only as a source for disk writes, 5744 * contains only the safe pointers (i.e., those that have no remaining 5745 * update dependencies). The second copy is freed when all pointers 5746 * are safe. The cache is not allowed to replace indirect blocks with 5747 * pending update dependencies. If a buffer containing an indirect 5748 * block with dependencies is written, these routines will mark it 5749 * dirty again. It can only be successfully written once all the 5750 * dependencies are removed. The ffs_fsync routine in conjunction with 5751 * softdep_sync_metadata work together to get all the dependencies 5752 * removed so that a file can be successfully written to disk. Three 5753 * procedures are used when setting up indirect block pointer 5754 * dependencies. The division is necessary because of the organization 5755 * of the "balloc" routine and because of the distinction between file 5756 * pages and file metadata blocks. 5757 */ 5758 5759/* 5760 * Allocate a new allocindir structure. 
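 * As with the direct-block routines above, the newblk recorded when
 * the bitmap was updated is converted in place; a hypothetical caller
 * sketch (names abbreviated, not verbatim):
 *
 *	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
 *	...aip->ai_offset == ptrno, aip->ai_oldblkno == oldblkno...
 *
 * Any displaced fragment is queued on a freefrag exactly as in the
 * direct-block case.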
5761 */ 5762static struct allocindir * 5763newallocindir(ip, ptrno, newblkno, oldblkno, lbn) 5764 struct inode *ip; /* inode for file being extended */ 5765 int ptrno; /* offset of pointer in indirect block */ 5766 ufs2_daddr_t newblkno; /* disk block number being added */ 5767 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5768 ufs_lbn_t lbn; 5769{ 5770 struct newblk *newblk; 5771 struct allocindir *aip; 5772 struct freefrag *freefrag; 5773 struct jnewblk *jnewblk; 5774 5775 if (oldblkno) 5776 freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn); 5777 else 5778 freefrag = NULL; 5779 ACQUIRE_LOCK(ip->i_ump); 5780 if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0) 5781 panic("new_allocindir: lost block"); 5782 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5783 ("newallocindir: newblk already initialized")); 5784 WORKITEM_REASSIGN(newblk, D_ALLOCINDIR); 5785 newblk->nb_freefrag = freefrag; 5786 aip = (struct allocindir *)newblk; 5787 aip->ai_offset = ptrno; 5788 aip->ai_oldblkno = oldblkno; 5789 aip->ai_lbn = lbn; 5790 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5791 jnewblk->jn_ino = ip->i_number; 5792 jnewblk->jn_lbn = lbn; 5793 add_to_journal(&jnewblk->jn_list); 5794 } 5795 if (freefrag && freefrag->ff_jdep != NULL && 5796 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5797 add_to_journal(freefrag->ff_jdep); 5798 return (aip); 5799} 5800 5801/* 5802 * Called just before setting an indirect block pointer 5803 * to a newly allocated file page. 5804 */ 5805void 5806softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) 5807 struct inode *ip; /* inode for file being extended */ 5808 ufs_lbn_t lbn; /* allocated block number within file */ 5809 struct buf *bp; /* buffer with indirect blk referencing page */ 5810 int ptrno; /* offset of pointer in indirect block */ 5811 ufs2_daddr_t newblkno; /* disk block number being added */ 5812 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5813 struct buf *nbp; /* buffer holding allocated page */ 5814{ 5815 struct inodedep *inodedep; 5816 struct freefrag *freefrag; 5817 struct allocindir *aip; 5818 struct pagedep *pagedep; 5819 struct mount *mp; 5820 int dflags; 5821 5822 mp = UFSTOVFS(ip->i_ump); 5823 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5824 ("softdep_setup_allocindir_page called on non-softdep filesystem")); 5825 KASSERT(lbn == nbp->b_lblkno, 5826 ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd", 5827 lbn, nbp->b_lblkno)); 5828 CTR4(KTR_SUJ, 5829 "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd " 5830 "lbn %jd", ip->i_number, newblkno, oldblkno, lbn); 5831 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page"); 5832 aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn); 5833 dflags = DEPALLOC; 5834 if (IS_SNAPSHOT(ip)) 5835 dflags |= NODELAY; 5836 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 5837 /* 5838 * If we are allocating a directory page, then we must 5839 * allocate an associated pagedep to track additions and 5840 * deletions. 5841 */ 5842 if ((ip->i_mode & IFMT) == IFDIR) 5843 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); 5844 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5845 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); 5846 FREE_LOCK(ip->i_ump); 5847 if (freefrag) 5848 handle_workitem_freefrag(freefrag); 5849} 5850 5851/* 5852 * Called just before setting an indirect block pointer to a 5853 * newly allocated indirect block.
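 * The expected calling order, shown schematically for a hypothetical
 * caller along the lines of ffs_balloc (paraphrased, not verbatim):
 *
 *	nbp = getblk(...);				new indirect
 *	softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno);
 *	bap[ptrno] = newblkno;				now safe to set
 *
 * The dependency must exist before the in-memory pointer is set so
 * that a premature write of bp can be rolled back.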
5854 */ 5855void 5856softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 5857 struct buf *nbp; /* newly allocated indirect block */ 5858 struct inode *ip; /* inode for file being extended */ 5859 struct buf *bp; /* indirect block referencing allocated block */ 5860 int ptrno; /* offset of pointer in indirect block */ 5861 ufs2_daddr_t newblkno; /* disk block number being added */ 5862{ 5863 struct inodedep *inodedep; 5864 struct allocindir *aip; 5865 ufs_lbn_t lbn; 5866 int dflags; 5867 5868 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 5869 ("softdep_setup_allocindir_meta called on non-softdep filesystem")); 5870 CTR3(KTR_SUJ, 5871 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", 5872 ip->i_number, newblkno, ptrno); 5873 lbn = nbp->b_lblkno; 5874 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); 5875 aip = newallocindir(ip, ptrno, newblkno, 0, lbn); 5876 dflags = DEPALLOC; 5877 if (IS_SNAPSHOT(ip)) 5878 dflags |= NODELAY; 5879 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep); 5880 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5881 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) 5882 panic("softdep_setup_allocindir_meta: Block already existed"); 5883 FREE_LOCK(ip->i_ump); 5884} 5885 5886static void 5887indirdep_complete(indirdep) 5888 struct indirdep *indirdep; 5889{ 5890 struct allocindir *aip; 5891 5892 LIST_REMOVE(indirdep, ir_next); 5893 indirdep->ir_state |= DEPCOMPLETE; 5894 5895 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { 5896 LIST_REMOVE(aip, ai_next); 5897 free_newblk(&aip->ai_block); 5898 } 5899 /* 5900 * If this indirdep is not attached to a buf it was simply waiting 5901 * on completion to clear completehd. free_indirdep() asserts 5902 * that nothing is dangling. 5903 */ 5904 if ((indirdep->ir_state & ONWORKLIST) == 0) 5905 free_indirdep(indirdep); 5906} 5907 5908static struct indirdep * 5909indirdep_lookup(mp, ip, bp) 5910 struct mount *mp; 5911 struct inode *ip; 5912 struct buf *bp; 5913{ 5914 struct indirdep *indirdep, *newindirdep; 5915 struct newblk *newblk; 5916 struct ufsmount *ump; 5917 struct worklist *wk; 5918 struct fs *fs; 5919 ufs2_daddr_t blkno; 5920 5921 ump = VFSTOUFS(mp); 5922 LOCK_OWNED(ump); 5923 indirdep = NULL; 5924 newindirdep = NULL; 5925 fs = ip->i_fs; 5926 for (;;) { 5927 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5928 if (wk->wk_type != D_INDIRDEP) 5929 continue; 5930 indirdep = WK_INDIRDEP(wk); 5931 break; 5932 } 5933 /* Found on the buffer worklist, no new structure to free. */ 5934 if (indirdep != NULL && newindirdep == NULL) 5935 return (indirdep); 5936 if (indirdep != NULL && newindirdep != NULL) 5937 panic("indirdep_lookup: simultaneous create"); 5938 /* None found on the buffer and a new structure is ready. */ 5939 if (indirdep == NULL && newindirdep != NULL) 5940 break; 5941 /* None found and no new structure available. 
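 * Allocate one below using the usual drop-and-retry idiom
 * (paraphrased):
 *
 *	FREE_LOCK(ump);
 *	newindirdep = malloc(...);	M_SOFTDEP_FLAGS may sleep
 *	...initialize lists, snapshot bp into ir_savebp...
 *	ACQUIRE_LOCK(ump);
 *	...top of loop re-searches b_dep...
 *
 * The caller holds bp locked, so nothing should attach to b_dep while
 * the lock is dropped; the "simultaneous create" panic above asserts
 * exactly that.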
*/ 5942 FREE_LOCK(ump); 5943 newindirdep = malloc(sizeof(struct indirdep), 5944 M_INDIRDEP, M_SOFTDEP_FLAGS); 5945 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); 5946 newindirdep->ir_state = ATTACHED; 5947 if (ip->i_ump->um_fstype == UFS1) 5948 newindirdep->ir_state |= UFS1FMT; 5949 TAILQ_INIT(&newindirdep->ir_trunc); 5950 newindirdep->ir_saveddata = NULL; 5951 LIST_INIT(&newindirdep->ir_deplisthd); 5952 LIST_INIT(&newindirdep->ir_donehd); 5953 LIST_INIT(&newindirdep->ir_writehd); 5954 LIST_INIT(&newindirdep->ir_completehd); 5955 if (bp->b_blkno == bp->b_lblkno) { 5956 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 5957 NULL, NULL); 5958 bp->b_blkno = blkno; 5959 } 5960 newindirdep->ir_freeblks = NULL; 5961 newindirdep->ir_savebp = 5962 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); 5963 newindirdep->ir_bp = bp; 5964 BUF_KERNPROC(newindirdep->ir_savebp); 5965 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 5966 ACQUIRE_LOCK(ump); 5967 } 5968 indirdep = newindirdep; 5969 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 5970 /* 5971 * If the block is not yet allocated we don't set DEPCOMPLETE so 5972 * that we don't free dependencies until the pointers are valid. 5973 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather 5974 * than using the hash. 5975 */ 5976 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) 5977 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); 5978 else 5979 indirdep->ir_state |= DEPCOMPLETE; 5980 return (indirdep); 5981} 5982 5983/* 5984 * Called to finish the allocation of the "aip" allocated 5985 * by one of the two routines above. 5986 */ 5987static struct freefrag * 5988setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) 5989 struct buf *bp; /* in-memory copy of the indirect block */ 5990 struct inode *ip; /* inode for file being extended */ 5991 struct inodedep *inodedep; /* Inodedep for ip */ 5992 struct allocindir *aip; /* allocindir allocated by the above routines */ 5993 ufs_lbn_t lbn; /* Logical block number for this block. */ 5994{ 5995 struct fs *fs; 5996 struct indirdep *indirdep; 5997 struct allocindir *oldaip; 5998 struct freefrag *freefrag; 5999 struct mount *mp; 6000 6001 LOCK_OWNED(ip->i_ump); 6002 mp = UFSTOVFS(ip->i_ump); 6003 fs = ip->i_fs; 6004 if (bp->b_lblkno >= 0) 6005 panic("setup_allocindir_phase2: not indir blk"); 6006 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), 6007 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); 6008 indirdep = indirdep_lookup(mp, ip, bp); 6009 KASSERT(indirdep->ir_savebp != NULL, 6010 ("setup_allocindir_phase2 NULL ir_savebp")); 6011 aip->ai_indirdep = indirdep; 6012 /* 6013 * Check for an unwritten dependency for this indirect offset. If 6014 * there is, merge the old dependency into the new one. This happens 6015 * as a result of reallocblk only. 6016 */ 6017 freefrag = NULL; 6018 if (aip->ai_oldblkno != 0) { 6019 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { 6020 if (oldaip->ai_offset == aip->ai_offset) { 6021 freefrag = allocindir_merge(aip, oldaip); 6022 goto done; 6023 } 6024 } 6025 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { 6026 if (oldaip->ai_offset == aip->ai_offset) { 6027 freefrag = allocindir_merge(aip, oldaip); 6028 goto done; 6029 } 6030 } 6031 } 6032done: 6033 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 6034 return (freefrag); 6035} 6036 6037/* 6038 * Merge two allocindirs which refer to the same block. Move newblock 6039 * dependencies and setup the freefrags appropriately. 
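 * This situation only arises via reallocblk.  For example
 * (hypothetical): lbn 100 is first given block A, then reallocated to
 * block B before the indirect pointer naming A is ever written.  Both
 * allocindirs share ai_offset; the new one (B) inherits the old one's
 * ai_oldblkno and freefrag, while the freefrag that will release A
 * also absorbs the canceled journal work from A's allocation.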
6040 */ 6041static struct freefrag * 6042allocindir_merge(aip, oldaip) 6043 struct allocindir *aip; 6044 struct allocindir *oldaip; 6045{ 6046 struct freefrag *freefrag; 6047 struct worklist *wk; 6048 6049 if (oldaip->ai_newblkno != aip->ai_oldblkno) 6050 panic("allocindir_merge: blkno"); 6051 aip->ai_oldblkno = oldaip->ai_oldblkno; 6052 freefrag = aip->ai_freefrag; 6053 aip->ai_freefrag = oldaip->ai_freefrag; 6054 oldaip->ai_freefrag = NULL; 6055 KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag")); 6056 /* 6057 * If we are tracking a new directory-block allocation, 6058 * move it from the old allocindir to the new allocindir. 6059 */ 6060 if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) { 6061 WORKLIST_REMOVE(wk); 6062 if (!LIST_EMPTY(&oldaip->ai_newdirblk)) 6063 panic("allocindir_merge: extra newdirblk"); 6064 WORKLIST_INSERT(&aip->ai_newdirblk, wk); 6065 } 6066 /* 6067 * We can skip journaling for this freefrag and just complete 6068 * any pending journal work for the allocindir that is being 6069 * removed after the freefrag completes. 6070 */ 6071 if (freefrag->ff_jdep) 6072 cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep)); 6073 LIST_REMOVE(oldaip, ai_next); 6074 freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block, 6075 &freefrag->ff_list, &freefrag->ff_jwork); 6076 free_newblk(&oldaip->ai_block); 6077 6078 return (freefrag); 6079} 6080 6081static inline void 6082setup_freedirect(freeblks, ip, i, needj) 6083 struct freeblks *freeblks; 6084 struct inode *ip; 6085 int i; 6086 int needj; 6087{ 6088 ufs2_daddr_t blkno; 6089 int frags; 6090 6091 blkno = DIP(ip, i_db[i]); 6092 if (blkno == 0) 6093 return; 6094 DIP_SET(ip, i_db[i], 0); 6095 frags = sblksize(ip->i_fs, ip->i_size, i); 6096 frags = numfrags(ip->i_fs, frags); 6097 newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj); 6098} 6099 6100static inline void 6101setup_freeext(freeblks, ip, i, needj) 6102 struct freeblks *freeblks; 6103 struct inode *ip; 6104 int i; 6105 int needj; 6106{ 6107 ufs2_daddr_t blkno; 6108 int frags; 6109 6110 blkno = ip->i_din2->di_extb[i]; 6111 if (blkno == 0) 6112 return; 6113 ip->i_din2->di_extb[i] = 0; 6114 frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i); 6115 frags = numfrags(ip->i_fs, frags); 6116 newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj); 6117} 6118 6119static inline void 6120setup_freeindir(freeblks, ip, i, lbn, needj) 6121 struct freeblks *freeblks; 6122 struct inode *ip; 6123 int i; 6124 ufs_lbn_t lbn; 6125 int needj; 6126{ 6127 ufs2_daddr_t blkno; 6128 6129 blkno = DIP(ip, i_ib[i]); 6130 if (blkno == 0) 6131 return; 6132 DIP_SET(ip, i_ib[i], 0); 6133 newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag, 6134 0, needj); 6135} 6136 6137static inline struct freeblks * 6138newfreeblks(mp, ip) 6139 struct mount *mp; 6140 struct inode *ip; 6141{ 6142 struct freeblks *freeblks; 6143 6144 freeblks = malloc(sizeof(struct freeblks), 6145 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 6146 workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp); 6147 LIST_INIT(&freeblks->fb_jblkdephd); 6148 LIST_INIT(&freeblks->fb_jwork); 6149 freeblks->fb_ref = 0; 6150 freeblks->fb_cgwait = 0; 6151 freeblks->fb_state = ATTACHED; 6152 freeblks->fb_uid = ip->i_uid; 6153 freeblks->fb_inum = ip->i_number; 6154 freeblks->fb_vtype = ITOV(ip)->v_type; 6155 freeblks->fb_modrev = DIP(ip, i_modrev); 6156 freeblks->fb_devvp = ip->i_devvp; 6157 freeblks->fb_chkcnt = 0; 6158 freeblks->fb_len = 0; 6159 6160 return (freeblks); 6161} 6162 6163static 
void 6164trunc_indirdep(indirdep, freeblks, bp, off) 6165 struct indirdep *indirdep; 6166 struct freeblks *freeblks; 6167 struct buf *bp; 6168 int off; 6169{ 6170 struct allocindir *aip, *aipn; 6171 6172 /* 6173 * The first set of allocindirs won't be in savedbp. 6174 */ 6175 LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn) 6176 if (aip->ai_offset > off) 6177 cancel_allocindir(aip, bp, freeblks, 1); 6178 LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn) 6179 if (aip->ai_offset > off) 6180 cancel_allocindir(aip, bp, freeblks, 1); 6181 /* 6182 * These will exist in savedbp. 6183 */ 6184 LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn) 6185 if (aip->ai_offset > off) 6186 cancel_allocindir(aip, NULL, freeblks, 0); 6187 LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn) 6188 if (aip->ai_offset > off) 6189 cancel_allocindir(aip, NULL, freeblks, 0); 6190} 6191 6192/* 6193 * Follow the chain of indirects down to lastlbn creating a freework 6194 * structure for each. This will be used to start indir_trunc() at 6195 * the right offset and create the journal records for the partial 6196 * truncation. A second step will handle the truncated dependencies. 6197 */ 6198static int 6199setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno) 6200 struct freeblks *freeblks; 6201 struct inode *ip; 6202 ufs_lbn_t lbn; 6203 ufs_lbn_t lastlbn; 6204 ufs2_daddr_t blkno; 6205{ 6206 struct indirdep *indirdep; 6207 struct indirdep *indirn; 6208 struct freework *freework; 6209 struct newblk *newblk; 6210 struct mount *mp; 6211 struct buf *bp; 6212 uint8_t *start; 6213 uint8_t *end; 6214 ufs_lbn_t lbnadd; 6215 int level; 6216 int error; 6217 int off; 6218 6219 6220 freework = NULL; 6221 if (blkno == 0) 6222 return (0); 6223 mp = freeblks->fb_list.wk_mp; 6224 bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0); 6225 if ((bp->b_flags & B_CACHE) == 0) { 6226 bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno); 6227 bp->b_iocmd = BIO_READ; 6228 bp->b_flags &= ~B_INVAL; 6229 bp->b_ioflags &= ~BIO_ERROR; 6230 vfs_busy_pages(bp, 0); 6231 bp->b_iooffset = dbtob(bp->b_blkno); 6232 bstrategy(bp); 6233 curthread->td_ru.ru_inblock++; 6234 error = bufwait(bp); 6235 if (error) { 6236 brelse(bp); 6237 return (error); 6238 } 6239 } 6240 level = lbn_level(lbn); 6241 lbnadd = lbn_offset(ip->i_fs, level); 6242 /* 6243 * Compute the offset of the last block we want to keep. Store 6244 * in the freework the first block we want to completely free. 6245 */ 6246 off = (lastlbn - -(lbn + level)) / lbnadd; 6247 if (off + 1 == NINDIR(ip->i_fs)) 6248 goto nowork; 6249 freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1, 6250 0); 6251 /* 6252 * Link the freework into the indirdep. This will prevent any new 6253 * allocations from proceeding until we are finished with the 6254 * truncate and the block is written. 6255 */ 6256 ACQUIRE_LOCK(ip->i_ump); 6257 indirdep = indirdep_lookup(mp, ip, bp); 6258 if (indirdep->ir_freeblks) 6259 panic("setup_trunc_indir: indirdep already truncated."); 6260 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); 6261 freework->fw_indir = indirdep; 6262 /* 6263 * Cancel any allocindirs that will not make it to disk. 6264 * We have to do this for all copies of the indirdep that 6265 * live on this newblk.
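 * To make the ranges concrete (illustrative numbers only): on a UFS2
 * file system with 16K blocks NINDIR(fs) is 2048, so a first-level
 * indirect covers lbns NDADDR through NDADDR + 2047.  When lastlbn
 * falls inside that range, the off computed above names the last
 * pointer slot to keep; slots off + 1 through 2047 are zeroed below
 * and the allocindirs for them are canceled here.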
6266 */ 6267 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 6268 newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk); 6269 LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next) 6270 trunc_indirdep(indirn, freeblks, bp, off); 6271 } else 6272 trunc_indirdep(indirdep, freeblks, bp, off); 6273 FREE_LOCK(ip->i_ump); 6274 /* 6275 * Creation is protected by the buf lock. The saveddata is only 6276 * needed if a full truncation follows a partial truncation but it 6277 * is difficult to allocate in that case so we fetch it anyway. 6278 */ 6279 if (indirdep->ir_saveddata == NULL) 6280 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 6281 M_SOFTDEP_FLAGS); 6282nowork: 6283 /* Fetch the blkno of the child and the zero start offset. */ 6284 if (ip->i_ump->um_fstype == UFS1) { 6285 blkno = ((ufs1_daddr_t *)bp->b_data)[off]; 6286 start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1]; 6287 } else { 6288 blkno = ((ufs2_daddr_t *)bp->b_data)[off]; 6289 start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1]; 6290 } 6291 if (freework) { 6292 /* Zero the truncated pointers. */ 6293 end = bp->b_data + bp->b_bcount; 6294 bzero(start, end - start); 6295 bdwrite(bp); 6296 } else 6297 bqrelse(bp); 6298 if (level == 0) 6299 return (0); 6300 lbn++; /* adjust level */ 6301 lbn -= (off * lbnadd); 6302 return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno); 6303} 6304 6305/* 6306 * Complete the partial truncation of an indirect block set up by 6307 * setup_trunc_indir(). This zeros the truncated pointers in the saved 6308 * copy and writes them to disk before the freeblks is allowed to complete. 6309 */ 6310static void 6311complete_trunc_indir(freework) 6312 struct freework *freework; 6313{ 6314 struct freework *fwn; 6315 struct indirdep *indirdep; 6316 struct ufsmount *ump; 6317 struct buf *bp; 6318 uintptr_t start; 6319 int count; 6320 6321 ump = VFSTOUFS(freework->fw_list.wk_mp); 6322 LOCK_OWNED(ump); 6323 indirdep = freework->fw_indir; 6324 for (;;) { 6325 bp = indirdep->ir_bp; 6326 /* See if the block was discarded. */ 6327 if (bp == NULL) 6328 break; 6329 /* Inline part of getdirtybuf(). We don't want bremfree. */ 6330 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) 6331 break; 6332 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 6333 LOCK_PTR(ump)) == 0) 6334 BUF_UNLOCK(bp); 6335 ACQUIRE_LOCK(ump); 6336 } 6337 freework->fw_state |= DEPCOMPLETE; 6338 TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next); 6339 /* 6340 * Zero the pointers in the saved copy. 6341 */ 6342 if (indirdep->ir_state & UFS1FMT) 6343 start = sizeof(ufs1_daddr_t); 6344 else 6345 start = sizeof(ufs2_daddr_t); 6346 start *= freework->fw_start; 6347 count = indirdep->ir_savebp->b_bcount - start; 6348 start += (uintptr_t)indirdep->ir_savebp->b_data; 6349 bzero((char *)start, count); 6350 /* 6351 * We need to start the next truncation in the list if it has not 6352 * been started yet. 6353 */ 6354 fwn = TAILQ_FIRST(&indirdep->ir_trunc); 6355 if (fwn != NULL) { 6356 if (fwn->fw_freeblks == indirdep->ir_freeblks) 6357 TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next); 6358 if ((fwn->fw_state & ONWORKLIST) == 0) 6359 freework_enqueue(fwn); 6360 } 6361 /* 6362 * If bp is NULL the block was fully truncated, restore 6363 * the saved block list; otherwise free it if it is no 6364 * longer needed.
6365 */ 6366 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 6367 if (bp == NULL) 6368 bcopy(indirdep->ir_saveddata, 6369 indirdep->ir_savebp->b_data, 6370 indirdep->ir_savebp->b_bcount); 6371 free(indirdep->ir_saveddata, M_INDIRDEP); 6372 indirdep->ir_saveddata = NULL; 6373 } 6374 /* 6375 * When bp is NULL there is a full truncation pending. We 6376 * must wait for this full truncation to be journaled before 6377 * we can release this freework because the disk pointers will 6378 * never be written as zero. 6379 */ 6380 if (bp == NULL) { 6381 if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd)) 6382 handle_written_freework(freework); 6383 else 6384 WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd, 6385 &freework->fw_list); 6386 } else { 6387 /* Complete when the real copy is written. */ 6388 WORKLIST_INSERT(&bp->b_dep, &freework->fw_list); 6389 BUF_UNLOCK(bp); 6390 } 6391} 6392 6393/* 6394 * Calculate the number of blocks we are going to release where datablocks 6395 * is the current total and length is the new file size. 6396 */ 6397static ufs2_daddr_t 6398blkcount(fs, datablocks, length) 6399 struct fs *fs; 6400 ufs2_daddr_t datablocks; 6401 off_t length; 6402{ 6403 off_t totblks, numblks; 6404 6405 totblks = 0; 6406 numblks = howmany(length, fs->fs_bsize); 6407 if (numblks <= NDADDR) { 6408 totblks = howmany(length, fs->fs_fsize); 6409 goto out; 6410 } 6411 totblks = blkstofrags(fs, numblks); 6412 numblks -= NDADDR; 6413 /* 6414 * Count all single, then double, then triple indirects required. 6415 * Subtracting one indirect's worth of blocks for each pass 6416 * acknowledges one of each pointed to by the inode. 6417 */ 6418 for (;;) { 6419 totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs))); 6420 numblks -= NINDIR(fs); 6421 if (numblks <= 0) 6422 break; 6423 numblks = howmany(numblks, NINDIR(fs)); 6424 } 6425out: 6426 totblks = fsbtodb(fs, totblks); 6427 /* 6428 * Handle sparse files. We can't reclaim more blocks than the inode 6429 * references. We will correct it later in handle_complete_freeblks() 6430 * when we know the real count. 6431 */ 6432 if (totblks > datablocks) 6433 return (0); 6434 return (datablocks - totblks); 6435} 6436 6437/* 6438 * Handle freeblocks for journaled softupdate filesystems. 6439 * 6440 * Contrary to normal softupdates, we must preserve the block pointers in 6441 * indirects until their subordinates are free. This is to avoid journaling 6442 * every block that is freed which may consume more space than the journal 6443 * itself. The recovery program will see the free block journals at the 6444 * base of the truncated area and traverse them to reclaim space. The 6445 * pointers in the inode may be cleared immediately after the journal 6446 * records are written because each direct and indirect pointer in the 6447 * inode is recorded in a journal. This permits full truncation to proceed 6448 * asynchronously. The write order is journal -> inode -> cgs -> indirects. 6449 * 6450 * The algorithm is as follows: 6451 * 1) Traverse the in-memory state and create journal entries to release 6452 * the relevant blocks and full indirect trees. 6453 * 2) Traverse the indirect block chain adding partial truncation freework 6454 * records to indirects in the path to lastlbn. The freework will 6455 * prevent new allocation dependencies from being satisfied in this 6456 * indirect until the truncation completes. 6457 * 3) Read and lock the inode block, performing an update with the new size 6458 * and pointers.
This prevents truncated data from becoming valid on 6459 * disk through step 4. 6460 * 4) Reap unsatisfied dependencies that are beyond the truncated area, 6461 * and eliminate journal work for those records that do not require it. 6462 * 5) Schedule the journal records to be written followed by the inode block. 6463 * 6) Allocate any necessary frags for the end of file. 6464 * 7) Zero any partially truncated blocks. 6465 * 6466 * From this point truncation proceeds asynchronously using the freework and 6467 * indir_trunc machinery. The file will not be extended again into a 6468 * partially truncated indirect block until all work is completed but 6469 * the normal dependency mechanism ensures that it is rolled back/forward 6470 * as appropriate. Further truncation may occur without delay and is 6471 * serialized in indir_trunc(). 6472 */ 6473void 6474softdep_journal_freeblocks(ip, cred, length, flags) 6475 struct inode *ip; /* The inode whose length is to be reduced */ 6476 struct ucred *cred; 6477 off_t length; /* The new length for the file */ 6478 int flags; /* IO_EXT and/or IO_NORMAL */ 6479{ 6480 struct freeblks *freeblks, *fbn; 6481 struct worklist *wk, *wkn; 6482 struct inodedep *inodedep; 6483 struct jblkdep *jblkdep; 6484 struct allocdirect *adp, *adpn; 6485 struct ufsmount *ump; 6486 struct fs *fs; 6487 struct buf *bp; 6488 struct vnode *vp; 6489 struct mount *mp; 6490 ufs2_daddr_t extblocks, datablocks; 6491 ufs_lbn_t tmpval, lbn, lastlbn; 6492 int frags, lastoff, iboff, allocblock, needj, dflags, error, i; 6493 6494 fs = ip->i_fs; 6495 ump = ip->i_ump; 6496 mp = UFSTOVFS(ump); 6497 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 6498 ("softdep_journal_freeblocks called on non-softdep filesystem")); 6499 vp = ITOV(ip); 6500 needj = 1; 6501 iboff = -1; 6502 allocblock = 0; 6503 extblocks = 0; 6504 datablocks = 0; 6505 frags = 0; 6506 freeblks = newfreeblks(mp, ip); 6507 ACQUIRE_LOCK(ump); 6508 /* 6509 * If we're truncating a removed file that will never be written 6510 * we don't need to journal the block frees. The canceled journals 6511 * for the allocations will suffice. 6512 */ 6513 dflags = DEPALLOC; 6514 if (IS_SNAPSHOT(ip)) 6515 dflags |= NODELAY; 6516 inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6517 if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED && 6518 length == 0) 6519 needj = 0; 6520 CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d", 6521 ip->i_number, length, needj); 6522 FREE_LOCK(ump); 6523 /* 6524 * Calculate the lbn that we are truncating to. This results in -1 6525 * if we're truncating to 0 bytes. So it is the last lbn we want 6526 * to keep, not the first lbn we want to truncate. 6527 */ 6528 lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1; 6529 lastoff = blkoff(fs, length); 6530 /* 6531 * Compute frags we are keeping in lastlbn. 0 means all. 6532 */ 6533 if (lastlbn >= 0 && lastlbn < NDADDR) { 6534 frags = fragroundup(fs, lastoff); 6535 /* adp offset of last valid allocdirect. */ 6536 iboff = lastlbn; 6537 } else if (lastlbn > 0) 6538 iboff = NDADDR; 6539 if (fs->fs_magic == FS_UFS2_MAGIC) 6540 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6541 /* 6542 * Handle normal data blocks and indirects. This section saves 6543 * values used after the inode update to complete frag and indirect 6544 * truncation. 6545 */ 6546 if ((flags & IO_NORMAL) != 0) { 6547 /* 6548 * Handle truncation of whole direct and indirect blocks.
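 * The loop below walks the indirect levels in the same way as
 * softdep_setup_freeblocks; with 16K blocks (NINDIR == 2048,
 * illustrative numbers) the first level spans lbns NDADDR through
 * NDADDR + 2047, the second the following 2048^2 lbns, the third
 * 2048^3 more.  A level lying wholly past lastlbn is released
 * outright; the level containing lastlbn is handed to
 * setup_trunc_indir() for partial treatment.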
6549 */ 6550 for (i = iboff + 1; i < NDADDR; i++) 6551 setup_freedirect(freeblks, ip, i, needj); 6552 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6553 i++, lbn += tmpval, tmpval *= NINDIR(fs)) { 6554 /* Release a whole indirect tree. */ 6555 if (lbn > lastlbn) { 6556 setup_freeindir(freeblks, ip, i, -lbn -i, 6557 needj); 6558 continue; 6559 } 6560 iboff = i + NDADDR; 6561 /* 6562 * Traverse partially truncated indirect tree. 6563 */ 6564 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) 6565 setup_trunc_indir(freeblks, ip, -lbn - i, 6566 lastlbn, DIP(ip, i_ib[i])); 6567 } 6568 /* 6569 * Handle partial truncation to a frag boundary. 6570 */ 6571 if (frags) { 6572 ufs2_daddr_t blkno; 6573 long oldfrags; 6574 6575 oldfrags = blksize(fs, ip, lastlbn); 6576 blkno = DIP(ip, i_db[lastlbn]); 6577 if (blkno && oldfrags != frags) { 6578 oldfrags -= frags; 6579 oldfrags = numfrags(ip->i_fs, oldfrags); 6580 blkno += numfrags(ip->i_fs, frags); 6581 newfreework(ump, freeblks, NULL, lastlbn, 6582 blkno, oldfrags, 0, needj); 6583 if (needj) 6584 adjust_newfreework(freeblks, 6585 numfrags(ip->i_fs, frags)); 6586 } else if (blkno == 0) 6587 allocblock = 1; 6588 } 6589 /* 6590 * Add a journal record for partial truncate if we are 6591 * handling indirect blocks. Non-indirects need no extra 6592 * journaling. 6593 */ 6594 if (length != 0 && lastlbn >= NDADDR) { 6595 ip->i_flag |= IN_TRUNCATED; 6596 newjtrunc(freeblks, length, 0); 6597 } 6598 ip->i_size = length; 6599 DIP_SET(ip, i_size, ip->i_size); 6600 datablocks = DIP(ip, i_blocks) - extblocks; 6601 if (length != 0) 6602 datablocks = blkcount(ip->i_fs, datablocks, length); 6603 freeblks->fb_len = length; 6604 } 6605 if ((flags & IO_EXT) != 0) { 6606 for (i = 0; i < NXADDR; i++) 6607 setup_freeext(freeblks, ip, i, needj); 6608 ip->i_din2->di_extsize = 0; 6609 datablocks += extblocks; 6610 } 6611#ifdef QUOTA 6612 /* Reference the quotas in case the block count is wrong in the end. */ 6613 quotaref(vp, freeblks->fb_quota); 6614 (void) chkdq(ip, -datablocks, NOCRED, 0); 6615#endif 6616 freeblks->fb_chkcnt = -datablocks; 6617 UFS_LOCK(ump); 6618 fs->fs_pendingblocks += datablocks; 6619 UFS_UNLOCK(ump); 6620 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6621 /* 6622 * Handle truncation of incomplete alloc direct dependencies. We 6623 * hold the inode block locked to prevent incomplete dependencies 6624 * from reaching the disk while we are eliminating those that 6625 * have been truncated. This is a partially inlined ffs_update(). 6626 */ 6627 ufs_itimes(vp); 6628 ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); 6629 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6630 (int)fs->fs_bsize, cred, &bp); 6631 if (error) { 6632 brelse(bp); 6633 softdep_error("softdep_journal_freeblocks", error); 6634 return; 6635 } 6636 if (bp->b_bufsize == fs->fs_bsize) 6637 bp->b_flags |= B_CLUSTEROK; 6638 softdep_update_inodeblock(ip, bp, 0); 6639 if (ump->um_fstype == UFS1) 6640 *((struct ufs1_dinode *)bp->b_data + 6641 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 6642 else 6643 *((struct ufs2_dinode *)bp->b_data + 6644 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 6645 ACQUIRE_LOCK(ump); 6646 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6647 if ((inodedep->id_state & IOSTARTED) != 0) 6648 panic("softdep_setup_freeblocks: inode busy"); 6649 /* 6650 * Add the freeblks structure to the list of operations that 6651 * must await the zero'ed inode being written to disk. 
If we 6652 * still have a bitmap dependency (needj), then the inode 6653 * has never been written to disk, so we can process the 6654 * freeblks below once we have deleted the dependencies. 6655 */ 6656 if (needj) 6657 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6658 else 6659 freeblks->fb_state |= COMPLETE; 6660 if ((flags & IO_NORMAL) != 0) { 6661 TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) { 6662 if (adp->ad_offset > iboff) 6663 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6664 freeblks); 6665 /* 6666 * Truncate the allocdirect. We could eliminate 6667 * or modify journal records as well. 6668 */ 6669 else if (adp->ad_offset == iboff && frags) 6670 adp->ad_newsize = frags; 6671 } 6672 } 6673 if ((flags & IO_EXT) != 0) 6674 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6675 cancel_allocdirect(&inodedep->id_extupdt, adp, 6676 freeblks); 6677 /* 6678 * Scan the bufwait list for newblock dependencies that will never 6679 * make it to disk. 6680 */ 6681 LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) { 6682 if (wk->wk_type != D_ALLOCDIRECT) 6683 continue; 6684 adp = WK_ALLOCDIRECT(wk); 6685 if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) || 6686 ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) { 6687 cancel_jfreeblk(freeblks, adp->ad_newblkno); 6688 cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork); 6689 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 6690 } 6691 } 6692 /* 6693 * Add journal work. 6694 */ 6695 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) 6696 add_to_journal(&jblkdep->jb_list); 6697 FREE_LOCK(ump); 6698 bdwrite(bp); 6699 /* 6700 * Truncate dependency structures beyond length. 6701 */ 6702 trunc_dependencies(ip, freeblks, lastlbn, frags, flags); 6703 /* 6704 * This is only set when we need to allocate a fragment because 6705 * none existed at the end of a frag-sized file. It handles only 6706 * allocating a new, zero filled block. 6707 */ 6708 if (allocblock) { 6709 ip->i_size = length - lastoff; 6710 DIP_SET(ip, i_size, ip->i_size); 6711 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); 6712 if (error != 0) { 6713 softdep_error("softdep_journal_freeblks", error); 6714 return; 6715 } 6716 ip->i_size = length; 6717 DIP_SET(ip, i_size, length); 6718 ip->i_flag |= IN_CHANGE | IN_UPDATE; 6719 allocbuf(bp, frags); 6720 ffs_update(vp, 0); 6721 bawrite(bp); 6722 } else if (lastoff != 0 && vp->v_type != VDIR) { 6723 int size; 6724 6725 /* 6726 * Zero the end of a truncated frag or block. 6727 */ 6728 size = sblksize(fs, length, lastlbn); 6729 error = bread(vp, lastlbn, size, cred, &bp); 6730 if (error) { 6731 softdep_error("softdep_journal_freeblks", error); 6732 return; 6733 } 6734 bzero((char *)bp->b_data + lastoff, size - lastoff); 6735 bawrite(bp); 6736 6737 } 6738 ACQUIRE_LOCK(ump); 6739 inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6740 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); 6741 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; 6742 /* 6743 * We zero earlier truncations so they don't erroneously 6744 * update i_blocks. 
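 * For example (hypothetical sequence): a truncate to 4096 followed by
 * a truncate to 0 leaves two freeblks on id_freeblklst; only the
 * final, full truncation should reconcile i_blocks in
 * handle_complete_freeblks(), so the earlier record's fb_len is
 * cleared below.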
6745 */ 6746 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) 6747 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) 6748 fbn->fb_len = 0; 6749 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && 6750 LIST_EMPTY(&freeblks->fb_jblkdephd)) 6751 freeblks->fb_state |= INPROGRESS; 6752 else 6753 freeblks = NULL; 6754 FREE_LOCK(ump); 6755 if (freeblks) 6756 handle_workitem_freeblocks(freeblks, 0); 6757 trunc_pages(ip, length, extblocks, flags); 6758 6759} 6760 6761/* 6762 * Flush a JOP_SYNC to the journal. 6763 */ 6764void 6765softdep_journal_fsync(ip) 6766 struct inode *ip; 6767{ 6768 struct jfsync *jfsync; 6769 6770 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 6771 ("softdep_journal_fsync called on non-softdep filesystem")); 6772 if ((ip->i_flag & IN_TRUNCATED) == 0) 6773 return; 6774 ip->i_flag &= ~IN_TRUNCATED; 6775 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); 6776 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump)); 6777 jfsync->jfs_size = ip->i_size; 6778 jfsync->jfs_ino = ip->i_number; 6779 ACQUIRE_LOCK(ip->i_ump); 6780 add_to_journal(&jfsync->jfs_list); 6781 jwait(&jfsync->jfs_list, MNT_WAIT); 6782 FREE_LOCK(ip->i_ump); 6783} 6784 6785/* 6786 * Block de-allocation dependencies. 6787 * 6788 * When blocks are de-allocated, the on-disk pointers must be nullified before 6789 * the blocks are made available for use by other files. (The true 6790 * requirement is that old pointers must be nullified before new on-disk 6791 * pointers are set. We chose this slightly more stringent requirement to 6792 * reduce complexity.) Our implementation handles this dependency by updating 6793 * the inode (or indirect block) appropriately but delaying the actual block 6794 * de-allocation (i.e., freemap and free space count manipulation) until 6795 * after the updated versions reach stable storage. After the disk is 6796 * updated, the blocks can be safely de-allocated whenever it is convenient. 6797 * This implementation handles only the common case of reducing a file's 6798 * length to zero. Other cases are handled by the conventional synchronous 6799 * write approach. 6800 * 6801 * The ffs implementation with which we worked double-checks 6802 * the state of the block pointers and file size as it reduces 6803 * a file's length. Some of this code is replicated here in our 6804 * soft updates implementation. The freeblks->fb_chkcnt field is 6805 * used to transfer a part of this information to the procedure 6806 * that eventually de-allocates the blocks. 6807 * 6808 * This routine should be called from the routine that shortens 6809 * a file's length, before the inode's size or block pointers 6810 * are modified. It will save the block pointer information for 6811 * later release and zero the inode so that the calling routine 6812 * can release it. 
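 * Schematically, the expected use from the truncation path is
 * (paraphrased, hypothetical):
 *
 *	softdep_setup_freeblocks(ip, 0, IO_NORMAL | IO_EXT);
 *	...i_size and all di_db/di_ib are now zero in core...
 *	ffs_update(vp, 0);		eventually writes the inode
 *
 * and the recorded blocks are handed back to the free maps only after
 * the zeroed inode reaches stable storage.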
6813 */ 6814void 6815softdep_setup_freeblocks(ip, length, flags) 6816 struct inode *ip; /* The inode whose length is to be reduced */ 6817 off_t length; /* The new length for the file */ 6818 int flags; /* IO_EXT and/or IO_NORMAL */ 6819{ 6820 struct ufs1_dinode *dp1; 6821 struct ufs2_dinode *dp2; 6822 struct freeblks *freeblks; 6823 struct inodedep *inodedep; 6824 struct allocdirect *adp; 6825 struct ufsmount *ump; 6826 struct buf *bp; 6827 struct fs *fs; 6828 ufs2_daddr_t extblocks, datablocks; 6829 struct mount *mp; 6830 int i, delay, error, dflags; 6831 ufs_lbn_t tmpval; 6832 ufs_lbn_t lbn; 6833 6834 ump = ip->i_ump; 6835 mp = UFSTOVFS(ump); 6836 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 6837 ("softdep_setup_freeblocks called on non-softdep filesystem")); 6838 CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld", 6839 ip->i_number, length); 6840 KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length")); 6841 fs = ip->i_fs; 6842 freeblks = newfreeblks(mp, ip); 6843 extblocks = 0; 6844 datablocks = 0; 6845 if (fs->fs_magic == FS_UFS2_MAGIC) 6846 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6847 if ((flags & IO_NORMAL) != 0) { 6848 for (i = 0; i < NDADDR; i++) 6849 setup_freedirect(freeblks, ip, i, 0); 6850 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6851 i++, lbn += tmpval, tmpval *= NINDIR(fs)) 6852 setup_freeindir(freeblks, ip, i, -lbn -i, 0); 6853 ip->i_size = 0; 6854 DIP_SET(ip, i_size, 0); 6855 datablocks = DIP(ip, i_blocks) - extblocks; 6856 } 6857 if ((flags & IO_EXT) != 0) { 6858 for (i = 0; i < NXADDR; i++) 6859 setup_freeext(freeblks, ip, i, 0); 6860 ip->i_din2->di_extsize = 0; 6861 datablocks += extblocks; 6862 } 6863#ifdef QUOTA 6864 /* Reference the quotas in case the block count is wrong in the end. */ 6865 quotaref(ITOV(ip), freeblks->fb_quota); 6866 (void) chkdq(ip, -datablocks, NOCRED, 0); 6867#endif 6868 freeblks->fb_chkcnt = -datablocks; 6869 UFS_LOCK(ump); 6870 fs->fs_pendingblocks += datablocks; 6871 UFS_UNLOCK(ump); 6872 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6873 /* 6874 * Push the zero'ed inode to its disk buffer so that we are free 6875 * to delete its dependencies below. Once the dependencies are gone 6876 * the buffer can be safely released. 6877 */ 6878 if ((error = bread(ip->i_devvp, 6879 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6880 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 6881 brelse(bp); 6882 softdep_error("softdep_setup_freeblocks", error); 6883 } 6884 if (ump->um_fstype == UFS1) { 6885 dp1 = ((struct ufs1_dinode *)bp->b_data + 6886 ino_to_fsbo(fs, ip->i_number)); 6887 ip->i_din1->di_freelink = dp1->di_freelink; 6888 *dp1 = *ip->i_din1; 6889 } else { 6890 dp2 = ((struct ufs2_dinode *)bp->b_data + 6891 ino_to_fsbo(fs, ip->i_number)); 6892 ip->i_din2->di_freelink = dp2->di_freelink; 6893 *dp2 = *ip->i_din2; 6894 } 6895 /* 6896 * Find and eliminate any inode dependencies. 6897 */ 6898 ACQUIRE_LOCK(ump); 6899 dflags = DEPALLOC; 6900 if (IS_SNAPSHOT(ip)) 6901 dflags |= NODELAY; 6902 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6903 if ((inodedep->id_state & IOSTARTED) != 0) 6904 panic("softdep_setup_freeblocks: inode busy"); 6905 /* 6906 * Add the freeblks structure to the list of operations that 6907 * must await the zero'ed inode being written to disk. If we 6908 * still have a bitmap dependency (delay == 0), then the inode 6909 * has never been written to disk, so we can process the 6910 * freeblks below once we have deleted the dependencies.
6911 */ 6912 delay = (inodedep->id_state & DEPCOMPLETE); 6913 if (delay) 6914 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6915 else 6916 freeblks->fb_state |= COMPLETE; 6917 /* 6918 * Because the file length has been truncated to zero, any 6919 * pending block allocation dependency structures associated 6920 * with this inode are obsolete and can simply be de-allocated. 6921 * We must first merge the two dependency lists to get rid of 6922 * any duplicate freefrag structures, then purge the merged list. 6923 * If we still have a bitmap dependency, then the inode has never 6924 * been written to disk, so we can free any fragments without delay. 6925 */ 6926 if (flags & IO_NORMAL) { 6927 merge_inode_lists(&inodedep->id_newinoupdt, 6928 &inodedep->id_inoupdt); 6929 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 6930 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6931 freeblks); 6932 } 6933 if (flags & IO_EXT) { 6934 merge_inode_lists(&inodedep->id_newextupdt, 6935 &inodedep->id_extupdt); 6936 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6937 cancel_allocdirect(&inodedep->id_extupdt, adp, 6938 freeblks); 6939 } 6940 FREE_LOCK(ump); 6941 bdwrite(bp); 6942 trunc_dependencies(ip, freeblks, -1, 0, flags); 6943 ACQUIRE_LOCK(ump); 6944 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) 6945 (void) free_inodedep(inodedep); 6946 freeblks->fb_state |= DEPCOMPLETE; 6947 /* 6948 * If the inode with zeroed block pointers is now on disk 6949 * we can start freeing blocks. 6950 */ 6951 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 6952 freeblks->fb_state |= INPROGRESS; 6953 else 6954 freeblks = NULL; 6955 FREE_LOCK(ump); 6956 if (freeblks) 6957 handle_workitem_freeblocks(freeblks, 0); 6958 trunc_pages(ip, length, extblocks, flags); 6959} 6960 6961/* 6962 * Eliminate pages from the page cache that back parts of this inode and 6963 * adjust the vnode pager's idea of our size. This prevents stale data 6964 * from hanging around in the page cache. 6965 */ 6966static void 6967trunc_pages(ip, length, extblocks, flags) 6968 struct inode *ip; 6969 off_t length; 6970 ufs2_daddr_t extblocks; 6971 int flags; 6972{ 6973 struct vnode *vp; 6974 struct fs *fs; 6975 ufs_lbn_t lbn; 6976 off_t end, extend; 6977 6978 vp = ITOV(ip); 6979 fs = ip->i_fs; 6980 extend = OFF_TO_IDX(lblktosize(fs, -extblocks)); 6981 if ((flags & IO_EXT) != 0) 6982 vn_pages_remove(vp, extend, 0); 6983 if ((flags & IO_NORMAL) == 0) 6984 return; 6985 BO_LOCK(&vp->v_bufobj); 6986 drain_output(vp); 6987 BO_UNLOCK(&vp->v_bufobj); 6988 /* 6989 * The vnode pager eliminates file pages; we eliminate indirects 6990 * below. 6991 */ 6992 vnode_pager_setsize(vp, length); 6993 /* 6994 * Calculate the end based on the last indirect we want to keep. If 6995 * the block extends into indirects we can just use the negative of 6996 * its lbn. Doubles and triples exist at lower numbers so we must 6997 * be careful not to remove those, if they exist. Double and triple 6998 * indirect lbns do not overlap with others so it is not important 6999 * to verify how many levels are required. 7000 */ 7001 lbn = lblkno(fs, length); 7002 if (lbn >= NDADDR) { 7003 /* Calculate the virtual lbn of the triple indirect. */ 7004 lbn = -lbn - (NIADDR - 1); 7005 end = OFF_TO_IDX(lblktosize(fs, lbn)); 7006 } else 7007 end = extend; 7008 vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end); 7009} 7010 7011/* 7012 * See if the buf bp is in the range eliminated by truncation.
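 * Indirect blocks carry negative lblknos, so the check below first
 * maps them back onto the file range they cover; e.g. (illustrative)
 * the first indirect block has b_lblkno == -NDADDR, lbn_level()
 * reports level 0, and the expression recovers lbn == NDADDR, the
 * first data lbn it addresses.  The buf is kept whenever that lbn
 * precedes lastlbn.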
7013 */ 7014static int 7015trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags) 7016 struct buf *bp; 7017 int *blkoffp; 7018 ufs_lbn_t lastlbn; 7019 int lastoff; 7020 int flags; 7021{ 7022 ufs_lbn_t lbn; 7023 7024 *blkoffp = 0; 7025 /* Only match ext/normal blocks as appropriate. */ 7026 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 7027 ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0)) 7028 return (0); 7029 /* ALTDATA is always a full truncation. */ 7030 if ((bp->b_xflags & BX_ALTDATA) != 0) 7031 return (1); 7032 /* -1 is full truncation. */ 7033 if (lastlbn == -1) 7034 return (1); 7035 /* 7036 * If this is a partial truncate we only want those 7037 * blocks and indirect blocks that cover the range 7038 * we're after. 7039 */ 7040 lbn = bp->b_lblkno; 7041 if (lbn < 0) 7042 lbn = -(lbn + lbn_level(lbn)); 7043 if (lbn < lastlbn) 7044 return (0); 7045 /* Here we only truncate lblkno if it's partial. */ 7046 if (lbn == lastlbn) { 7047 if (lastoff == 0) 7048 return (0); 7049 *blkoffp = lastoff; 7050 } 7051 return (1); 7052} 7053 7054/* 7055 * Eliminate any dependencies that exist in memory beyond lblkno:off 7056 */ 7057static void 7058trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags) 7059 struct inode *ip; 7060 struct freeblks *freeblks; 7061 ufs_lbn_t lastlbn; 7062 int lastoff; 7063 int flags; 7064{ 7065 struct bufobj *bo; 7066 struct vnode *vp; 7067 struct buf *bp; 7068 struct fs *fs; 7069 int blkoff; 7070 7071 /* 7072 * We must wait for any I/O in progress to finish so that 7073 * all potential buffers on the dirty list will be visible. 7074 * Once they are all there, walk the list and get rid of 7075 * any dependencies. 7076 */ 7077 fs = ip->i_fs; 7078 vp = ITOV(ip); 7079 bo = &vp->v_bufobj; 7080 BO_LOCK(bo); 7081 drain_output(vp); 7082 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 7083 bp->b_vflags &= ~BV_SCANNED; 7084restart: 7085 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 7086 if (bp->b_vflags & BV_SCANNED) 7087 continue; 7088 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 7089 bp->b_vflags |= BV_SCANNED; 7090 continue; 7091 } 7092 KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer")); 7093 if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL) 7094 goto restart; 7095 BO_UNLOCK(bo); 7096 if (deallocate_dependencies(bp, freeblks, blkoff)) 7097 bqrelse(bp); 7098 else 7099 brelse(bp); 7100 BO_LOCK(bo); 7101 goto restart; 7102 } 7103 /* 7104 * Now do the work of vtruncbuf while also matching indirect blocks. 
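 * The scan below mirrors what vtruncbuf() would do, but additionally
 * understands the negative-lbn indirect blocks and partial-block
 * offsets via trunc_check_buf(): a partially kept buffer is trimmed
 * with allocbuf(), anything wholly truncated is invalidated.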
7105 */ 7106 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) 7107 bp->b_vflags &= ~BV_SCANNED; 7108cleanrestart: 7109 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) { 7110 if (bp->b_vflags & BV_SCANNED) 7111 continue; 7112 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 7113 bp->b_vflags |= BV_SCANNED; 7114 continue; 7115 } 7116 if (BUF_LOCK(bp, 7117 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 7118 BO_LOCKPTR(bo)) == ENOLCK) { 7119 BO_LOCK(bo); 7120 goto cleanrestart; 7121 } 7122 bp->b_vflags |= BV_SCANNED; 7123 bremfree(bp); 7124 if (blkoff != 0) { 7125 allocbuf(bp, blkoff); 7126 bqrelse(bp); 7127 } else { 7128 bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF; 7129 brelse(bp); 7130 } 7131 BO_LOCK(bo); 7132 goto cleanrestart; 7133 } 7134 drain_output(vp); 7135 BO_UNLOCK(bo); 7136} 7137 7138static int 7139cancel_pagedep(pagedep, freeblks, blkoff) 7140 struct pagedep *pagedep; 7141 struct freeblks *freeblks; 7142 int blkoff; 7143{ 7144 struct jremref *jremref; 7145 struct jmvref *jmvref; 7146 struct dirrem *dirrem, *tmp; 7147 int i; 7148 7149 /* 7150 * Copy any directory remove dependencies to the list 7151 * to be processed after the freeblks proceeds. If 7152 * the directory entries never made it to disk they 7153 * can be dumped directly onto the work list. 7154 */ 7155 LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) { 7156 /* Skip this directory removal if it is intended to remain. */ 7157 if (dirrem->dm_offset < blkoff) 7158 continue; 7159 /* 7160 * If there are any dirrems we wait for the journal write 7161 * to complete and then restart the buf scan as the lock 7162 * has been dropped. 7163 */ 7164 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) { 7165 jwait(&jremref->jr_list, MNT_WAIT); 7166 return (ERESTART); 7167 } 7168 LIST_REMOVE(dirrem, dm_next); 7169 dirrem->dm_dirinum = pagedep->pd_ino; 7170 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list); 7171 } 7172 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) { 7173 jwait(&jmvref->jm_list, MNT_WAIT); 7174 return (ERESTART); 7175 } 7176 /* 7177 * When we're partially truncating a pagedep we just want to flush 7178 * journal entries and return. There cannot be any adds in the 7179 * truncated portion of the directory and newblk must remain if 7180 * part of the block remains. 7181 */ 7182 if (blkoff != 0) { 7183 struct diradd *dap; 7184 7185 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 7186 if (dap->da_offset > blkoff) 7187 panic("cancel_pagedep: diradd %p off %d > %d", 7188 dap, dap->da_offset, blkoff); 7189 for (i = 0; i < DAHASHSZ; i++) 7190 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) 7191 if (dap->da_offset > blkoff) 7192 panic("cancel_pagedep: diradd %p off %d > %d", 7193 dap, dap->da_offset, blkoff); 7194 return (0); 7195 } 7196 /* 7197 * There should be no directory add dependencies present 7198 * as the directory could not be truncated until all 7199 * children were removed.
7200 */ 7201 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, 7202 ("deallocate_dependencies: pendinghd != NULL")); 7203 for (i = 0; i < DAHASHSZ; i++) 7204 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, 7205 ("deallocate_dependencies: diraddhd != NULL")); 7206 if ((pagedep->pd_state & NEWBLOCK) != 0) 7207 free_newdirblk(pagedep->pd_newdirblk); 7208 if (free_pagedep(pagedep) == 0) 7209 panic("Failed to free pagedep %p", pagedep); 7210 return (0); 7211} 7212 7213/* 7214 * Reclaim any dependency structures from a buffer that is about to 7215 * be reallocated to a new vnode. The buffer must be locked, thus, 7216 * no I/O completion operations can occur while we are manipulating 7217 * its associated dependencies. The mutex is held so that other I/O's 7218 * associated with related dependencies do not occur. 7219 */ 7220static int 7221deallocate_dependencies(bp, freeblks, off) 7222 struct buf *bp; 7223 struct freeblks *freeblks; 7224 int off; 7225{ 7226 struct indirdep *indirdep; 7227 struct pagedep *pagedep; 7228 struct allocdirect *adp; 7229 struct worklist *wk, *wkn; 7230 struct ufsmount *ump; 7231 7232 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 7233 goto done; 7234 ump = VFSTOUFS(wk->wk_mp); 7235 ACQUIRE_LOCK(ump); 7236 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { 7237 switch (wk->wk_type) { 7238 case D_INDIRDEP: 7239 indirdep = WK_INDIRDEP(wk); 7240 if (bp->b_lblkno >= 0 || 7241 bp->b_blkno != indirdep->ir_savebp->b_lblkno) 7242 panic("deallocate_dependencies: not indir"); 7243 cancel_indirdep(indirdep, bp, freeblks); 7244 continue; 7245 7246 case D_PAGEDEP: 7247 pagedep = WK_PAGEDEP(wk); 7248 if (cancel_pagedep(pagedep, freeblks, off)) { 7249 FREE_LOCK(ump); 7250 return (ERESTART); 7251 } 7252 continue; 7253 7254 case D_ALLOCINDIR: 7255 /* 7256 * Simply remove the allocindir, we'll find it via 7257 * the indirdep where we can clear pointers if 7258 * needed. 7259 */ 7260 WORKLIST_REMOVE(wk); 7261 continue; 7262 7263 case D_FREEWORK: 7264 /* 7265 * A truncation is waiting for the zero'd pointers 7266 * to be written. It can be freed when the freeblks 7267 * is journaled. 7268 */ 7269 WORKLIST_REMOVE(wk); 7270 wk->wk_state |= ONDEPLIST; 7271 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 7272 break; 7273 7274 case D_ALLOCDIRECT: 7275 adp = WK_ALLOCDIRECT(wk); 7276 if (off != 0) 7277 continue; 7278 /* FALLTHROUGH */ 7279 default: 7280 panic("deallocate_dependencies: Unexpected type %s", 7281 TYPENAME(wk->wk_type)); 7282 /* NOTREACHED */ 7283 } 7284 } 7285 FREE_LOCK(ump); 7286done: 7287 /* 7288 * Don't throw away this buf, we were partially truncating and 7289 * some deps may always remain. 7290 */ 7291 if (off) { 7292 allocbuf(bp, off); 7293 bp->b_vflags |= BV_SCANNED; 7294 return (EBUSY); 7295 } 7296 bp->b_flags |= B_INVAL | B_NOCACHE; 7297 7298 return (0); 7299} 7300 7301/* 7302 * An allocdirect is being canceled due to a truncate. We must make sure 7303 * the journal entry is released in concert with the blkfree that releases 7304 * the storage. Completed journal entries must not be released until the 7305 * space is no longer pointed to by the inode or in the bitmap. 
7306 */ 7307static void 7308cancel_allocdirect(adphead, adp, freeblks) 7309 struct allocdirectlst *adphead; 7310 struct allocdirect *adp; 7311 struct freeblks *freeblks; 7312{ 7313 struct freework *freework; 7314 struct newblk *newblk; 7315 struct worklist *wk; 7316 7317 TAILQ_REMOVE(adphead, adp, ad_next); 7318 newblk = (struct newblk *)adp; 7319 freework = NULL; 7320 /* 7321 * Find the correct freework structure. 7322 */ 7323 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { 7324 if (wk->wk_type != D_FREEWORK) 7325 continue; 7326 freework = WK_FREEWORK(wk); 7327 if (freework->fw_blkno == newblk->nb_newblkno) 7328 break; 7329 } 7330 if (freework == NULL) 7331 panic("cancel_allocdirect: Freework not found"); 7332 /* 7333 * If a newblk exists at all we still have the journal entry that 7334 * initiated the allocation so we do not need to journal the free. 7335 */ 7336 cancel_jfreeblk(freeblks, freework->fw_blkno); 7337 /* 7338 * If the journal hasn't been written the jnewblk must be passed 7339 * to the call to ffs_blkfree that reclaims the space. We accomplish 7340 * this by linking the journal dependency into the freework to be 7341 * freed when freework_freeblock() is called. If the journal has 7342 * been written we can simply reclaim the journal space when the 7343 * freeblks work is complete. 7344 */ 7345 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, 7346 &freeblks->fb_jwork); 7347 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 7348} 7349 7350 7351/* 7352 * Cancel a new block allocation. May be an indirect or direct block. We 7353 * remove it from various lists and return any journal record that needs to 7354 * be resolved by the caller. 7355 * 7356 * A special consideration is made for indirects which were never pointed 7357 * at on disk and will never be found once this block is released. 7358 */ 7359static struct jnewblk * 7360cancel_newblk(newblk, wk, wkhd) 7361 struct newblk *newblk; 7362 struct worklist *wk; 7363 struct workhead *wkhd; 7364{ 7365 struct jnewblk *jnewblk; 7366 7367 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); 7368 7369 newblk->nb_state |= GOINGAWAY; 7370 /* 7371 * Previously we traversed the completedhd on each indirdep 7372 * attached to this newblk to cancel them and gather journal 7373 * work. Since we need only the oldest journal segment and 7374 * the lowest point on the tree will always have the oldest 7375 * journal segment we are free to release the segments 7376 * of any subordinates and may leave the indirdep list to 7377 * indirdep_complete() when this newblk is freed. 7378 */ 7379 if (newblk->nb_state & ONDEPLIST) { 7380 newblk->nb_state &= ~ONDEPLIST; 7381 LIST_REMOVE(newblk, nb_deps); 7382 } 7383 if (newblk->nb_state & ONWORKLIST) 7384 WORKLIST_REMOVE(&newblk->nb_list); 7385 /* 7386 * If the journal entry hasn't been written we save a pointer to 7387 * the dependency that frees it until it is written or the 7388 * superseding operation completes. 7389 */ 7390 jnewblk = newblk->nb_jnewblk; 7391 if (jnewblk != NULL && wk != NULL) { 7392 newblk->nb_jnewblk = NULL; 7393 jnewblk->jn_dep = wk; 7394 } 7395 if (!LIST_EMPTY(&newblk->nb_jwork)) 7396 jwork_move(wkhd, &newblk->nb_jwork); 7397 /* 7398 * When truncating we must free the newdirblk early to remove 7399 * the pagedep from the hash before returning. 
7400 */ 7401 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7402 free_newdirblk(WK_NEWDIRBLK(wk)); 7403 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7404 panic("cancel_newblk: extra newdirblk"); 7405 7406 return (jnewblk); 7407} 7408 7409/* 7410 * Schedule the freefrag associated with a newblk to be released once 7411 * the pointers are written and the previous block is no longer needed. 7412 */ 7413static void 7414newblk_freefrag(newblk) 7415 struct newblk *newblk; 7416{ 7417 struct freefrag *freefrag; 7418 7419 if (newblk->nb_freefrag == NULL) 7420 return; 7421 freefrag = newblk->nb_freefrag; 7422 newblk->nb_freefrag = NULL; 7423 freefrag->ff_state |= COMPLETE; 7424 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 7425 add_to_worklist(&freefrag->ff_list, 0); 7426} 7427 7428/* 7429 * Free a newblk. Generate a new freefrag work request if appropriate. 7430 * This must be called after the inode pointer and any direct block pointers 7431 * are valid or fully removed via truncate or frag extension. 7432 */ 7433static void 7434free_newblk(newblk) 7435 struct newblk *newblk; 7436{ 7437 struct indirdep *indirdep; 7438 struct worklist *wk; 7439 7440 KASSERT(newblk->nb_jnewblk == NULL, 7441 ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk)); 7442 KASSERT(newblk->nb_list.wk_type != D_NEWBLK, 7443 ("free_newblk: unclaimed newblk")); 7444 LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp)); 7445 newblk_freefrag(newblk); 7446 if (newblk->nb_state & ONDEPLIST) 7447 LIST_REMOVE(newblk, nb_deps); 7448 if (newblk->nb_state & ONWORKLIST) 7449 WORKLIST_REMOVE(&newblk->nb_list); 7450 LIST_REMOVE(newblk, nb_hash); 7451 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7452 free_newdirblk(WK_NEWDIRBLK(wk)); 7453 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7454 panic("free_newblk: extra newdirblk"); 7455 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) 7456 indirdep_complete(indirdep); 7457 handle_jwork(&newblk->nb_jwork); 7458 WORKITEM_FREE(newblk, D_NEWBLK); 7459} 7460 7461/* 7462 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 7463 * This routine must be called with splbio interrupts blocked. 7464 */ 7465static void 7466free_newdirblk(newdirblk) 7467 struct newdirblk *newdirblk; 7468{ 7469 struct pagedep *pagedep; 7470 struct diradd *dap; 7471 struct worklist *wk; 7472 7473 LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp)); 7474 WORKLIST_REMOVE(&newdirblk->db_list); 7475 /* 7476 * If the pagedep is still linked onto the directory buffer 7477 * dependency chain, then some of the entries on the 7478 * pd_pendinghd list may not be committed to disk yet. In 7479 * this case, we will simply clear the NEWBLOCK flag and 7480 * let the pd_pendinghd list be processed when the pagedep 7481 * is next written. If the pagedep is no longer on the buffer 7482 * dependency chain, then all the entries on the pd_pending 7483 * list are committed to disk and we can free them here. 7484 */ 7485 pagedep = newdirblk->db_pagedep; 7486 pagedep->pd_state &= ~NEWBLOCK; 7487 if ((pagedep->pd_state & ONWORKLIST) == 0) { 7488 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 7489 free_diradd(dap, NULL); 7490 /* 7491 * If no dependencies remain, the pagedep will be freed. 7492 */ 7493 free_pagedep(pagedep); 7494 } 7495 /* Should only ever be one item in the list. 
*/ 7496 while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) { 7497 WORKLIST_REMOVE(wk); 7498 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 7499 } 7500 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 7501} 7502 7503/* 7504 * Prepare an inode to be freed. The actual free operation is not 7505 * done until the zero'ed inode has been written to disk. 7506 */ 7507void 7508softdep_freefile(pvp, ino, mode) 7509 struct vnode *pvp; 7510 ino_t ino; 7511 int mode; 7512{ 7513 struct inode *ip = VTOI(pvp); 7514 struct inodedep *inodedep; 7515 struct freefile *freefile; 7516 struct freeblks *freeblks; 7517 struct ufsmount *ump; 7518 7519 ump = ip->i_ump; 7520 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 7521 ("softdep_freefile called on non-softdep filesystem")); 7522 /* 7523 * This sets up the inode de-allocation dependency. 7524 */ 7525 freefile = malloc(sizeof(struct freefile), 7526 M_FREEFILE, M_SOFTDEP_FLAGS); 7527 workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount); 7528 freefile->fx_mode = mode; 7529 freefile->fx_oldinum = ino; 7530 freefile->fx_devvp = ip->i_devvp; 7531 LIST_INIT(&freefile->fx_jwork); 7532 UFS_LOCK(ump); 7533 ip->i_fs->fs_pendinginodes += 1; 7534 UFS_UNLOCK(ump); 7535 7536 /* 7537 * If the inodedep does not exist, then the zero'ed inode has 7538 * been written to disk. If the allocated inode has never been 7539 * written to disk, then the on-disk inode is zero'ed. In either 7540 * case we can free the file immediately. If the journal was 7541 * canceled before being written the inode will never make it to 7542 * disk and we must send the canceled journal entries to 7543 * ffs_freefile() to be cleared in conjunction with the bitmap. 7544 * Any blocks waiting on the inode to write can be safely freed 7545 * here as it will never be written. 7546 */ 7547 ACQUIRE_LOCK(ump); 7548 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); 7549 if (inodedep) { 7550 /* 7551 * Clear out freeblks that no longer need to reference 7552 * this inode. 7553 */ 7554 while ((freeblks = 7555 TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) { 7556 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, 7557 fb_next); 7558 freeblks->fb_state &= ~ONDEPLIST; 7559 } 7560 /* 7561 * Remove this inode from the unlinked list. 7562 */ 7563 if (inodedep->id_state & UNLINKED) { 7564 /* 7565 * Save the journal work to be freed with the bitmap 7566 * before we clear UNLINKED. Otherwise it can be lost 7567 * if the inode block is written. 7568 */ 7569 handle_bufwait(inodedep, &freefile->fx_jwork); 7570 clear_unlinked_inodedep(inodedep); 7571 /* 7572 * Re-acquire inodedep as we've dropped the 7573 * per-filesystem lock in clear_unlinked_inodedep(). 7574 */ 7575 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); 7576 } 7577 } 7578 if (inodedep == NULL || check_inode_unwritten(inodedep)) { 7579 FREE_LOCK(ump); 7580 handle_workitem_freefile(freefile); 7581 return; 7582 } 7583 if ((inodedep->id_state & DEPCOMPLETE) == 0) 7584 inodedep->id_state |= GOINGAWAY; 7585 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 7586 FREE_LOCK(ump); 7587 if (ip->i_number == ino) 7588 ip->i_flag |= IN_MODIFIED; 7589} 7590 7591/* 7592 * Check to see if an inode has never been written to disk. If 7593 * so free the inodedep and return success, otherwise return failure. 7594 * This routine must be called with splbio interrupts blocked. 7595 * 7596 * If we still have a bitmap dependency, then the inode has never 7597 * been written to disk. Drop the dependency as it is no longer 7598 * necessary since the inode is being deallocated.
We set the 7599 * ALLCOMPLETE flags since the bitmap now properly shows that the 7600 * inode is not allocated. Even if the inode is actively being 7601 * written, it has been rolled back to its zero'ed state, so we 7602 * are ensured that a zero inode is what is on the disk. For short 7603 * lived files, this change will usually result in removing all the 7604 * dependencies from the inode so that it can be freed immediately. 7605 */ 7606static int 7607check_inode_unwritten(inodedep) 7608 struct inodedep *inodedep; 7609{ 7610 7611 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7612 7613 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || 7614 !LIST_EMPTY(&inodedep->id_dirremhd) || 7615 !LIST_EMPTY(&inodedep->id_pendinghd) || 7616 !LIST_EMPTY(&inodedep->id_bufwait) || 7617 !LIST_EMPTY(&inodedep->id_inowait) || 7618 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7619 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7620 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7621 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7622 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7623 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7624 inodedep->id_mkdiradd != NULL || 7625 inodedep->id_nlinkdelta != 0) 7626 return (0); 7627 /* 7628 * Another process might be in initiate_write_inodeblock_ufs[12] 7629 * trying to allocate memory without holding "Softdep Lock". 7630 */ 7631 if ((inodedep->id_state & IOSTARTED) != 0 && 7632 inodedep->id_savedino1 == NULL) 7633 return (0); 7634 7635 if (inodedep->id_state & ONDEPLIST) 7636 LIST_REMOVE(inodedep, id_deps); 7637 inodedep->id_state &= ~ONDEPLIST; 7638 inodedep->id_state |= ALLCOMPLETE; 7639 inodedep->id_bmsafemap = NULL; 7640 if (inodedep->id_state & ONWORKLIST) 7641 WORKLIST_REMOVE(&inodedep->id_list); 7642 if (inodedep->id_savedino1 != NULL) { 7643 free(inodedep->id_savedino1, M_SAVEDINO); 7644 inodedep->id_savedino1 = NULL; 7645 } 7646 if (free_inodedep(inodedep) == 0) 7647 panic("check_inode_unwritten: busy inode"); 7648 return (1); 7649} 7650 7651static int 7652check_inodedep_free(inodedep) 7653 struct inodedep *inodedep; 7654{ 7655 7656 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7657 if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 7658 !LIST_EMPTY(&inodedep->id_dirremhd) || 7659 !LIST_EMPTY(&inodedep->id_pendinghd) || 7660 !LIST_EMPTY(&inodedep->id_bufwait) || 7661 !LIST_EMPTY(&inodedep->id_inowait) || 7662 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7663 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7664 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7665 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7666 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7667 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7668 inodedep->id_mkdiradd != NULL || 7669 inodedep->id_nlinkdelta != 0 || 7670 inodedep->id_savedino1 != NULL) 7671 return (0); 7672 return (1); 7673} 7674 7675/* 7676 * Try to free an inodedep structure. Return 1 if it could be freed. 7677 */ 7678static int 7679free_inodedep(inodedep) 7680 struct inodedep *inodedep; 7681{ 7682 7683 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7684 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || 7685 !check_inodedep_free(inodedep)) 7686 return (0); 7687 if (inodedep->id_state & ONDEPLIST) 7688 LIST_REMOVE(inodedep, id_deps); 7689 LIST_REMOVE(inodedep, id_hash); 7690 WORKITEM_FREE(inodedep, D_INODEDEP); 7691 return (1); 7692} 7693 7694/* 7695 * Free the block referenced by a freework structure. The parent freeblks 7696 * structure is released and completed when the final cg bitmap reaches 7697 * the disk. 
This routine may be freeing a jnewblk which never made it to 7698 * disk in which case we do not have to wait as the operation is undone 7699 * in memory immediately. 7700 */ 7701static void 7702freework_freeblock(freework) 7703 struct freework *freework; 7704{ 7705 struct freeblks *freeblks; 7706 struct jnewblk *jnewblk; 7707 struct ufsmount *ump; 7708 struct workhead wkhd; 7709 struct fs *fs; 7710 int bsize; 7711 int needj; 7712 7713 ump = VFSTOUFS(freework->fw_list.wk_mp); 7714 LOCK_OWNED(ump); 7715 /* 7716 * Handle partial truncate separately. 7717 */ 7718 if (freework->fw_indir) { 7719 complete_trunc_indir(freework); 7720 return; 7721 } 7722 freeblks = freework->fw_freeblks; 7723 fs = ump->um_fs; 7724 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; 7725 bsize = lfragtosize(fs, freework->fw_frags); 7726 LIST_INIT(&wkhd); 7727 /* 7728 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives 7729 * on the indirblk hashtable and prevents premature freeing. 7730 */ 7731 freework->fw_state |= DEPCOMPLETE; 7732 /* 7733 * SUJ needs to wait for the segment referencing freed indirect 7734 * blocks to expire so that we know the checker will not confuse 7735 * a re-allocated indirect block with its old contents. 7736 */ 7737 if (needj && freework->fw_lbn <= -NDADDR) 7738 indirblk_insert(freework); 7739 /* 7740 * If we are canceling an existing jnewblk pass it to the free 7741 * routine, otherwise pass the freeblk which will ultimately 7742 * release the freeblks. If we're not journaling, we can just 7743 * free the freeblks immediately. 7744 */ 7745 jnewblk = freework->fw_jnewblk; 7746 if (jnewblk != NULL) { 7747 cancel_jnewblk(jnewblk, &wkhd); 7748 needj = 0; 7749 } else if (needj) { 7750 freework->fw_state |= DELAYEDFREE; 7751 freeblks->fb_cgwait++; 7752 WORKLIST_INSERT(&wkhd, &freework->fw_list); 7753 } 7754 FREE_LOCK(ump); 7755 freeblks_free(ump, freeblks, btodb(bsize)); 7756 CTR4(KTR_SUJ, 7757 "freework_freeblock: ino %d blkno %jd lbn %jd size %ld", 7758 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); 7759 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, 7760 freeblks->fb_inum, freeblks->fb_vtype, &wkhd); 7761 ACQUIRE_LOCK(ump); 7762 /* 7763 * The jnewblk will be discarded and the bits in the map never 7764 * made it to disk. We can immediately free the freeblk. 7765 */ 7766 if (needj == 0) 7767 handle_written_freework(freework); 7768} 7769 7770/* 7771 * We enqueue freework items that need processing back on the freeblks and 7772 * add the freeblks to the worklist. This makes it easier to find all work 7773 * required to flush a truncation in process_truncates(). 7774 */ 7775static void 7776freework_enqueue(freework) 7777 struct freework *freework; 7778{ 7779 struct freeblks *freeblks; 7780 7781 freeblks = freework->fw_freeblks; 7782 if ((freework->fw_state & INPROGRESS) == 0) 7783 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 7784 if ((freeblks->fb_state & 7785 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && 7786 LIST_EMPTY(&freeblks->fb_jblkdephd)) 7787 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7788} 7789 7790/* 7791 * Start, continue, or finish the process of freeing an indirect block tree. 7792 * The free operation may be paused at any point with fw_off containing the 7793 * offset to restart from. This enables us to implement some flow control 7794 * for large truncates which may fan out and generate a huge number of 7795 * dependencies. 
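 * The restart offset lets a large indirect tree be released in * bounded steps, with each pass resuming at fw_off.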
7796 */ 7797static void 7798handle_workitem_indirblk(freework) 7799 struct freework *freework; 7800{ 7801 struct freeblks *freeblks; 7802 struct ufsmount *ump; 7803 struct fs *fs; 7804 7805 freeblks = freework->fw_freeblks; 7806 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7807 fs = ump->um_fs; 7808 if (freework->fw_state & DEPCOMPLETE) { 7809 handle_written_freework(freework); 7810 return; 7811 } 7812 if (freework->fw_off == NINDIR(fs)) { 7813 freework_freeblock(freework); 7814 return; 7815 } 7816 freework->fw_state |= INPROGRESS; 7817 FREE_LOCK(ump); 7818 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), 7819 freework->fw_lbn); 7820 ACQUIRE_LOCK(ump); 7821} 7822 7823/* 7824 * Called when a freework structure attached to a cg buf is written. The 7825 * ref on either the parent or the freeblks structure is released and 7826 * the freeblks is added back to the worklist if there is more work to do. 7827 */ 7828static void 7829handle_written_freework(freework) 7830 struct freework *freework; 7831{ 7832 struct freeblks *freeblks; 7833 struct freework *parent; 7834 7835 freeblks = freework->fw_freeblks; 7836 parent = freework->fw_parent; 7837 if (freework->fw_state & DELAYEDFREE) 7838 freeblks->fb_cgwait--; 7839 freework->fw_state |= COMPLETE; 7840 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 7841 WORKITEM_FREE(freework, D_FREEWORK); 7842 if (parent) { 7843 if (--parent->fw_ref == 0) 7844 freework_enqueue(parent); 7845 return; 7846 } 7847 if (--freeblks->fb_ref != 0) 7848 return; 7849 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == 7850 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) 7851 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7852} 7853 7854/* 7855 * This workitem routine performs the block de-allocation. 7856 * The workitem is added to the pending list after the updated 7857 * inode block has been written to disk. As mentioned above, 7858 * checks regarding the number of blocks de-allocated (compared 7859 * to the number of blocks allocated for the file) are also 7860 * performed in this function. 
7861 */ 7862static int 7863handle_workitem_freeblocks(freeblks, flags) 7864 struct freeblks *freeblks; 7865 int flags; 7866{ 7867 struct freework *freework; 7868 struct newblk *newblk; 7869 struct allocindir *aip; 7870 struct ufsmount *ump; 7871 struct worklist *wk; 7872 7873 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), 7874 ("handle_workitem_freeblocks: Journal entries not written.")); 7875 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7876 ACQUIRE_LOCK(ump); 7877 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { 7878 WORKLIST_REMOVE(wk); 7879 switch (wk->wk_type) { 7880 case D_DIRREM: 7881 wk->wk_state |= COMPLETE; 7882 add_to_worklist(wk, 0); 7883 continue; 7884 7885 case D_ALLOCDIRECT: 7886 free_newblk(WK_NEWBLK(wk)); 7887 continue; 7888 7889 case D_ALLOCINDIR: 7890 aip = WK_ALLOCINDIR(wk); 7891 freework = NULL; 7892 if (aip->ai_state & DELAYEDFREE) { 7893 FREE_LOCK(ump); 7894 freework = newfreework(ump, freeblks, NULL, 7895 aip->ai_lbn, aip->ai_newblkno, 7896 ump->um_fs->fs_frag, 0, 0); 7897 ACQUIRE_LOCK(ump); 7898 } 7899 newblk = WK_NEWBLK(wk); 7900 if (newblk->nb_jnewblk) { 7901 freework->fw_jnewblk = newblk->nb_jnewblk; 7902 newblk->nb_jnewblk->jn_dep = &freework->fw_list; 7903 newblk->nb_jnewblk = NULL; 7904 } 7905 free_newblk(newblk); 7906 continue; 7907 7908 case D_FREEWORK: 7909 freework = WK_FREEWORK(wk); 7910 if (freework->fw_lbn <= -NDADDR) 7911 handle_workitem_indirblk(freework); 7912 else 7913 freework_freeblock(freework); 7914 continue; 7915 default: 7916 panic("handle_workitem_freeblocks: Unknown type %s", 7917 TYPENAME(wk->wk_type)); 7918 } 7919 } 7920 if (freeblks->fb_ref != 0) { 7921 freeblks->fb_state &= ~INPROGRESS; 7922 wake_worklist(&freeblks->fb_list); 7923 freeblks = NULL; 7924 } 7925 FREE_LOCK(ump); 7926 if (freeblks) 7927 return handle_complete_freeblocks(freeblks, flags); 7928 return (0); 7929} 7930 7931/* 7932 * Handle completion of block free via truncate. This allows fs_pending 7933 * to track the actual free block count more closely than if we only updated 7934 * it at the end. We must be careful to handle cases where the block count 7935 * on free was incorrect. 7936 */ 7937static void 7938freeblks_free(ump, freeblks, blocks) 7939 struct ufsmount *ump; 7940 struct freeblks *freeblks; 7941 int blocks; 7942{ 7943 struct fs *fs; 7944 ufs2_daddr_t remain; 7945 7946 UFS_LOCK(ump); 7947 remain = -freeblks->fb_chkcnt; 7948 freeblks->fb_chkcnt += blocks; 7949 if (remain > 0) { 7950 if (remain < blocks) 7951 blocks = remain; 7952 fs = ump->um_fs; 7953 fs->fs_pendingblocks -= blocks; 7954 } 7955 UFS_UNLOCK(ump); 7956} 7957 7958/* 7959 * Once all of the freework workitems are complete we can retire the 7960 * freeblocks dependency and any journal work awaiting completion. This 7961 * can not be called until all other dependencies are stable on disk. 7962 */ 7963static int 7964handle_complete_freeblocks(freeblks, flags) 7965 struct freeblks *freeblks; 7966 int flags; 7967{ 7968 struct inodedep *inodedep; 7969 struct inode *ip; 7970 struct vnode *vp; 7971 struct fs *fs; 7972 struct ufsmount *ump; 7973 ufs2_daddr_t spare; 7974 7975 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7976 fs = ump->um_fs; 7977 flags = LK_EXCLUSIVE | flags; 7978 spare = freeblks->fb_chkcnt; 7979 7980 /* 7981 * If we did not release the expected number of blocks we may have 7982 * to adjust the inode block count here. Only do so if it wasn't 7983 * a truncation to zero and the modrev still matches. 
7984 */ 7985 if (spare && freeblks->fb_len != 0) { 7986 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7987 flags, &vp, FFSV_FORCEINSMQ) != 0) 7988 return (EBUSY); 7989 ip = VTOI(vp); 7990 if (DIP(ip, i_modrev) == freeblks->fb_modrev) { 7991 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); 7992 ip->i_flag |= IN_CHANGE; 7993 /* 7994 * We must wait so this happens before the 7995 * journal is reclaimed. 7996 */ 7997 ffs_update(vp, 1); 7998 } 7999 vput(vp); 8000 } 8001 if (spare < 0) { 8002 UFS_LOCK(ump); 8003 fs->fs_pendingblocks += spare; 8004 UFS_UNLOCK(ump); 8005 } 8006#ifdef QUOTA 8007 /* Handle spare. */ 8008 if (spare) 8009 quotaadj(freeblks->fb_quota, ump, -spare); 8010 quotarele(freeblks->fb_quota); 8011#endif 8012 ACQUIRE_LOCK(ump); 8013 if (freeblks->fb_state & ONDEPLIST) { 8014 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 8015 0, &inodedep); 8016 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); 8017 freeblks->fb_state &= ~ONDEPLIST; 8018 if (TAILQ_EMPTY(&inodedep->id_freeblklst)) 8019 free_inodedep(inodedep); 8020 } 8021 /* 8022 * All of the freeblock deps must be complete prior to this call 8023 * so it's now safe to complete earlier outstanding journal entries. 8024 */ 8025 handle_jwork(&freeblks->fb_jwork); 8026 WORKITEM_FREE(freeblks, D_FREEBLKS); 8027 FREE_LOCK(ump); 8028 return (0); 8029} 8030 8031/* 8032 * Release blocks associated with the freeblks and stored in the indirect 8033 * block dbn. If level is greater than SINGLE, the block is an indirect block 8034 * and recursive calls to indirtrunc must be used to cleanse other indirect 8035 * blocks. 8036 * 8037 * This handles partial and complete truncation of blocks. Partial is noted 8038 * with goingaway == 0. In this case the freework is completed after the 8039 * zero'd indirects are written to disk. For full truncation the freework 8040 * is completed after the block is freed. 8041 */ 8042static void 8043indir_trunc(freework, dbn, lbn) 8044 struct freework *freework; 8045 ufs2_daddr_t dbn; 8046 ufs_lbn_t lbn; 8047{ 8048 struct freework *nfreework; 8049 struct workhead wkhd; 8050 struct freeblks *freeblks; 8051 struct buf *bp; 8052 struct fs *fs; 8053 struct indirdep *indirdep; 8054 struct ufsmount *ump; 8055 ufs1_daddr_t *bap1 = 0; 8056 ufs2_daddr_t nb, nnb, *bap2 = 0; 8057 ufs_lbn_t lbnadd, nlbn; 8058 int i, nblocks, ufs1fmt; 8059 int freedblocks; 8060 int goingaway; 8061 int freedeps; 8062 int needj; 8063 int level; 8064 int cnt; 8065 8066 freeblks = freework->fw_freeblks; 8067 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 8068 fs = ump->um_fs; 8069 /* 8070 * Get buffer of block pointers to be freed. There are three cases: 8071 * 8072 * 1) Partial truncate caches the indirdep pointer in the freework 8073 * which provides us a back copy to the save bp which holds the 8074 * pointers we want to clear. When this completes the zero 8075 * pointers are written to the real copy. 8076 * 2) The indirect is being completely truncated, cancel_indirdep() 8077 * eliminated the real copy and placed the indirdep on the saved 8078 * copy. The indirdep and buf are discarded when this completes. 8079 * 3) The indirect was not in memory, we read a copy off of the disk 8080 * using the devvp and drop and invalidate the buffer when we're 8081 * done. 
8082 */ 8083 goingaway = 1; 8084 indirdep = NULL; 8085 if (freework->fw_indir != NULL) { 8086 goingaway = 0; 8087 indirdep = freework->fw_indir; 8088 bp = indirdep->ir_savebp; 8089 if (bp == NULL || bp->b_blkno != dbn) 8090 panic("indir_trunc: Bad saved buf %p blkno %jd", 8091 bp, (intmax_t)dbn); 8092 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { 8093 /* 8094 * The lock prevents the buf dep list from changing and 8095 * indirects on devvp should only ever have one dependency. 8096 */ 8097 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); 8098 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) 8099 panic("indir_trunc: Bad indirdep %p from buf %p", 8100 indirdep, bp); 8101 } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 8102 NOCRED, &bp) != 0) { 8103 brelse(bp); 8104 return; 8105 } 8106 ACQUIRE_LOCK(ump); 8107 /* Protects against a race with complete_trunc_indir(). */ 8108 freework->fw_state &= ~INPROGRESS; 8109 /* 8110 * If we have an indirdep we need to enforce the truncation order 8111 * and discard it when it is complete. 8112 */ 8113 if (indirdep) { 8114 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && 8115 !TAILQ_EMPTY(&indirdep->ir_trunc)) { 8116 /* 8117 * Add the complete truncate to the list on the 8118 * indirdep to enforce in-order processing. 8119 */ 8120 if (freework->fw_indir == NULL) 8121 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, 8122 freework, fw_next); 8123 FREE_LOCK(ump); 8124 return; 8125 } 8126 /* 8127 * If we're goingaway, free the indirdep. Otherwise it will 8128 * linger until the write completes. 8129 */ 8130 if (goingaway) 8131 free_indirdep(indirdep); 8132 } 8133 FREE_LOCK(ump); 8134 /* Initialize pointers depending on block size. */ 8135 if (ump->um_fstype == UFS1) { 8136 bap1 = (ufs1_daddr_t *)bp->b_data; 8137 nb = bap1[freework->fw_off]; 8138 ufs1fmt = 1; 8139 } else { 8140 bap2 = (ufs2_daddr_t *)bp->b_data; 8141 nb = bap2[freework->fw_off]; 8142 ufs1fmt = 0; 8143 } 8144 level = lbn_level(lbn); 8145 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; 8146 lbnadd = lbn_offset(fs, level); 8147 nblocks = btodb(fs->fs_bsize); 8148 nfreework = freework; 8149 freedeps = 0; 8150 cnt = 0; 8151 /* 8152 * Reclaim blocks. Traverses into nested indirect levels and 8153 * arranges for the current level to be freed when subordinates 8154 * are free when journaling. 8155 */ 8156 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { 8157 if (i != NINDIR(fs) - 1) { 8158 if (ufs1fmt) 8159 nnb = bap1[i+1]; 8160 else 8161 nnb = bap2[i+1]; 8162 } else 8163 nnb = 0; 8164 if (nb == 0) 8165 continue; 8166 cnt++; 8167 if (level != 0) { 8168 nlbn = (lbn + 1) - (i * lbnadd); 8169 if (needj != 0) { 8170 nfreework = newfreework(ump, freeblks, freework, 8171 nlbn, nb, fs->fs_frag, 0, 0); 8172 freedeps++; 8173 } 8174 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); 8175 } else { 8176 struct freedep *freedep; 8177 8178 /* 8179 * Attempt to aggregate freedep dependencies for 8180 * all blocks being released to the same CG. 
8181 */ 8182 LIST_INIT(&wkhd); 8183 if (needj != 0 && 8184 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { 8185 freedep = newfreedep(freework); 8186 WORKLIST_INSERT_UNLOCKED(&wkhd, 8187 &freedep->fd_list); 8188 freedeps++; 8189 } 8190 CTR3(KTR_SUJ, 8191 "indir_trunc: ino %d blkno %jd size %ld", 8192 freeblks->fb_inum, nb, fs->fs_bsize); 8193 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, 8194 fs->fs_bsize, freeblks->fb_inum, 8195 freeblks->fb_vtype, &wkhd); 8196 } 8197 } 8198 if (goingaway) { 8199 bp->b_flags |= B_INVAL | B_NOCACHE; 8200 brelse(bp); 8201 } 8202 freedblocks = 0; 8203 if (level == 0) 8204 freedblocks = (nblocks * cnt); 8205 if (needj == 0) 8206 freedblocks += nblocks; 8207 freeblks_free(ump, freeblks, freedblocks); 8208 /* 8209 * If we are journaling set up the ref counts and offset so this 8210 * indirect can be completed when its children are free. 8211 */ 8212 if (needj) { 8213 ACQUIRE_LOCK(ump); 8214 freework->fw_off = i; 8215 freework->fw_ref += freedeps; 8216 freework->fw_ref -= NINDIR(fs) + 1; 8217 if (level == 0) 8218 freeblks->fb_cgwait += freedeps; 8219 if (freework->fw_ref == 0) 8220 freework_freeblock(freework); 8221 FREE_LOCK(ump); 8222 return; 8223 } 8224 /* 8225 * If we're not journaling we can free the indirect now. 8226 */ 8227 dbn = dbtofsb(fs, dbn); 8228 CTR3(KTR_SUJ, 8229 "indir_trunc 2: ino %d blkno %jd size %ld", 8230 freeblks->fb_inum, dbn, fs->fs_bsize); 8231 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, 8232 freeblks->fb_inum, freeblks->fb_vtype, NULL); 8233 /* Non SUJ softdep does single-threaded truncations. */ 8234 if (freework->fw_blkno == dbn) { 8235 freework->fw_state |= ALLCOMPLETE; 8236 ACQUIRE_LOCK(ump); 8237 handle_written_freework(freework); 8238 FREE_LOCK(ump); 8239 } 8240 return; 8241} 8242 8243/* 8244 * Cancel an allocindir when it is removed via truncation. When bp is not 8245 * NULL the indirect never appeared on disk and is scheduled to be freed 8246 * independently of the indir so we can more easily track journal work. 8247 */ 8248static void 8249cancel_allocindir(aip, bp, freeblks, trunc) 8250 struct allocindir *aip; 8251 struct buf *bp; 8252 struct freeblks *freeblks; 8253 int trunc; 8254{ 8255 struct indirdep *indirdep; 8256 struct freefrag *freefrag; 8257 struct newblk *newblk; 8258 8259 newblk = (struct newblk *)aip; 8260 LIST_REMOVE(aip, ai_next); 8261 /* 8262 * We must eliminate the pointer in bp if it must be freed on its 8263 * own due to partial truncate or pending journal work. 8264 */ 8265 if (bp && (trunc || newblk->nb_jnewblk)) { 8266 /* 8267 * Clear the pointer and mark the aip to be freed 8268 * directly if it never existed on disk. 8269 */ 8270 aip->ai_state |= DELAYEDFREE; 8271 indirdep = aip->ai_indirdep; 8272 if (indirdep->ir_state & UFS1FMT) 8273 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8274 else 8275 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8276 } 8277 /* 8278 * When truncating the previous pointer will be freed via 8279 * savedbp. Eliminate the freefrag which would dup free. 8280 */ 8281 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { 8282 newblk->nb_freefrag = NULL; 8283 if (freefrag->ff_jdep) 8284 cancel_jfreefrag( 8285 WK_JFREEFRAG(freefrag->ff_jdep)); 8286 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); 8287 WORKITEM_FREE(freefrag, D_FREEFRAG); 8288 } 8289 /* 8290 * If the journal hasn't been written the jnewblk must be passed 8291 * to the call to ffs_blkfree that reclaims the space. 
We accomplish 8292 * this by leaving the journal dependency on the newblk to be freed 8293 * when a freework is created in handle_workitem_freeblocks(). 8294 */ 8295 cancel_newblk(newblk, NULL, &freeblks->fb_jwork); 8296 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 8297} 8298 8299/* 8300 * Create the mkdir dependencies for . and .. in a new directory. Link them 8301 * in to a newdirblk so any subsequent additions are tracked properly. The 8302 * caller is responsible for adding the mkdir1 dependency to the journal 8303 * and updating id_mkdiradd. This function returns with the per-filesystem 8304 * lock held. 8305 */ 8306static struct mkdir * 8307setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) 8308 struct diradd *dap; 8309 ino_t newinum; 8310 ino_t dinum; 8311 struct buf *newdirbp; 8312 struct mkdir **mkdirp; 8313{ 8314 struct newblk *newblk; 8315 struct pagedep *pagedep; 8316 struct inodedep *inodedep; 8317 struct newdirblk *newdirblk = 0; 8318 struct mkdir *mkdir1, *mkdir2; 8319 struct worklist *wk; 8320 struct jaddref *jaddref; 8321 struct ufsmount *ump; 8322 struct mount *mp; 8323 8324 mp = dap->da_list.wk_mp; 8325 ump = VFSTOUFS(mp); 8326 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, 8327 M_SOFTDEP_FLAGS); 8328 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8329 LIST_INIT(&newdirblk->db_mkdir); 8330 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8331 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); 8332 mkdir1->md_state = ATTACHED | MKDIR_BODY; 8333 mkdir1->md_diradd = dap; 8334 mkdir1->md_jaddref = NULL; 8335 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8336 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); 8337 mkdir2->md_state = ATTACHED | MKDIR_PARENT; 8338 mkdir2->md_diradd = dap; 8339 mkdir2->md_jaddref = NULL; 8340 if (MOUNTEDSUJ(mp) == 0) { 8341 mkdir1->md_state |= DEPCOMPLETE; 8342 mkdir2->md_state |= DEPCOMPLETE; 8343 } 8344 /* 8345 * Dependency on "." and ".." being written to disk. 8346 */ 8347 mkdir1->md_buf = newdirbp; 8348 ACQUIRE_LOCK(VFSTOUFS(mp)); 8349 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs); 8350 /* 8351 * We must link the pagedep, allocdirect, and newdirblk for 8352 * the initial file page so the pointer to the new directory 8353 * is not written until the directory contents are live and 8354 * any subsequent additions are not marked live until the 8355 * block is reachable via the inode. 8356 */ 8357 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) 8358 panic("setup_newdir: lost pagedep"); 8359 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) 8360 if (wk->wk_type == D_ALLOCDIRECT) 8361 break; 8362 if (wk == NULL) 8363 panic("setup_newdir: lost allocdirect"); 8364 if (pagedep->pd_state & NEWBLOCK) 8365 panic("setup_newdir: NEWBLOCK already set"); 8366 newblk = WK_NEWBLK(wk); 8367 pagedep->pd_state |= NEWBLOCK; 8368 pagedep->pd_newdirblk = newdirblk; 8369 newdirblk->db_pagedep = pagedep; 8370 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8371 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); 8372 /* 8373 * Look up the inodedep for the parent directory so that we 8374 * can link mkdir2 into the pending dotdot jaddref or 8375 * the inode write if there is none. If the inode is 8376 * ALLCOMPLETE and no jaddref is present all dependencies have 8377 * been satisfied and mkdir2 can be freed. 
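 * Under SUJ the parent's link count change was journaled as part of * this mkdir, so the dotdot jaddref is expected to be the newest * entry on the parent's inoreflst; the KASSERT below verifies this.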
8378 */ 8379 inodedep_lookup(mp, dinum, 0, &inodedep); 8380 if (MOUNTEDSUJ(mp)) { 8381 if (inodedep == NULL) 8382 panic("setup_newdir: Lost parent."); 8383 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8384 inoreflst); 8385 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && 8386 (jaddref->ja_state & MKDIR_PARENT), 8387 ("setup_newdir: bad dotdot jaddref %p", jaddref)); 8388 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8389 mkdir2->md_jaddref = jaddref; 8390 jaddref->ja_mkdir = mkdir2; 8391 } else if (inodedep == NULL || 8392 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 8393 dap->da_state &= ~MKDIR_PARENT; 8394 WORKITEM_FREE(mkdir2, D_MKDIR); 8395 mkdir2 = NULL; 8396 } else { 8397 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8398 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); 8399 } 8400 *mkdirp = mkdir2; 8401 8402 return (mkdir1); 8403} 8404 8405/* 8406 * Directory entry addition dependencies. 8407 * 8408 * When adding a new directory entry, the inode (with its incremented link 8409 * count) must be written to disk before the directory entry's pointer to it. 8410 * Also, if the inode is newly allocated, the corresponding freemap must be 8411 * updated (on disk) before the directory entry's pointer. These requirements 8412 * are met via undo/redo on the directory entry's pointer, which consists 8413 * simply of the inode number. 8414 * 8415 * As directory entries are added and deleted, the free space within a 8416 * directory block can become fragmented. The ufs filesystem will compact 8417 * a fragmented directory block to make space for a new entry. When this 8418 * occurs, the offsets of previously added entries change. Any "diradd" 8419 * dependency structures corresponding to these entries must be updated with 8420 * the new offsets. 8421 */ 8422 8423/* 8424 * This routine is called after the in-memory inode's link 8425 * count has been incremented, but before the directory entry's 8426 * pointer to the inode has been set. 8427 */ 8428int 8429softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 8430 struct buf *bp; /* buffer containing directory block */ 8431 struct inode *dp; /* inode for directory */ 8432 off_t diroffset; /* offset of new entry in directory */ 8433 ino_t newinum; /* inode referenced by new directory entry */ 8434 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 8435 int isnewblk; /* entry is in a newly allocated block */ 8436{ 8437 int offset; /* offset of new entry within directory block */ 8438 ufs_lbn_t lbn; /* block in directory containing new entry */ 8439 struct fs *fs; 8440 struct diradd *dap; 8441 struct newblk *newblk; 8442 struct pagedep *pagedep; 8443 struct inodedep *inodedep; 8444 struct newdirblk *newdirblk = 0; 8445 struct mkdir *mkdir1, *mkdir2; 8446 struct jaddref *jaddref; 8447 struct ufsmount *ump; 8448 struct mount *mp; 8449 int isindir; 8450 8451 ump = dp->i_ump; 8452 mp = UFSTOVFS(ump); 8453 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8454 ("softdep_setup_directory_add called on non-softdep filesystem")); 8455 /* 8456 * Whiteouts have no dependencies. 
8457 */ 8458 if (newinum == WINO) { 8459 if (newdirbp != NULL) 8460 bdwrite(newdirbp); 8461 return (0); 8462 } 8463 jaddref = NULL; 8464 mkdir1 = mkdir2 = NULL; 8465 fs = dp->i_fs; 8466 lbn = lblkno(fs, diroffset); 8467 offset = blkoff(fs, diroffset); 8468 dap = malloc(sizeof(struct diradd), M_DIRADD, 8469 M_SOFTDEP_FLAGS|M_ZERO); 8470 workitem_alloc(&dap->da_list, D_DIRADD, mp); 8471 dap->da_offset = offset; 8472 dap->da_newinum = newinum; 8473 dap->da_state = ATTACHED; 8474 LIST_INIT(&dap->da_jwork); 8475 isindir = bp->b_lblkno >= NDADDR; 8476 if (isnewblk && 8477 (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { 8478 newdirblk = malloc(sizeof(struct newdirblk), 8479 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 8480 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8481 LIST_INIT(&newdirblk->db_mkdir); 8482 } 8483 /* 8484 * If we're creating a new directory setup the dependencies and set 8485 * the dap state to wait for them. Otherwise it's COMPLETE and 8486 * we can move on. 8487 */ 8488 if (newdirbp == NULL) { 8489 dap->da_state |= DEPCOMPLETE; 8490 ACQUIRE_LOCK(ump); 8491 } else { 8492 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 8493 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, 8494 &mkdir2); 8495 } 8496 /* 8497 * Link into parent directory pagedep to await its being written. 8498 */ 8499 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); 8500#ifdef DEBUG 8501 if (diradd_lookup(pagedep, offset) != NULL) 8502 panic("softdep_setup_directory_add: %p already at off %d\n", 8503 diradd_lookup(pagedep, offset), offset); 8504#endif 8505 dap->da_pagedep = pagedep; 8506 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 8507 da_pdlist); 8508 inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep); 8509 /* 8510 * If we're journaling, link the diradd into the jaddref so it 8511 * may be completed after the journal entry is written. Otherwise, 8512 * link the diradd into its inodedep. If the inode is not yet 8513 * written place it on the bufwait list, otherwise do the post-inode 8514 * write processing to put it on the id_pendinghd list. 8515 */ 8516 if (MOUNTEDSUJ(mp)) { 8517 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8518 inoreflst); 8519 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 8520 ("softdep_setup_directory_add: bad jaddref %p", jaddref)); 8521 jaddref->ja_diroff = diroffset; 8522 jaddref->ja_diradd = dap; 8523 add_to_journal(&jaddref->ja_list); 8524 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 8525 diradd_inode_written(dap, inodedep); 8526 else 8527 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 8528 /* 8529 * Add the journal entries for . and .. links now that the primary 8530 * link is written. 8531 */ 8532 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) { 8533 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 8534 inoreflst, if_deps); 8535 KASSERT(jaddref != NULL && 8536 jaddref->ja_ino == jaddref->ja_parent && 8537 (jaddref->ja_state & MKDIR_BODY), 8538 ("softdep_setup_directory_add: bad dot jaddref %p", 8539 jaddref)); 8540 mkdir1->md_jaddref = jaddref; 8541 jaddref->ja_mkdir = mkdir1; 8542 /* 8543 * It is important that the dotdot journal entry 8544 * is added prior to the dot entry since dot writes 8545 * both the dot and dotdot links. These both must 8546 * be added after the primary link for the journal 8547 * to remain consistent. 
8548 */ 8549 add_to_journal(&mkdir2->md_jaddref->ja_list); 8550 add_to_journal(&jaddref->ja_list); 8551 } 8552 /* 8553 * If we are adding a new directory remember this diradd so that if 8554 * we rename it we can keep the dot and dotdot dependencies. If 8555 * we are adding a new name for an inode that has a mkdiradd we 8556 * must be in rename and we have to move the dot and dotdot 8557 * dependencies to this new name. The old name is being orphaned 8558 * soon. 8559 */ 8560 if (mkdir1 != NULL) { 8561 if (inodedep->id_mkdiradd != NULL) 8562 panic("softdep_setup_directory_add: Existing mkdir"); 8563 inodedep->id_mkdiradd = dap; 8564 } else if (inodedep->id_mkdiradd) 8565 merge_diradd(inodedep, dap); 8566 if (newdirblk) { 8567 /* 8568 * There is nothing to do if we are already tracking 8569 * this block. 8570 */ 8571 if ((pagedep->pd_state & NEWBLOCK) != 0) { 8572 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 8573 FREE_LOCK(ump); 8574 return (0); 8575 } 8576 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk) 8577 == 0) 8578 panic("softdep_setup_directory_add: lost entry"); 8579 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8580 pagedep->pd_state |= NEWBLOCK; 8581 pagedep->pd_newdirblk = newdirblk; 8582 newdirblk->db_pagedep = pagedep; 8583 FREE_LOCK(ump); 8584 /* 8585 * If we extended into an indirect signal direnter to sync. 8586 */ 8587 if (isindir) 8588 return (1); 8589 return (0); 8590 } 8591 FREE_LOCK(ump); 8592 return (0); 8593} 8594 8595/* 8596 * This procedure is called to change the offset of a directory 8597 * entry when compacting a directory block which must be owned 8598 * exclusively by the caller. Note that the actual entry movement 8599 * must be done in this procedure to ensure that no I/O completions 8600 * occur while the move is in progress. 8601 */ 8602void 8603softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) 8604 struct buf *bp; /* Buffer holding directory block. */ 8605 struct inode *dp; /* inode for directory */ 8606 caddr_t base; /* address of dp->i_offset */ 8607 caddr_t oldloc; /* address of old directory location */ 8608 caddr_t newloc; /* address of new directory location */ 8609 int entrysize; /* size of directory entry */ 8610{ 8611 int offset, oldoffset, newoffset; 8612 struct pagedep *pagedep; 8613 struct jmvref *jmvref; 8614 struct diradd *dap; 8615 struct direct *de; 8616 struct mount *mp; 8617 ufs_lbn_t lbn; 8618 int flags; 8619 8620 mp = UFSTOVFS(dp->i_ump); 8621 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8622 ("softdep_change_directoryentry_offset called on " 8623 "non-softdep filesystem")); 8624 de = (struct direct *)oldloc; 8625 jmvref = NULL; 8626 flags = 0; 8627 /* 8628 * Moves are always journaled as it would be too complex to 8629 * determine if any affected adds or removes are present in the 8630 * journal. 
8631 */ 8632 if (MOUNTEDSUJ(mp)) { 8633 flags = DEPALLOC; 8634 jmvref = newjmvref(dp, de->d_ino, 8635 dp->i_offset + (oldloc - base), 8636 dp->i_offset + (newloc - base)); 8637 } 8638 lbn = lblkno(dp->i_fs, dp->i_offset); 8639 offset = blkoff(dp->i_fs, dp->i_offset); 8640 oldoffset = offset + (oldloc - base); 8641 newoffset = offset + (newloc - base); 8642 ACQUIRE_LOCK(dp->i_ump); 8643 if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0) 8644 goto done; 8645 dap = diradd_lookup(pagedep, oldoffset); 8646 if (dap) { 8647 dap->da_offset = newoffset; 8648 newoffset = DIRADDHASH(newoffset); 8649 oldoffset = DIRADDHASH(oldoffset); 8650 if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE && 8651 newoffset != oldoffset) { 8652 LIST_REMOVE(dap, da_pdlist); 8653 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset], 8654 dap, da_pdlist); 8655 } 8656 } 8657done: 8658 if (jmvref) { 8659 jmvref->jm_pagedep = pagedep; 8660 LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps); 8661 add_to_journal(&jmvref->jm_list); 8662 } 8663 bcopy(oldloc, newloc, entrysize); 8664 FREE_LOCK(dp->i_ump); 8665} 8666 8667/* 8668 * Move the mkdir dependencies and journal work from one diradd to another 8669 * when renaming a directory. The new name must depend on the mkdir deps 8670 * completing as the old name did. Directories can only have one valid link 8671 * at a time so one must be canonical. 8672 */ 8673static void 8674merge_diradd(inodedep, newdap) 8675 struct inodedep *inodedep; 8676 struct diradd *newdap; 8677{ 8678 struct diradd *olddap; 8679 struct mkdir *mkdir, *nextmd; 8680 struct ufsmount *ump; 8681 short state; 8682 8683 olddap = inodedep->id_mkdiradd; 8684 inodedep->id_mkdiradd = newdap; 8685 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8686 newdap->da_state &= ~DEPCOMPLETE; 8687 ump = VFSTOUFS(inodedep->id_list.wk_mp); 8688 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8689 mkdir = nextmd) { 8690 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8691 if (mkdir->md_diradd != olddap) 8692 continue; 8693 mkdir->md_diradd = newdap; 8694 state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY); 8695 newdap->da_state |= state; 8696 olddap->da_state &= ~state; 8697 if ((olddap->da_state & 8698 (MKDIR_PARENT | MKDIR_BODY)) == 0) 8699 break; 8700 } 8701 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8702 panic("merge_diradd: unfound ref"); 8703 } 8704 /* 8705 * Any mkdir related journal items are not safe to be freed until 8706 * the new name is stable. 8707 */ 8708 jwork_move(&newdap->da_jwork, &olddap->da_jwork); 8709 olddap->da_state |= DEPCOMPLETE; 8710 complete_diradd(olddap); 8711} 8712 8713/* 8714 * Move the diradd to the pending list when all diradd dependencies are 8715 * complete. 8716 */ 8717static void 8718complete_diradd(dap) 8719 struct diradd *dap; 8720{ 8721 struct pagedep *pagedep; 8722 8723 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 8724 if (dap->da_state & DIRCHG) 8725 pagedep = dap->da_previous->dm_pagedep; 8726 else 8727 pagedep = dap->da_pagedep; 8728 LIST_REMOVE(dap, da_pdlist); 8729 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 8730 } 8731} 8732 8733/* 8734 * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal 8735 * add entries and conditonally journal the remove. 
8736 */ 8737static void 8738cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref) 8739 struct diradd *dap; 8740 struct dirrem *dirrem; 8741 struct jremref *jremref; 8742 struct jremref *dotremref; 8743 struct jremref *dotdotremref; 8744{ 8745 struct inodedep *inodedep; 8746 struct jaddref *jaddref; 8747 struct inoref *inoref; 8748 struct ufsmount *ump; 8749 struct mkdir *mkdir; 8750 8751 /* 8752 * If no remove references were allocated we're on a non-journaled 8753 * filesystem and can skip the cancel step. 8754 */ 8755 if (jremref == NULL) { 8756 free_diradd(dap, NULL); 8757 return; 8758 } 8759 /* 8760 * Cancel the primary name an free it if it does not require 8761 * journaling. 8762 */ 8763 if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum, 8764 0, &inodedep) != 0) { 8765 /* Abort the addref that reference this diradd. */ 8766 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 8767 if (inoref->if_list.wk_type != D_JADDREF) 8768 continue; 8769 jaddref = (struct jaddref *)inoref; 8770 if (jaddref->ja_diradd != dap) 8771 continue; 8772 if (cancel_jaddref(jaddref, inodedep, 8773 &dirrem->dm_jwork) == 0) { 8774 free_jremref(jremref); 8775 jremref = NULL; 8776 } 8777 break; 8778 } 8779 } 8780 /* 8781 * Cancel subordinate names and free them if they do not require 8782 * journaling. 8783 */ 8784 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8785 ump = VFSTOUFS(dap->da_list.wk_mp); 8786 LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) { 8787 if (mkdir->md_diradd != dap) 8788 continue; 8789 if ((jaddref = mkdir->md_jaddref) == NULL) 8790 continue; 8791 mkdir->md_jaddref = NULL; 8792 if (mkdir->md_state & MKDIR_PARENT) { 8793 if (cancel_jaddref(jaddref, NULL, 8794 &dirrem->dm_jwork) == 0) { 8795 free_jremref(dotdotremref); 8796 dotdotremref = NULL; 8797 } 8798 } else { 8799 if (cancel_jaddref(jaddref, inodedep, 8800 &dirrem->dm_jwork) == 0) { 8801 free_jremref(dotremref); 8802 dotremref = NULL; 8803 } 8804 } 8805 } 8806 } 8807 8808 if (jremref) 8809 journal_jremref(dirrem, jremref, inodedep); 8810 if (dotremref) 8811 journal_jremref(dirrem, dotremref, inodedep); 8812 if (dotdotremref) 8813 journal_jremref(dirrem, dotdotremref, NULL); 8814 jwork_move(&dirrem->dm_jwork, &dap->da_jwork); 8815 free_diradd(dap, &dirrem->dm_jwork); 8816} 8817 8818/* 8819 * Free a diradd dependency structure. This routine must be called 8820 * with splbio interrupts blocked. 
8821 */ 8822static void 8823free_diradd(dap, wkhd) 8824 struct diradd *dap; 8825 struct workhead *wkhd; 8826{ 8827 struct dirrem *dirrem; 8828 struct pagedep *pagedep; 8829 struct inodedep *inodedep; 8830 struct mkdir *mkdir, *nextmd; 8831 struct ufsmount *ump; 8832 8833 ump = VFSTOUFS(dap->da_list.wk_mp); 8834 LOCK_OWNED(ump); 8835 LIST_REMOVE(dap, da_pdlist); 8836 if (dap->da_state & ONWORKLIST) 8837 WORKLIST_REMOVE(&dap->da_list); 8838 if ((dap->da_state & DIRCHG) == 0) { 8839 pagedep = dap->da_pagedep; 8840 } else { 8841 dirrem = dap->da_previous; 8842 pagedep = dirrem->dm_pagedep; 8843 dirrem->dm_dirinum = pagedep->pd_ino; 8844 dirrem->dm_state |= COMPLETE; 8845 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 8846 add_to_worklist(&dirrem->dm_list, 0); 8847 } 8848 if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum, 8849 0, &inodedep) != 0) 8850 if (inodedep->id_mkdiradd == dap) 8851 inodedep->id_mkdiradd = NULL; 8852 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8853 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8854 mkdir = nextmd) { 8855 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8856 if (mkdir->md_diradd != dap) 8857 continue; 8858 dap->da_state &= 8859 ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 8860 LIST_REMOVE(mkdir, md_mkdirs); 8861 if (mkdir->md_state & ONWORKLIST) 8862 WORKLIST_REMOVE(&mkdir->md_list); 8863 if (mkdir->md_jaddref != NULL) 8864 panic("free_diradd: Unexpected jaddref"); 8865 WORKITEM_FREE(mkdir, D_MKDIR); 8866 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 8867 break; 8868 } 8869 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8870 panic("free_diradd: unfound ref"); 8871 } 8872 if (inodedep) 8873 free_inodedep(inodedep); 8874 /* 8875 * Free any journal segments waiting for the directory write. 8876 */ 8877 handle_jwork(&dap->da_jwork); 8878 WORKITEM_FREE(dap, D_DIRADD); 8879} 8880 8881/* 8882 * Directory entry removal dependencies. 8883 * 8884 * When removing a directory entry, the entry's inode pointer must be 8885 * zero'ed on disk before the corresponding inode's link count is decremented 8886 * (possibly freeing the inode for re-use). This dependency is handled by 8887 * updating the directory entry but delaying the inode count reduction until 8888 * after the directory block has been written to disk. After this point, the 8889 * inode count can be decremented whenever it is convenient. 8890 */ 8891 8892/* 8893 * This routine should be called immediately after removing 8894 * a directory entry. The inode's link count should not be 8895 * decremented by the calling procedure -- the soft updates 8896 * code will do this task when it is safe. 8897 */ 8898void 8899softdep_setup_remove(bp, dp, ip, isrmdir) 8900 struct buf *bp; /* buffer containing directory block */ 8901 struct inode *dp; /* inode for the directory being modified */ 8902 struct inode *ip; /* inode for directory entry being removed */ 8903 int isrmdir; /* indicates if doing RMDIR */ 8904{ 8905 struct dirrem *dirrem, *prevdirrem; 8906 struct inodedep *inodedep; 8907 int direct; 8908 8909 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 8910 ("softdep_setup_remove called on non-softdep filesystem")); 8911 /* 8912 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want 8913 * newdirrem() to setup the full directory remove which requires 8914 * isrmdir > 1. 8915 */ 8916 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 8917 /* 8918 * Add the dirrem to the inodedep's pending remove list for quick 8919 * discovery later. 
8920 */ 8921 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 8922 &inodedep) == 0) 8923 panic("softdep_setup_remove: Lost inodedep."); 8924 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 8925 dirrem->dm_state |= ONDEPLIST; 8926 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 8927 8928 /* 8929 * If the COMPLETE flag is clear, then there were no active 8930 * entries and we want to roll back to a zeroed entry until 8931 * the new inode is committed to disk. If the COMPLETE flag is 8932 * set then we have deleted an entry that never made it to 8933 * disk. If the entry we deleted resulted from a name change, 8934 * then the old name still resides on disk. We cannot delete 8935 * its inode (returned to us in prevdirrem) until the zeroed 8936 * directory entry gets to disk. The new inode has never been 8937 * referenced on the disk, so can be deleted immediately. 8938 */ 8939 if ((dirrem->dm_state & COMPLETE) == 0) { 8940 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 8941 dm_next); 8942 FREE_LOCK(ip->i_ump); 8943 } else { 8944 if (prevdirrem != NULL) 8945 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 8946 prevdirrem, dm_next); 8947 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 8948 direct = LIST_EMPTY(&dirrem->dm_jremrefhd); 8949 FREE_LOCK(ip->i_ump); 8950 if (direct) 8951 handle_workitem_remove(dirrem, 0); 8952 } 8953} 8954 8955/* 8956 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the 8957 * pd_pendinghd list of a pagedep. 8958 */ 8959static struct diradd * 8960diradd_lookup(pagedep, offset) 8961 struct pagedep *pagedep; 8962 int offset; 8963{ 8964 struct diradd *dap; 8965 8966 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 8967 if (dap->da_offset == offset) 8968 return (dap); 8969 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 8970 if (dap->da_offset == offset) 8971 return (dap); 8972 return (NULL); 8973} 8974 8975/* 8976 * Search for a .. diradd dependency in a directory that is being removed. 8977 * If the directory was renamed to a new parent we have a diradd rather 8978 * than a mkdir for the .. entry. We need to cancel it now before 8979 * it is found in truncate(). 8980 */ 8981static struct jremref * 8982cancel_diradd_dotdot(ip, dirrem, jremref) 8983 struct inode *ip; 8984 struct dirrem *dirrem; 8985 struct jremref *jremref; 8986{ 8987 struct pagedep *pagedep; 8988 struct diradd *dap; 8989 struct worklist *wk; 8990 8991 if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0, 8992 &pagedep) == 0) 8993 return (jremref); 8994 dap = diradd_lookup(pagedep, DOTDOT_OFFSET); 8995 if (dap == NULL) 8996 return (jremref); 8997 cancel_diradd(dap, dirrem, jremref, NULL, NULL); 8998 /* 8999 * Mark any journal work as belonging to the parent so it is freed 9000 * with the .. reference. 9001 */ 9002 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) 9003 wk->wk_state |= MKDIR_PARENT; 9004 return (NULL); 9005} 9006 9007/* 9008 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to 9009 * replace it with a dirrem/diradd pair as a result of re-parenting a 9010 * directory. This ensures that we don't simultaneously have a mkdir and 9011 * a diradd for the same .. entry. 
 */
static struct jremref *
cancel_mkdir_dotdot(ip, dirrem, jremref)
	struct inode *ip;
	struct dirrem *dirrem;
	struct jremref *jremref;
{
	struct inodedep *inodedep;
	struct jaddref *jaddref;
	struct ufsmount *ump;
	struct mkdir *mkdir;
	struct diradd *dap;

	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
	    &inodedep) == 0)
		return (jremref);
	dap = inodedep->id_mkdiradd;
	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
		return (jremref);
	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
			break;
	if (mkdir == NULL)
		panic("cancel_mkdir_dotdot: Unable to find mkdir");
	if ((jaddref = mkdir->md_jaddref) != NULL) {
		mkdir->md_jaddref = NULL;
		jaddref->ja_state &= ~MKDIR_PARENT;
		if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
		    &inodedep) == 0)
			panic("cancel_mkdir_dotdot: Lost parent inodedep");
		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
			journal_jremref(dirrem, jremref, inodedep);
			jremref = NULL;
		}
	}
	if (mkdir->md_state & ONWORKLIST)
		WORKLIST_REMOVE(&mkdir->md_list);
	mkdir->md_state |= ALLCOMPLETE;
	complete_mkdir(mkdir);
	return (jremref);
}

static void
journal_jremref(dirrem, jremref, inodedep)
	struct dirrem *dirrem;
	struct jremref *jremref;
	struct inodedep *inodedep;
{

	if (inodedep == NULL)
		if (inodedep_lookup(jremref->jr_list.wk_mp,
		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
			panic("journal_jremref: Lost inodedep");
	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
	add_to_journal(&jremref->jr_list);
}

static void
dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
	struct dirrem *dirrem;
	struct jremref *jremref;
	struct jremref *dotremref;
	struct jremref *dotdotremref;
{
	struct inodedep *inodedep;

	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
	    &inodedep) == 0)
		panic("dirrem_journal: Lost inodedep");
	journal_jremref(dirrem, jremref, inodedep);
	if (dotremref)
		journal_jremref(dirrem, dotremref, inodedep);
	if (dotdotremref)
		journal_jremref(dirrem, dotdotremref, NULL);
}

/*
 * Allocate a new dirrem if appropriate and return it along with
 * its associated pagedep.  Called without a lock, returns with lock.
 */
static struct dirrem *
newdirrem(bp, dp, ip, isrmdir, prevdirremp)
	struct buf *bp;		/* buffer containing directory block */
	struct inode *dp;	/* inode for the directory being modified */
	struct inode *ip;	/* inode for directory entry being removed */
	int isrmdir;		/* indicates if doing RMDIR */
	struct dirrem **prevdirremp; /* previously referenced inode, if any */
{
	int offset;
	ufs_lbn_t lbn;
	struct diradd *dap;
	struct dirrem *dirrem;
	struct pagedep *pagedep;
	struct jremref *jremref;
	struct jremref *dotremref;
	struct jremref *dotdotremref;
	struct vnode *dvp;

	/*
	 * Whiteouts have no deletion dependencies.
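	 * (Explanatory note, not from the original comment: a whiteout
	 * entry carries only the reserved inode number WINO and names no
	 * allocated inode, so there is no link count to protect and no
	 * update ordering to enforce when it is removed.)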
	 */
	if (ip == NULL)
		panic("newdirrem: whiteout");
	dvp = ITOV(dp);
	/*
	 * If the system is over its limit and our filesystem is
	 * responsible for more than our share of that usage and
	 * we are not a snapshot, request some inodedep cleanup.
	 * Limiting the number of dirrem structures will also limit
	 * the number of freefile and freeblks structures.
	 */
	ACQUIRE_LOCK(ip->i_ump);
	while (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2 &&
	    ip->i_ump->softdep_curdeps[D_DIRREM] >
	    (max_softdeps / 2) / stat_flush_threads)
		(void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS);
	FREE_LOCK(ip->i_ump);
	dirrem = malloc(sizeof(struct dirrem),
	    M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
	LIST_INIT(&dirrem->dm_jremrefhd);
	LIST_INIT(&dirrem->dm_jwork);
	dirrem->dm_state = isrmdir ? RMDIR : 0;
	dirrem->dm_oldinum = ip->i_number;
	*prevdirremp = NULL;
	/*
	 * Allocate remove reference structures to track journal write
	 * dependencies.  We will always have one for the link and
	 * when doing directories we will always have one more for dot.
	 * When renaming a directory we skip the dotdot link change so
	 * this is not needed.
	 */
	jremref = dotremref = dotdotremref = NULL;
	if (DOINGSUJ(dvp)) {
		if (isrmdir) {
			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
			    ip->i_effnlink + 2);
			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
			    ip->i_effnlink + 1);
			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
			    dp->i_effnlink + 1);
			dotdotremref->jr_state |= MKDIR_PARENT;
		} else
			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
			    ip->i_effnlink + 1);
	}
	ACQUIRE_LOCK(ip->i_ump);
	lbn = lblkno(dp->i_fs, dp->i_offset);
	offset = blkoff(dp->i_fs, dp->i_offset);
	pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
	    &pagedep);
	dirrem->dm_pagedep = pagedep;
	dirrem->dm_offset = offset;
	/*
	 * If we're renaming a .. link to a new directory, cancel any
	 * existing MKDIR_PARENT mkdir.  If it has already been canceled
	 * the jremref is preserved for any potential diradd in this
	 * location.  This cannot coincide with an rmdir.
	 */
	if (dp->i_offset == DOTDOT_OFFSET) {
		if (isrmdir)
			panic("newdirrem: .. directory change during remove?");
		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
	}
	/*
	 * If we're removing a directory search for the .. dependency now and
	 * cancel it.  Any pending journal work will be added to the dirrem
	 * to be completed when the workitem remove completes.
	 */
	if (isrmdir)
		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
	/*
	 * Check for a diradd dependency for the same directory entry.
	 * If present, then both dependencies become obsolete and can
	 * be de-allocated.
	 */
	dap = diradd_lookup(pagedep, offset);
	if (dap == NULL) {
		/*
		 * Link the jremref structures into the dirrem so they are
		 * written prior to the pagedep.
		 */
		if (jremref)
			dirrem_journal(dirrem, jremref, dotremref,
			    dotdotremref);
		return (dirrem);
	}
	/*
	 * Must be ATTACHED at this point.
	 */
	if ((dap->da_state & ATTACHED) == 0)
		panic("newdirrem: not ATTACHED");
	if (dap->da_newinum != ip->i_number)
		panic("newdirrem: inum %ju should be %ju",
		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
	/*
	 * If we are deleting a changed name that never made it to disk,
	 * then return the dirrem describing the previous inode (which
	 * represents the inode currently referenced from this entry on disk).
	 */
	if ((dap->da_state & DIRCHG) != 0) {
		*prevdirremp = dap->da_previous;
		dap->da_state &= ~DIRCHG;
		dap->da_pagedep = pagedep;
	}
	/*
	 * We are deleting an entry that never made it to disk.
	 * Mark it COMPLETE so we can delete its inode immediately.
	 */
	dirrem->dm_state |= COMPLETE;
	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
#ifdef SUJ_DEBUG
	if (isrmdir == 0) {
		struct worklist *wk;

		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
				panic("bad wk %p (0x%X)", wk, wk->wk_state);
	}
#endif

	return (dirrem);
}

/*
 * Directory entry change dependencies.
 *
 * Changing an existing directory entry requires that an add operation
 * be completed first followed by a deletion.  The semantics for the addition
 * are identical to the description of adding a new entry above except
 * that the rollback is to the old inode number rather than zero.  Once
 * the addition dependency is completed, the removal is done as described
 * in the removal routine above.
 */

/*
 * This routine should be called immediately after changing
 * a directory entry.  The inode's link count should not be
 * decremented by the calling procedure -- the soft updates
 * code will perform this task when it is safe.
 */
void
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	struct buf *bp;		/* buffer containing directory block */
	struct inode *dp;	/* inode for the directory being modified */
	struct inode *ip;	/* inode for directory entry being removed */
	ino_t newinum;		/* new inode number for changed entry */
	int isrmdir;		/* indicates if doing RMDIR */
{
	int offset;
	struct diradd *dap = NULL;
	struct dirrem *dirrem, *prevdirrem;
	struct pagedep *pagedep;
	struct inodedep *inodedep;
	struct jaddref *jaddref;
	struct mount *mp;

	offset = blkoff(dp->i_fs, dp->i_offset);
	mp = UFSTOVFS(dp->i_ump);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_directory_change called on non-softdep filesystem"));

	/*
	 * Whiteouts do not need diradd dependencies.
	 */
	if (newinum != WINO) {
		dap = malloc(sizeof(struct diradd),
		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
		workitem_alloc(&dap->da_list, D_DIRADD, mp);
		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
		dap->da_offset = offset;
		dap->da_newinum = newinum;
		LIST_INIT(&dap->da_jwork);
	}

	/*
	 * Allocate a new dirrem and ACQUIRE_LOCK.
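	 * (Explanatory note: as stated at newdirrem() above, it is called
	 * without the per-filesystem lock and returns with it held, so
	 * every path out of this function below ends with FREE_LOCK().)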
	 */
	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
	pagedep = dirrem->dm_pagedep;
	/*
	 * The possible values for isrmdir:
	 *	0 - non-directory file rename
	 *	1 - directory rename within same directory
	 *	inum - directory rename to new directory of given inode number
	 * When renaming to a new directory, we are both deleting and
	 * creating a new directory entry, so the link count on the new
	 * directory should not change.  Thus we do not need the followup
	 * dirrem which is usually done in handle_workitem_remove.  We set
	 * the DIRCHG flag to tell handle_workitem_remove to skip the
	 * followup dirrem.
	 */
	if (isrmdir > 1)
		dirrem->dm_state |= DIRCHG;

	/*
	 * Whiteouts have no additional dependencies,
	 * so just put the dirrem on the correct list.
	 */
	if (newinum == WINO) {
		if ((dirrem->dm_state & COMPLETE) == 0) {
			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
			    dm_next);
		} else {
			dirrem->dm_dirinum = pagedep->pd_ino;
			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
				add_to_worklist(&dirrem->dm_list, 0);
		}
		FREE_LOCK(dp->i_ump);
		return;
	}
	/*
	 * Add the dirrem to the inodedep's pending remove list for quick
	 * discovery later.  A valid nlinkdelta ensures that this lookup
	 * will not fail.
	 */
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
		panic("softdep_setup_directory_change: Lost inodedep.");
	dirrem->dm_state |= ONDEPLIST;
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);

	/*
	 * If the COMPLETE flag is clear, then there were no active
	 * entries and we want to roll back to the previous inode until
	 * the new inode is committed to disk.  If the COMPLETE flag is
	 * set, then we have deleted an entry that never made it to disk.
	 * If the entry we deleted resulted from a name change, then the old
	 * inode reference still resides on disk.  Any rollback that we do
	 * needs to be to that old inode (returned to us in prevdirrem).  If
	 * the entry we deleted resulted from a create, then there is
	 * no entry on the disk, so we want to roll back to zero rather
	 * than the uncommitted inode.  In either of the COMPLETE cases we
	 * want to immediately free the unwritten and unreferenced inode.
	 */
	if ((dirrem->dm_state & COMPLETE) == 0) {
		dap->da_previous = dirrem;
	} else {
		if (prevdirrem != NULL) {
			dap->da_previous = prevdirrem;
		} else {
			dap->da_state &= ~DIRCHG;
			dap->da_pagedep = pagedep;
		}
		dirrem->dm_dirinum = pagedep->pd_ino;
		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
			add_to_worklist(&dirrem->dm_list, 0);
	}
	/*
	 * Look up the jaddref for this journal entry.  We must finish
	 * initializing it and make the diradd write dependent on it.
	 * If we're not journaling, put it on the id_bufwait list if the
	 * inode is not yet written.  If it is written, do the post-inode
	 * write processing to put it on the id_pendinghd list.
	 */
	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
	if (MOUNTEDSUJ(mp)) {
		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
		    inoreflst);
		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
		    ("softdep_setup_directory_change: bad jaddref %p",
		    jaddref));
		jaddref->ja_diroff = dp->i_offset;
		jaddref->ja_diradd = dap;
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		add_to_journal(&jaddref->ja_list);
	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
		dap->da_state |= COMPLETE;
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
	} else {
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
	}
	/*
	 * If we're making a new name for a directory that has not been
	 * committed, we need to move the dot and dotdot references to
	 * this new name.
	 */
	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
		merge_diradd(inodedep, dap);
	FREE_LOCK(dp->i_ump);
}

/*
 * Called whenever the link count on an inode is changed.
 * It creates an inode dependency so that the new reference(s)
 * to the inode cannot be committed to disk until the updated
 * inode has been written.
 */
void
softdep_change_linkcnt(ip)
	struct inode *ip;	/* the inode with the increased link count */
{
	struct inodedep *inodedep;
	int dflags;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_change_linkcnt called on non-softdep filesystem"));
	ACQUIRE_LOCK(ip->i_ump);
	dflags = DEPALLOC;
	if (IS_SNAPSHOT(ip))
		dflags |= NODELAY;
	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
	if (ip->i_nlink < ip->i_effnlink)
		panic("softdep_change_linkcnt: bad delta");
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(ip->i_ump);
}

/*
 * Attach a sbdep dependency to the superblock buf so that we can keep
 * track of the head of the linked list of referenced but unlinked inodes.
 */
void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{
	struct sbdep *sbdep;
	struct worklist *wk;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_SBDEP)
			break;
	if (wk != NULL)
		return;
	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
	sbdep->sb_fs = fs;
	sbdep->sb_ump = ump;
	ACQUIRE_LOCK(ump);
	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
	FREE_LOCK(ump);
}

/*
 * Return the first unlinked inodedep which is ready to be the head of the
 * list.  The inodedep and all those after it must have valid next pointers.
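 *
 * An illustrative sketch (not from the original comment): with the
 * in-memory tail queue
 *
 *	softdep_unlinked: I1 -> I2 -> I3
 *
 * the scan below starts at the tail (I3) and walks backward while each
 * entry has UNLINKNEXT set, i.e. while its on-disk di_freelink is known
 * to be valid, and returns the earliest such entry, which may then be
 * published as fs_sujfree by initiate_write_sbdep() below.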
 */
static struct inodedep *
first_unlinked_inodedep(ump)
	struct ufsmount *ump;
{
	struct inodedep *inodedep;
	struct inodedep *idp;

	LOCK_OWNED(ump);
	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
	    inodedep; inodedep = idp) {
		if ((inodedep->id_state & UNLINKNEXT) == 0)
			return (NULL);
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
			break;
		if ((inodedep->id_state & UNLINKPREV) == 0)
			break;
	}
	return (inodedep);
}

/*
 * Set the sujfree unlinked head pointer prior to writing a superblock.
 */
static void
initiate_write_sbdep(sbdep)
	struct sbdep *sbdep;
{
	struct inodedep *inodedep;
	struct fs *bpfs;
	struct fs *fs;

	bpfs = sbdep->sb_fs;
	fs = sbdep->sb_ump->um_fs;
	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
	if (inodedep) {
		fs->fs_sujfree = inodedep->id_ino;
		inodedep->id_state |= UNLINKPREV;
	} else
		fs->fs_sujfree = 0;
	bpfs->fs_sujfree = fs->fs_sujfree;
}

/*
 * After a superblock is written, determine whether it must be written again
 * due to a changing unlinked list head.
 */
static int
handle_written_sbdep(sbdep, bp)
	struct sbdep *sbdep;
	struct buf *bp;
{
	struct inodedep *inodedep;
	struct mount *mp;
	struct fs *fs;

	LOCK_OWNED(sbdep->sb_ump);
	fs = sbdep->sb_fs;
	mp = UFSTOVFS(sbdep->sb_ump);
	/*
	 * If the superblock doesn't match the in-memory list, start over.
	 */
	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
	    (inodedep == NULL && fs->fs_sujfree != 0)) {
		bdirty(bp);
		return (1);
	}
	WORKITEM_FREE(sbdep, D_SBDEP);
	if (fs->fs_sujfree == 0)
		return (0);
	/*
	 * Now that we have a record of this inode in stable store, allow it
	 * to be written to free up pending work.  Inodes may see a lot of
	 * write activity after they are unlinked which we must not hold up.
	 */
	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
			    inodedep, inodedep->id_state);
		if (inodedep->id_state & UNLINKONLIST)
			break;
		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
	}

	return (0);
}

/*
 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
 */
static void
unlinked_inodedep(mp, inodedep)
	struct mount *mp;
	struct inodedep *inodedep;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	if (MOUNTEDSUJ(mp) == 0)
		return;
	ump->um_fs->fs_fmod = 1;
	if (inodedep->id_state & UNLINKED)
		panic("unlinked_inodedep: %p already unlinked", inodedep);
	inodedep->id_state |= UNLINKED;
	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
}

/*
 * Remove an inodedep from the unlinked inodedep list.  This may require
 * disk writes if the inode has made it that far.
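 *
 * A sketch of the on-disk list this function maintains (illustrative,
 * not from the original comment):
 *
 *	fs_sujfree -> dinode(A).di_freelink -> dinode(B).di_freelink -> 0
 *
 * Unhooking B means rewriting A's di_freelink (or fs_sujfree when B is
 * the head), so removing a fully linked entry requires one or two
 * synchronous buffer writes below.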
 */
static void
clear_unlinked_inodedep(inodedep)
	struct inodedep *inodedep;
{
	struct ufsmount *ump;
	struct inodedep *idp;
	struct inodedep *idn;
	struct fs *fs;
	struct buf *bp;
	ino_t ino;
	ino_t nino;
	ino_t pino;
	int error;

	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	fs = ump->um_fs;
	ino = inodedep->id_ino;
	error = 0;
	for (;;) {
		LOCK_OWNED(ump);
		KASSERT((inodedep->id_state & UNLINKED) != 0,
		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
		    inodedep));
		/*
		 * If nothing has yet been written simply remove us from
		 * the in memory list and return.  This is the most common
		 * case where handle_workitem_remove() loses the final
		 * reference.
		 */
		if ((inodedep->id_state & UNLINKLINKS) == 0)
			break;
		/*
		 * If we have a NEXT pointer and no PREV pointer we can simply
		 * clear NEXT's PREV and remove ourselves from the list.  Be
		 * careful not to clear PREV if the superblock points at
		 * next as well.
		 */
		idn = TAILQ_NEXT(inodedep, id_unlinked);
		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
			if (idn && fs->fs_sujfree != idn->id_ino)
				idn->id_state &= ~UNLINKPREV;
			break;
		}
		/*
		 * Here we have an inodedep which is actually linked into
		 * the list.  We must remove it by forcing a write to the
		 * link before us, whether it be the superblock or an inode.
		 * Unfortunately the list may change while we're waiting
		 * on the buf lock for either resource so we must loop until
		 * we lock the right one.  If both the superblock and an
		 * inode point to this inode we must clear the inode first
		 * followed by the superblock.
		 */
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		pino = 0;
		if (idp && (idp->id_state & UNLINKNEXT))
			pino = idp->id_ino;
		FREE_LOCK(ump);
		if (pino == 0) {
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
		} else {
			error = bread(ump->um_devvp,
			    fsbtodb(fs, ino_to_fsba(fs, pino)),
			    (int)fs->fs_bsize, NOCRED, &bp);
			if (error)
				brelse(bp);
		}
		ACQUIRE_LOCK(ump);
		if (error)
			break;
		/* If the list has changed restart the loop. */
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		nino = 0;
		if (idp && (idp->id_state & UNLINKNEXT))
			nino = idp->id_ino;
		if (nino != pino ||
		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
			FREE_LOCK(ump);
			brelse(bp);
			ACQUIRE_LOCK(ump);
			continue;
		}
		nino = 0;
		idn = TAILQ_NEXT(inodedep, id_unlinked);
		if (idn)
			nino = idn->id_ino;
		/*
		 * Remove us from the in memory list.  After this we cannot
		 * access the inodedep.
		 */
		KASSERT((inodedep->id_state & UNLINKED) != 0,
		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
		    inodedep));
		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
		FREE_LOCK(ump);
		/*
		 * The predecessor's next pointer is manually updated here
		 * so that the NEXT flag is never cleared for an element
		 * that is in the list.
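		 * For example (illustrative note): removing B from the
		 * chain A -> B -> C stores C's inode number into A's
		 * on-disk di_freelink, or into the superblock's fs_sujfree
		 * copy when there is no predecessor inode (pino == 0).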
		 */
		if (pino == 0) {
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
			    bp);
		} else if (fs->fs_magic == FS_UFS1_MAGIC)
			((struct ufs1_dinode *)bp->b_data +
			    ino_to_fsbo(fs, pino))->di_freelink = nino;
		else
			((struct ufs2_dinode *)bp->b_data +
			    ino_to_fsbo(fs, pino))->di_freelink = nino;
		/*
		 * If the bwrite fails we have no recourse to recover.  The
		 * filesystem is corrupted already.
		 */
		bwrite(bp);
		ACQUIRE_LOCK(ump);
		/*
		 * If the superblock pointer still needs to be cleared force
		 * a write here.
		 */
		if (fs->fs_sujfree == ino) {
			FREE_LOCK(ump);
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
			    bp);
			bwrite(bp);
			ACQUIRE_LOCK(ump);
		}

		if (fs->fs_sujfree != ino)
			return;
		panic("clear_unlinked_inodedep: Failed to clear free head");
	}
	if (inodedep->id_ino == fs->fs_sujfree)
		panic("clear_unlinked_inodedep: Freeing head of free list");
	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
	return;
}

/*
 * This workitem decrements the inode's link count.
 * If the link count reaches zero, the file is removed.
 */
static int
handle_workitem_remove(dirrem, flags)
	struct dirrem *dirrem;
	int flags;
{
	struct inodedep *inodedep;
	struct workhead dotdotwk;
	struct worklist *wk;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *vp;
	struct inode *ip;
	ino_t oldinum;

	if (dirrem->dm_state & ONWORKLIST)
		panic("handle_workitem_remove: dirrem %p still on worklist",
		    dirrem);
	oldinum = dirrem->dm_oldinum;
	mp = dirrem->dm_list.wk_mp;
	ump = VFSTOUFS(mp);
	flags |= LK_EXCLUSIVE;
	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
		return (EBUSY);
	ip = VTOI(vp);
	ACQUIRE_LOCK(ump);
	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
		panic("handle_workitem_remove: lost inodedep");
	if (dirrem->dm_state & ONDEPLIST)
		LIST_REMOVE(dirrem, dm_inonext);
	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
	    ("handle_workitem_remove: Journal entries not written."));

	/*
	 * Move all dependencies waiting on the remove to complete
	 * from the dirrem to the inode inowait list to be completed
	 * after the inode has been updated and written to disk.  Any
	 * marked MKDIR_PARENT are saved to be completed when the ..
	 * ref is removed.
	 */
	LIST_INIT(&dotdotwk);
	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
		WORKLIST_REMOVE(wk);
		if (wk->wk_state & MKDIR_PARENT) {
			wk->wk_state &= ~MKDIR_PARENT;
			WORKLIST_INSERT(&dotdotwk, wk);
			continue;
		}
		WORKLIST_INSERT(&inodedep->id_inowait, wk);
	}
	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
	/*
	 * Normal file deletion.
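	 * (Explanatory note: the link count is decremented only now that
	 * the zeroed directory entry has reached the disk, as required by
	 * the removal dependency described above.)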
	 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		DIP_SET(ip, i_nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		if (ip->i_nlink < ip->i_effnlink)
			panic("handle_workitem_remove: bad file delta");
		if (ip->i_nlink == 0)
			unlinked_inodedep(mp, inodedep);
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: worklist not empty. %s",
		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	/*
	 * Directory deletion.  Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Arrange to have the reference count on the parent decremented
	 * to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	DIP_SET(ip, i_nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	if (ip->i_nlink < ip->i_effnlink)
		panic("handle_workitem_remove: bad dir delta");
	if (ip->i_nlink == 0)
		unlinked_inodedep(mp, inodedep);
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	/*
	 * Rename a directory to a new parent.  Since we are both deleting
	 * and creating a new directory entry, the link count on the new
	 * directory should not change.  Thus we skip the followup dirrem.
	 */
	if (dirrem->dm_state & DIRCHG) {
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	dirrem->dm_state = ONDEPLIST;
	dirrem->dm_oldinum = dirrem->dm_dirinum;
	/*
	 * Place the dirrem on the parent's dirremhd list.
	 */
	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
		panic("handle_workitem_remove: lost dir inodedep");
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
	/*
	 * If the allocated inode has never been written to disk, then
	 * the on-disk inode is zero'ed and we can remove the file
	 * immediately.  When journaling, if the inode has been marked
	 * unlinked and not DEPCOMPLETE we know it can never be written.
	 */
	inodedep_lookup(mp, oldinum, 0, &inodedep);
	if (inodedep == NULL ||
	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
	    check_inode_unwritten(inodedep)) {
		FREE_LOCK(ump);
		vput(vp);
		return (handle_workitem_remove(dirrem, flags));
	}
	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
	FREE_LOCK(ump);
	ip->i_flag |= IN_CHANGE;
out:
	ffs_update(vp, 0);
	vput(vp);
	return (0);
}

/*
 * Inode de-allocation dependencies.
 *
 * When an inode's link count is reduced to zero, it can be de-allocated.  We
 * found it convenient to postpone de-allocation until after the inode is
 * written to disk with its new link count (zero).  At this point, all of the
 * on-disk inode's block pointers are nullified and, with careful dependency
 * list ordering, all dependencies related to the inode will be satisfied and
 * the corresponding dependency structures de-allocated.  So, if/when the
 * inode is reused, there will be no mixing of old dependencies with new
 * ones.  This artificial dependency is set up by the block de-allocation
 * procedure above (softdep_setup_freeblocks) and completed by the
 * following procedure.
 */
static void
handle_workitem_freefile(freefile)
	struct freefile *freefile;
{
	struct workhead wkhd;
	struct fs *fs;
	struct inodedep *idp;
	struct ufsmount *ump;
	int error;

	ump = VFSTOUFS(freefile->fx_list.wk_mp);
	fs = ump->um_fs;
#ifdef DEBUG
	ACQUIRE_LOCK(ump);
	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
	FREE_LOCK(ump);
	if (error)
		panic("handle_workitem_freefile: inodedep %p survived", idp);
#endif
	UFS_LOCK(ump);
	fs->fs_pendinginodes -= 1;
	UFS_UNLOCK(ump);
	LIST_INIT(&wkhd);
	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
		softdep_error("handle_workitem_freefile", error);
	ACQUIRE_LOCK(ump);
	WORKITEM_FREE(freefile, D_FREEFILE);
	FREE_LOCK(ump);
}

/*
 * Helper function which unlinks marker element from work list and returns
 * the next element on the list.
 */
static __inline struct worklist *
markernext(struct worklist *marker)
{
	struct worklist *next;

	next = LIST_NEXT(marker, wk_list);
	LIST_REMOVE(marker, wk_list);
	return (next);
}

/*
 * Disk writes.
 *
 * The dependency structures constructed above are most actively used when file
 * system blocks are written to disk.  No constraints are placed on when a
 * block can be written, but unsatisfied update dependencies are made safe by
 * modifying (or replacing) the source memory for the duration of the disk
 * write.  When the disk write completes, the memory block is again brought
 * up-to-date.
 *
 * In-core inode structure reclamation.
 *
 * Because there are a finite number of "in-core" inode structures, they are
 * reused regularly.  By transferring all inode-related dependencies to the
 * in-memory inode block and indexing them separately (via "inodedep"s), we
 * can allow "in-core" inode structures to be reused at any time and avoid
 * any increase in contention.
 *
 * Called just before entering the device driver to initiate a new disk I/O.
 * The buffer must be locked, thus, no I/O completion operations can occur
 * while we are manipulating its associated dependencies.
 */
static void
softdep_disk_io_initiation(bp)
	struct buf *bp;		/* structure describing disk write to occur */
{
	struct worklist *wk;
	struct worklist marker;
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct jblkdep *jblkdep;
	struct newblk *newblk;
	struct ufsmount *ump;

	/*
	 * We only care about write operations.  There should never
	 * be dependencies for reads.
	 */
	if (bp->b_iocmd != BIO_WRITE)
		panic("softdep_disk_io_initiation: not write");

	if (bp->b_vflags & BV_BKGRDINPROG)
		panic("softdep_disk_io_initiation: Writing buffer with "
		    "background write in progress: %p", bp);

	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
		return;
	ump = VFSTOUFS(wk->wk_mp);

	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
	PHOLD(curproc);			/* Don't swap out kernel stack */
	ACQUIRE_LOCK(ump);
	/*
	 * Do any necessary pre-I/O processing.
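	 * (Explanatory note: the marker workitem keeps our position in
	 * b_dep across the jwait() calls below, which may drop the
	 * per-filesystem lock; list entries can be removed while we
	 * sleep, but the marker cannot, and markernext() above recovers
	 * the iteration point.)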
	 */
	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
	    wk = markernext(&marker)) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		switch (wk->wk_type) {

		case D_PAGEDEP:
			initiate_write_filepage(WK_PAGEDEP(wk), bp);
			continue;

		case D_INODEDEP:
			inodedep = WK_INODEDEP(wk);
			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
				initiate_write_inodeblock_ufs1(inodedep, bp);
			else
				initiate_write_inodeblock_ufs2(inodedep, bp);
			continue;

		case D_INDIRDEP:
			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
			continue;

		case D_BMSAFEMAP:
			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
			continue;

		case D_JSEG:
			WK_JSEG(wk)->js_buf = NULL;
			continue;

		case D_FREEBLKS:
			freeblks = WK_FREEBLKS(wk);
			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
			/*
			 * We have to wait for the freeblks to be journaled
			 * before we can write an inodeblock with updated
			 * pointers.  Be careful to arrange the marker so
			 * we revisit the freeblks if it's not removed by
			 * the first jwait().
			 */
			if (jblkdep != NULL) {
				LIST_REMOVE(&marker, wk_list);
				LIST_INSERT_BEFORE(wk, &marker, wk_list);
				jwait(&jblkdep->jb_list, MNT_WAIT);
			}
			continue;

		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			/*
			 * We have to wait for the jnewblk to be journaled
			 * before we can write to a block if the contents
			 * may be confused with an earlier file's indirect
			 * at recovery time.  Handle the marker as described
			 * above.
			 */
			newblk = WK_NEWBLK(wk);
			if (newblk->nb_jnewblk != NULL &&
			    indirblk_lookup(newblk->nb_list.wk_mp,
			    newblk->nb_newblkno)) {
				LIST_REMOVE(&marker, wk_list);
				LIST_INSERT_BEFORE(wk, &marker, wk_list);
				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
			}
			continue;

		case D_SBDEP:
			initiate_write_sbdep(WK_SBDEP(wk));
			continue;

		case D_MKDIR:
		case D_FREEWORK:
		case D_FREEDEP:
		case D_JSEGDEP:
			continue;

		default:
			panic("softdep_disk_io_initiation: Unexpected type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	FREE_LOCK(ump);
	PRELE(curproc);			/* Allow swapout of kernel stack */
}

/*
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in a directory.  The buffer must be locked,
 * thus, no I/O completion operations can occur while we are
 * manipulating its associated dependencies.
 */
static void
initiate_write_filepage(pagedep, bp)
	struct pagedep *pagedep;
	struct buf *bp;
{
	struct jremref *jremref;
	struct jmvref *jmvref;
	struct dirrem *dirrem;
	struct diradd *dap;
	struct direct *ep;
	int i;

	if (pagedep->pd_state & IOSTARTED) {
		/*
		 * This can only happen if there is a driver that does not
		 * understand chaining.  Here biodone will reissue the call
		 * to strategy for the incomplete buffers.
		 */
		printf("initiate_write_filepage: already started\n");
		return;
	}
	pagedep->pd_state |= IOSTARTED;
	/*
	 * Wait for all journal remove dependencies to hit the disk.
	 * We cannot allow any potentially conflicting directory adds
	 * to be visible before removes, and rollback is too difficult.
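	 * (Explanatory note: below, each affected entry's d_ino is rolled
	 * back to the old inode number for a DIRCHG rename, or to zero
	 * for a new entry, and the diradd is flipped from ATTACHED to
	 * UNDONE for the duration of the write.)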
	 * The per-filesystem lock may be dropped and re-acquired, however
	 * we hold the buf locked so the dependency cannot go away.
	 */
	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
			jwait(&jremref->jr_list, MNT_WAIT);
	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
		jwait(&jmvref->jm_list, MNT_WAIT);
	for (i = 0; i < DAHASHSZ; i++) {
		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
			ep = (struct direct *)
			    ((char *)bp->b_data + dap->da_offset);
			if (ep->d_ino != dap->da_newinum)
				panic("%s: dir inum %ju != new %ju",
				    "initiate_write_filepage",
				    (uintmax_t)ep->d_ino,
				    (uintmax_t)dap->da_newinum);
			if (dap->da_state & DIRCHG)
				ep->d_ino = dap->da_previous->dm_oldinum;
			else
				ep->d_ino = 0;
			dap->da_state &= ~ATTACHED;
			dap->da_state |= UNDONE;
		}
	}
}

/*
 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
 * Note that any bug fixes made to this routine must be done in the
 * version found below.
 *
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in an inodeblock.  The buffer must be
 * locked, thus, no I/O completion operations can occur while we
 * are manipulating its associated dependencies.
 */
static void
initiate_write_inodeblock_ufs1(inodedep, bp)
	struct inodedep *inodedep;
	struct buf *bp;			/* The inode block */
{
	struct allocdirect *adp, *lastadp;
	struct ufs1_dinode *dp;
	struct ufs1_dinode *sip;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct fs *fs;
	ufs_lbn_t i;
#ifdef INVARIANTS
	ufs_lbn_t prevlbn = 0;
#endif
	int deplist;

	if (inodedep->id_state & IOSTARTED)
		panic("initiate_write_inodeblock_ufs1: already started");
	inodedep->id_state |= IOSTARTED;
	fs = inodedep->id_fs;
	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	LOCK_OWNED(ump);
	dp = (struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, inodedep->id_ino);

	/*
	 * If we're on the unlinked list but have not yet written our
	 * next pointer, initialize it here.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
		struct inodedep *inon;

		inon = TAILQ_NEXT(inodedep, id_unlinked);
		dp->di_freelink = inon ? inon->id_ino : 0;
	}
	/*
	 * If the bitmap is not yet written, then the allocated
	 * inode cannot be written to disk.
	 */
	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
		if (inodedep->id_savedino1 != NULL)
			panic("initiate_write_inodeblock_ufs1: I/O underway");
		FREE_LOCK(ump);
		sip = malloc(sizeof(struct ufs1_dinode),
		    M_SAVEDINO, M_SOFTDEP_FLAGS);
		ACQUIRE_LOCK(ump);
		inodedep->id_savedino1 = sip;
		*inodedep->id_savedino1 = *dp;
		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
		dp->di_gen = inodedep->id_savedino1->di_gen;
		dp->di_freelink = inodedep->id_savedino1->di_freelink;
		return;
	}
	/*
	 * If no dependencies, then there is nothing to roll back.
	 */
	inodedep->id_savedsize = dp->di_size;
	inodedep->id_savedextsize = 0;
	inodedep->id_savednlink = dp->di_nlink;
	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
	    TAILQ_EMPTY(&inodedep->id_inoreflst))
		return;
	/*
	 * Revert the link count to that of the first unwritten journal entry.
	 */
	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
	if (inoref)
		dp->di_nlink = inoref->if_nlink;
	/*
	 * Set the dependencies to busy.
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	    adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("softdep_write_inodeblock: lbn order");
		prevlbn = adp->ad_offset;
		if (adp->ad_offset < NDADDR &&
		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
			panic("%s: direct pointer #%jd mismatch %d != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset,
			    dp->di_db[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		if (adp->ad_offset >= NDADDR &&
		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
			panic("%s: indirect pointer #%jd mismatch %d != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset - NDADDR,
			    dp->di_ib[adp->ad_offset - NDADDR],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("softdep_write_inodeblock: Unknown state 0x%x",
			    adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written.  Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	    lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		if (adp->ad_offset >= NDADDR)
			break;
		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
				panic("softdep_write_inodeblock: lost dep1");
#endif /* INVARIANTS */
			dp->di_db[i] = 0;
		}
		for (i = 0; i < NIADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_ib[i] != 0 &&
			    (deplist & ((1 << NDADDR) << i)) == 0)
				panic("softdep_write_inodeblock: lost dep2");
#endif /* INVARIANTS */
			dp->di_ib[i] = 0;
		}
		return;
	}
	/*
	 * If we have zero'ed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_db[i] != 0)
				break;
		dp->di_size = (i + 1) * fs->fs_bsize;
	}
	/*
	 * The only dependencies are for indirect blocks.
	 *
	 * The file size for indirect block additions is not guaranteed.
	 * Such a guarantee would be non-trivial to achieve.  The conventional
	 * synchronous write implementation also does not make this guarantee.
	 * Fsck should catch and fix discrepancies.  Arguably, the file size
	 * can be over-estimated without destroying integrity when the file
	 * moves into the indirect blocks (i.e., is large).  If we want to
	 * postpone fsck, we are stuck with this argument.
	 */
	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
		dp->di_ib[adp->ad_offset - NDADDR] = 0;
}

/*
 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
 * Note that any bug fixes made to this routine must be done in the
 * version found above.
 *
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in an inodeblock.  The buffer must be
 * locked, thus, no I/O completion operations can occur while we
 * are manipulating its associated dependencies.
 */
static void
initiate_write_inodeblock_ufs2(inodedep, bp)
	struct inodedep *inodedep;
	struct buf *bp;			/* The inode block */
{
	struct allocdirect *adp, *lastadp;
	struct ufs2_dinode *dp;
	struct ufs2_dinode *sip;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct fs *fs;
	ufs_lbn_t i;
#ifdef INVARIANTS
	ufs_lbn_t prevlbn = 0;
#endif
	int deplist;

	if (inodedep->id_state & IOSTARTED)
		panic("initiate_write_inodeblock_ufs2: already started");
	inodedep->id_state |= IOSTARTED;
	fs = inodedep->id_fs;
	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	LOCK_OWNED(ump);
	dp = (struct ufs2_dinode *)bp->b_data +
	    ino_to_fsbo(fs, inodedep->id_ino);

	/*
	 * If we're on the unlinked list but have not yet written our
	 * next pointer, initialize it here.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
		struct inodedep *inon;

		inon = TAILQ_NEXT(inodedep, id_unlinked);
		dp->di_freelink = inon ? inon->id_ino : 0;
	}
	/*
	 * If the bitmap is not yet written, then the allocated
	 * inode cannot be written to disk.
	 */
	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
		if (inodedep->id_savedino2 != NULL)
			panic("initiate_write_inodeblock_ufs2: I/O underway");
		FREE_LOCK(ump);
		sip = malloc(sizeof(struct ufs2_dinode),
		    M_SAVEDINO, M_SOFTDEP_FLAGS);
		ACQUIRE_LOCK(ump);
		inodedep->id_savedino2 = sip;
		*inodedep->id_savedino2 = *dp;
		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
		dp->di_gen = inodedep->id_savedino2->di_gen;
		dp->di_freelink = inodedep->id_savedino2->di_freelink;
		return;
	}
	/*
	 * If no dependencies, then there is nothing to roll back.
	 */
	inodedep->id_savedsize = dp->di_size;
	inodedep->id_savedextsize = dp->di_extsize;
	inodedep->id_savednlink = dp->di_nlink;
	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
	    TAILQ_EMPTY(&inodedep->id_inoreflst))
		return;
	/*
	 * Revert the link count to that of the first unwritten journal entry.
	 */
	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
	if (inoref)
		dp->di_nlink = inoref->if_nlink;

	/*
	 * Set the ext data dependencies to busy.
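	 * ("Busy" here, as an explanatory note, means each allocdirect is
	 * flipped from ATTACHED to UNDONE while the on-disk copy carries
	 * the rolled-back pointer; the post-write completion processing
	 * restores the up-to-date pointers once the write is done.)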
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
	    adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("softdep_write_inodeblock: lbn order");
		prevlbn = adp->ad_offset;
		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
			panic("%s: direct pointer #%jd mismatch %jd != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset,
			    (intmax_t)dp->di_extb[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("softdep_write_inodeblock: Unknown state 0x%x",
			    adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written.  Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the ext
	 * data which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
	    lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_extsize = fs->fs_bsize * adp->ad_offset +
		    adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < NXADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
				panic("softdep_write_inodeblock: lost dep1");
#endif /* INVARIANTS */
			dp->di_extb[i] = 0;
		}
		lastadp = NULL;
		break;
	}
	/*
	 * If we have zero'ed out the last allocated block of the ext
	 * data, roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_extb[i] != 0)
				break;
		dp->di_extsize = (i + 1) * fs->fs_bsize;
	}
	/*
	 * Set the file data dependencies to busy.
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	    adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("softdep_write_inodeblock: lbn order");
		if ((adp->ad_state & ATTACHED) == 0)
			panic("inodedep %p and adp %p not attached",
			    inodedep, adp);
		prevlbn = adp->ad_offset;
		if (adp->ad_offset < NDADDR &&
		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
			panic("%s: direct pointer #%jd mismatch %jd != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset,
			    (intmax_t)dp->di_db[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		if (adp->ad_offset >= NDADDR &&
		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
			panic("%s: indirect pointer #%jd mismatch %jd != %jd",
			    "softdep_write_inodeblock",
			    (intmax_t)adp->ad_offset - NDADDR,
			    (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("softdep_write_inodeblock: Unknown state 0x%x",
			    adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written.  Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	    lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		if (adp->ad_offset >= NDADDR)
			break;
		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
				panic("softdep_write_inodeblock: lost dep2");
#endif /* INVARIANTS */
			dp->di_db[i] = 0;
		}
		for (i = 0; i < NIADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_ib[i] != 0 &&
			    (deplist & ((1 << NDADDR) << i)) == 0)
				panic("softdep_write_inodeblock: lost dep3");
#endif /* INVARIANTS */
			dp->di_ib[i] = 0;
		}
		return;
	}
	/*
	 * If we have zero'ed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_db[i] != 0)
				break;
		dp->di_size = (i + 1) * fs->fs_bsize;
	}
	/*
	 * The only dependencies are for indirect blocks.
	 *
	 * The file size for indirect block additions is not guaranteed.
	 * Such a guarantee would be non-trivial to achieve.  The conventional
	 * synchronous write implementation also does not make this guarantee.
	 * Fsck should catch and fix discrepancies.  Arguably, the file size
	 * can be over-estimated without destroying integrity when the file
	 * moves into the indirect blocks (i.e., is large).  If we want to
	 * postpone fsck, we are stuck with this argument.
	 */
	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
		dp->di_ib[adp->ad_offset - NDADDR] = 0;
}

/*
 * Cancel an indirdep as a result of truncation.  Release all of the
 * children allocindirs and place their journal work on the appropriate
 * list.
 */
static void
cancel_indirdep(indirdep, bp, freeblks)
	struct indirdep *indirdep;
	struct buf *bp;
	struct freeblks *freeblks;
{
	struct allocindir *aip;

	/*
	 * None of the indirect pointers will ever be visible,
	 * so they can simply be tossed.  GOINGAWAY ensures
	 * that allocated pointers will be saved in the buffer
	 * cache until they are freed.  Note that they will
	 * only be able to be found by their physical address
	 * since the inode mapping the logical address will
	 * be gone.  The save buffer used for the safe copy
	 * was allocated in setup_allocindir_phase2 using
	 * the physical address so it could be used for this
	 * purpose.  Hence we swap the safe copy with the real
	 * copy, allowing the safe copy to be freed and holding
	 * on to the real copy for later use in indir_trunc.
	 */
	if (indirdep->ir_state & GOINGAWAY)
		panic("cancel_indirdep: already gone");
	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
		indirdep->ir_state |= DEPCOMPLETE;
		LIST_REMOVE(indirdep, ir_next);
	}
	indirdep->ir_state |= GOINGAWAY;
	/*
	 * Pass in bp for blocks that still have journal writes
	 * pending so we can cancel them on their own.
	 */
	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
		cancel_allocindir(aip, bp, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	/*
	 * If there are pending partial truncations we need to keep the
	 * old block copy around until they complete.  This is because
	 * the current b_data is not a perfect superset of the available
	 * blocks.
	 */
	if (TAILQ_EMPTY(&indirdep->ir_trunc))
		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
	else
		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
	WORKLIST_REMOVE(&indirdep->ir_list);
	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
	indirdep->ir_bp = NULL;
	indirdep->ir_freeblks = freeblks;
}

/*
 * Free an indirdep once it no longer has new pointers to track.
 */
static void
free_indirdep(indirdep)
	struct indirdep *indirdep;
{

	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
	    ("free_indirdep: Indir trunc list not empty."));
	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
	    ("free_indirdep: Complete head not empty."));
	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
	    ("free_indirdep: write head not empty."));
	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
	    ("free_indirdep: done head not empty."));
	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
	    ("free_indirdep: deplist head not empty."));
	KASSERT((indirdep->ir_state & DEPCOMPLETE),
	    ("free_indirdep: %p still on newblk list.", indirdep));
	KASSERT(indirdep->ir_saveddata == NULL,
	    ("free_indirdep: %p still has saved data.", indirdep));
	if (indirdep->ir_state & ONWORKLIST)
		WORKLIST_REMOVE(&indirdep->ir_list);
	WORKITEM_FREE(indirdep, D_INDIRDEP);
}

/*
 * Called before a write to an indirdep.  This routine is responsible for
 * rolling back pointers to a safe state which includes only those
 * allocindirs which have been completed.
 */
static void
initiate_write_indirdep(indirdep, bp)
	struct indirdep *indirdep;
	struct buf *bp;
{
	struct ufsmount *ump;

	indirdep->ir_state |= IOSTARTED;
	if (indirdep->ir_state & GOINGAWAY)
		panic("initiate_write_indirdep: indirdep gone");
	/*
	 * If there are no remaining dependencies, this will be writing
	 * the real pointers.
	 */
	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
	    TAILQ_EMPTY(&indirdep->ir_trunc))
		return;
	/*
	 * Replace up-to-date version with safe version.
	 */
	if (indirdep->ir_saveddata == NULL) {
		ump = VFSTOUFS(indirdep->ir_list.wk_mp);
		LOCK_OWNED(ump);
		FREE_LOCK(ump);
		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
		    M_SOFTDEP_FLAGS);
		ACQUIRE_LOCK(ump);
	}
	indirdep->ir_state &= ~ATTACHED;
	indirdep->ir_state |= UNDONE;
	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
	    bp->b_bcount);
}

/*
 * Called when an inode has been cleared in a cg bitmap.  This finally
 * eliminates any canceled jaddrefs.
 */
void
softdep_setup_inofree(mp, bp, ino, wkhd)
	struct mount *mp;
	struct buf *bp;
	ino_t ino;
	struct workhead *wkhd;
{
	struct worklist *wk, *wkn;
	struct inodedep *inodedep;
	struct ufsmount *ump;
	uint8_t *inosused;
	struct cg *cgp;
	struct fs *fs;

	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_inofree called on non-softdep filesystem"));
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	fs = ump->um_fs;
	cgp = (struct cg *)bp->b_data;
	inosused = cg_inosused(cgp);
	if (isset(inosused, ino % fs->fs_ipg))
		panic("softdep_setup_inofree: inode %ju not freed.",
		    (uintmax_t)ino);
	if (inodedep_lookup(mp, ino, 0, &inodedep))
		panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
		    (uintmax_t)ino, inodedep);
	if (wkhd) {
		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
			if (wk->wk_type != D_JADDREF)
				continue;
			WORKLIST_REMOVE(wk);
			/*
			 * We can free immediately even if the jaddref
			 * isn't attached in a background write as now
			 * the bitmaps are reconciled.
10680 */ 10681 wk->wk_state |= COMPLETE | ATTACHED; 10682 free_jaddref(WK_JADDREF(wk)); 10683 } 10684 jwork_move(&bp->b_dep, wkhd); 10685 } 10686 FREE_LOCK(ump); 10687} 10688 10689 10690/* 10691 * Called via ffs_blkfree() after a set of frags has been cleared from a cg 10692 * map. Any dependencies waiting for the write to clear are added to the 10693 * buf's list and any jnewblks that are being canceled are discarded 10694 * immediately. 10695 */ 10696void 10697softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 10698 struct mount *mp; 10699 struct buf *bp; 10700 ufs2_daddr_t blkno; 10701 int frags; 10702 struct workhead *wkhd; 10703{ 10704 struct bmsafemap *bmsafemap; 10705 struct jnewblk *jnewblk; 10706 struct ufsmount *ump; 10707 struct worklist *wk; 10708 struct fs *fs; 10709#ifdef SUJ_DEBUG 10710 uint8_t *blksfree; 10711 struct cg *cgp; 10712 ufs2_daddr_t jstart; 10713 ufs2_daddr_t jend; 10714 ufs2_daddr_t end; 10715 long bno; 10716 int i; 10717#endif 10718 10719 CTR3(KTR_SUJ, 10720 "softdep_setup_blkfree: blkno %jd frags %d wk head %p", 10721 blkno, frags, wkhd); 10722 10723 ump = VFSTOUFS(mp); 10724 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 10725 ("softdep_setup_blkfree called on non-softdep filesystem")); 10726 ACQUIRE_LOCK(ump); 10727 /* Lookup the bmsafemap so we track when it is dirty. */ 10728 fs = ump->um_fs; 10729 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10730 /* 10731 * Detach any jnewblks which have been canceled. They must linger 10732 * until the bitmap is cleared again by ffs_blkfree() to prevent 10733 * an unjournaled allocation from hitting the disk. 10734 */ 10735 if (wkhd) { 10736 while ((wk = LIST_FIRST(wkhd)) != NULL) { 10737 CTR2(KTR_SUJ, 10738 "softdep_setup_blkfree: blkno %jd wk type %d", 10739 blkno, wk->wk_type); 10740 WORKLIST_REMOVE(wk); 10741 if (wk->wk_type != D_JNEWBLK) { 10742 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); 10743 continue; 10744 } 10745 jnewblk = WK_JNEWBLK(wk); 10746 KASSERT(jnewblk->jn_state & GOINGAWAY, 10747 ("softdep_setup_blkfree: jnewblk not canceled.")); 10748#ifdef SUJ_DEBUG 10749 /* 10750 * Assert that this block is free in the bitmap 10751 * before we discard the jnewblk. 10752 */ 10753 cgp = (struct cg *)bp->b_data; 10754 blksfree = cg_blksfree(cgp); 10755 bno = dtogd(fs, jnewblk->jn_blkno); 10756 for (i = jnewblk->jn_oldfrags; 10757 i < jnewblk->jn_frags; i++) { 10758 if (isset(blksfree, bno + i)) 10759 continue; 10760 panic("softdep_setup_blkfree: not free"); 10761 } 10762#endif 10763 /* 10764 * Even if it's not attached we can free immediately 10765 * as the new bitmap is correct. 10766 */ 10767 wk->wk_state |= COMPLETE | ATTACHED; 10768 free_jnewblk(jnewblk); 10769 } 10770 } 10771 10772#ifdef SUJ_DEBUG 10773 /* 10774 * Assert that we are not freeing a block which has an outstanding 10775 * allocation dependency. 10776 */ 10777 fs = VFSTOUFS(mp)->um_fs; 10778 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10779 end = blkno + frags; 10780 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10781 /* 10782 * Don't match against blocks that will be freed when the 10783 * background write is done. 
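 *
 * Illustrative numbers (assumed, not from the source): a jnewblk
 * with jn_blkno 100, jn_oldfrags 0 and jn_frags 8 covers
 * [jstart, jend) = [100, 108).  Freeing blkno 104 with frags 2
 * yields end = 106, so the test below sees 104 >= 100 && 104 < 108
 * and panics: the fragments being freed are still described by an
 * unwritten allocation record.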
10784 */ 10785 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == 10786 (COMPLETE | DEPCOMPLETE)) 10787 continue; 10788 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; 10789 jend = jnewblk->jn_blkno + jnewblk->jn_frags; 10790 if ((blkno >= jstart && blkno < jend) || 10791 (end > jstart && end <= jend)) { 10792 printf("state 0x%X %jd - %d %d dep %p\n", 10793 jnewblk->jn_state, jnewblk->jn_blkno, 10794 jnewblk->jn_oldfrags, jnewblk->jn_frags, 10795 jnewblk->jn_dep); 10796 panic("softdep_setup_blkfree: " 10797 "%jd-%jd(%d) overlaps with %jd-%jd", 10798 blkno, end, frags, jstart, jend); 10799 } 10800 } 10801#endif 10802 FREE_LOCK(ump); 10803} 10804 10805/* 10806 * Revert a block allocation when the journal record that describes it 10807 * is not yet written. 10808 */ 10809static int 10810jnewblk_rollback(jnewblk, fs, cgp, blksfree) 10811 struct jnewblk *jnewblk; 10812 struct fs *fs; 10813 struct cg *cgp; 10814 uint8_t *blksfree; 10815{ 10816 ufs1_daddr_t fragno; 10817 long cgbno, bbase; 10818 int frags, blk; 10819 int i; 10820 10821 frags = 0; 10822 cgbno = dtogd(fs, jnewblk->jn_blkno); 10823 /* 10824 * We have to test which frags need to be rolled back. We may 10825 * be operating on a stale copy when doing background writes. 10826 */ 10827 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) 10828 if (isclr(blksfree, cgbno + i)) 10829 frags++; 10830 if (frags == 0) 10831 return (0); 10832 /* 10833 * This is mostly ffs_blkfree() sans some validation and 10834 * superblock updates. 10835 */ 10836 if (frags == fs->fs_frag) { 10837 fragno = fragstoblks(fs, cgbno); 10838 ffs_setblock(fs, blksfree, fragno); 10839 ffs_clusteracct(fs, cgp, fragno, 1); 10840 cgp->cg_cs.cs_nbfree++; 10841 } else { 10842 cgbno += jnewblk->jn_oldfrags; 10843 bbase = cgbno - fragnum(fs, cgbno); 10844 /* Decrement the old frags. */ 10845 blk = blkmap(fs, blksfree, bbase); 10846 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 10847 /* Deallocate the fragment */ 10848 for (i = 0; i < frags; i++) 10849 setbit(blksfree, cgbno + i); 10850 cgp->cg_cs.cs_nffree += frags; 10851 /* Add back in counts associated with the new frags */ 10852 blk = blkmap(fs, blksfree, bbase); 10853 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 10854 /* If a complete block has been reassembled, account for it. */ 10855 fragno = fragstoblks(fs, bbase); 10856 if (ffs_isblock(fs, blksfree, fragno)) { 10857 cgp->cg_cs.cs_nffree -= fs->fs_frag; 10858 ffs_clusteracct(fs, cgp, fragno, 1); 10859 cgp->cg_cs.cs_nbfree++; 10860 } 10861 } 10862 stat_jnewblk++; 10863 jnewblk->jn_state &= ~ATTACHED; 10864 jnewblk->jn_state |= UNDONE; 10865 10866 return (frags); 10867} 10868 10869static void 10870initiate_write_bmsafemap(bmsafemap, bp) 10871 struct bmsafemap *bmsafemap; 10872 struct buf *bp; /* The cg block. */ 10873{ 10874 struct jaddref *jaddref; 10875 struct jnewblk *jnewblk; 10876 uint8_t *inosused; 10877 uint8_t *blksfree; 10878 struct cg *cgp; 10879 struct fs *fs; 10880 ino_t ino; 10881 10882 if (bmsafemap->sm_state & IOSTARTED) 10883 return; 10884 bmsafemap->sm_state |= IOSTARTED; 10885 /* 10886 * Clear any inode allocations which are pending journal writes. 
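 *
 * A sketch of the rollback applied below, with an assumed inode
 * number: if inode 37 is set in inosused but its jaddref has not
 * yet been journaled, the copy of the bitmap that goes to disk
 * must still show the inode free:
 *
 *	clrbit(inosused, 37), cgp->cg_cs.cs_nifree++
 *	jaddref state: ATTACHED cleared, UNDONE set
 *
 * handle_written_bmsafemap() re-applies the allocation once the
 * write completes.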
10887 */ 10888 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { 10889 cgp = (struct cg *)bp->b_data; 10890 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10891 inosused = cg_inosused(cgp); 10892 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { 10893 ino = jaddref->ja_ino % fs->fs_ipg; 10894 if (isset(inosused, ino)) { 10895 if ((jaddref->ja_mode & IFMT) == IFDIR) 10896 cgp->cg_cs.cs_ndir--; 10897 cgp->cg_cs.cs_nifree++; 10898 clrbit(inosused, ino); 10899 jaddref->ja_state &= ~ATTACHED; 10900 jaddref->ja_state |= UNDONE; 10901 stat_jaddref++; 10902 } else 10903 panic("initiate_write_bmsafemap: inode %ju " 10904 "marked free", (uintmax_t)jaddref->ja_ino); 10905 } 10906 } 10907 /* 10908 * Clear any block allocations which are pending journal writes. 10909 */ 10910 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 10911 cgp = (struct cg *)bp->b_data; 10912 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10913 blksfree = cg_blksfree(cgp); 10914 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10915 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) 10916 continue; 10917 panic("initiate_write_bmsafemap: block %jd " 10918 "marked free", jnewblk->jn_blkno); 10919 } 10920 } 10921 /* 10922 * Move allocation lists to the written lists so they can be 10923 * cleared once the block write is complete. 10924 */ 10925 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, 10926 inodedep, id_deps); 10927 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, 10928 newblk, nb_deps); 10929 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, 10930 wk_list); 10931} 10932 10933/* 10934 * This routine is called during the completion interrupt 10935 * service routine for a disk write (from the procedure called 10936 * by the device driver to inform the filesystem caches of 10937 * a request completion). It should be called early in this 10938 * procedure, before the block is made available to other 10939 * processes or other routines are called. 10940 * 10941 */ 10942static void 10943softdep_disk_write_complete(bp) 10944 struct buf *bp; /* describes the completed disk write */ 10945{ 10946 struct worklist *wk; 10947 struct worklist *owk; 10948 struct ufsmount *ump; 10949 struct workhead reattach; 10950 struct freeblks *freeblks; 10951 struct buf *sbp; 10952 10953 /* 10954 * If an error occurred while doing the write, then the data 10955 * has not hit the disk and the dependencies cannot be unrolled. 10956 */ 10957 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) 10958 return; 10959 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 10960 return; 10961 ump = VFSTOUFS(wk->wk_mp); 10962 LIST_INIT(&reattach); 10963 /* 10964 * This lock must not be released anywhere in this code segment. 
10965 */ 10966 sbp = NULL; 10967 owk = NULL; 10968 ACQUIRE_LOCK(ump); 10969 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 10970 WORKLIST_REMOVE(wk); 10971 atomic_add_long(&dep_write[wk->wk_type], 1); 10972 if (wk == owk) 10973 panic("duplicate worklist: %p\n", wk); 10974 owk = wk; 10975 switch (wk->wk_type) { 10976 10977 case D_PAGEDEP: 10978 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 10979 WORKLIST_INSERT(&reattach, wk); 10980 continue; 10981 10982 case D_INODEDEP: 10983 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 10984 WORKLIST_INSERT(&reattach, wk); 10985 continue; 10986 10987 case D_BMSAFEMAP: 10988 if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp)) 10989 WORKLIST_INSERT(&reattach, wk); 10990 continue; 10991 10992 case D_MKDIR: 10993 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 10994 continue; 10995 10996 case D_ALLOCDIRECT: 10997 wk->wk_state |= COMPLETE; 10998 handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL); 10999 continue; 11000 11001 case D_ALLOCINDIR: 11002 wk->wk_state |= COMPLETE; 11003 handle_allocindir_partdone(WK_ALLOCINDIR(wk)); 11004 continue; 11005 11006 case D_INDIRDEP: 11007 if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp)) 11008 WORKLIST_INSERT(&reattach, wk); 11009 continue; 11010 11011 case D_FREEBLKS: 11012 wk->wk_state |= COMPLETE; 11013 freeblks = WK_FREEBLKS(wk); 11014 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE && 11015 LIST_EMPTY(&freeblks->fb_jblkdephd)) 11016 add_to_worklist(wk, WK_NODELAY); 11017 continue; 11018 11019 case D_FREEWORK: 11020 handle_written_freework(WK_FREEWORK(wk)); 11021 break; 11022 11023 case D_JSEGDEP: 11024 free_jsegdep(WK_JSEGDEP(wk)); 11025 continue; 11026 11027 case D_JSEG: 11028 handle_written_jseg(WK_JSEG(wk), bp); 11029 continue; 11030 11031 case D_SBDEP: 11032 if (handle_written_sbdep(WK_SBDEP(wk), bp)) 11033 WORKLIST_INSERT(&reattach, wk); 11034 continue; 11035 11036 case D_FREEDEP: 11037 free_freedep(WK_FREEDEP(wk)); 11038 continue; 11039 11040 default: 11041 panic("handle_disk_write_complete: Unknown type %s", 11042 TYPENAME(wk->wk_type)); 11043 /* NOTREACHED */ 11044 } 11045 } 11046 /* 11047 * Reattach any requests that must be redone. 11048 */ 11049 while ((wk = LIST_FIRST(&reattach)) != NULL) { 11050 WORKLIST_REMOVE(wk); 11051 WORKLIST_INSERT(&bp->b_dep, wk); 11052 } 11053 FREE_LOCK(ump); 11054 if (sbp) 11055 brelse(sbp); 11056} 11057 11058/* 11059 * Called from within softdep_disk_write_complete above. Note that 11060 * this routine is always called from interrupt level with further 11061 * splbio interrupts blocked. 11062 */ 11063static void 11064handle_allocdirect_partdone(adp, wkhd) 11065 struct allocdirect *adp; /* the completed allocdirect */ 11066 struct workhead *wkhd; /* Work to do when inode is written. */ 11067{ 11068 struct allocdirectlst *listhead; 11069 struct allocdirect *listadp; 11070 struct inodedep *inodedep; 11071 long bsize; 11072 11073 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 11074 return; 11075 /* 11076 * The on-disk inode cannot claim to be any larger than the last 11077 * fragment that has been written. Otherwise, the on-disk inode 11078 * might have fragments that were not the last block in the file 11079 * which would corrupt the filesystem. Thus, we cannot free any 11080 * allocdirects after one whose ad_oldblkno claims a fragment as 11081 * these blocks must be rolled back to zero before writing the inode. 11082 * We check the currently active set of allocdirects in id_inoupdt 11083 * or id_extupdt as appropriate.
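 *
 * A worked example (sizes assumed): with an 8K block size, suppose
 * id_inoupdt holds allocdirects A, B and C in file order, where B
 * replaced a 4K fragment (ad_oldsize 4096) and C just completed.
 * The scan below reaches B before C and returns: freeing C's
 * dependency would let the on-disk inode claim blocks beyond a
 * fragment that still has to be rolled back.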
11084 */ 11085 inodedep = adp->ad_inodedep; 11086 bsize = inodedep->id_fs->fs_bsize; 11087 if (adp->ad_state & EXTDATA) 11088 listhead = &inodedep->id_extupdt; 11089 else 11090 listhead = &inodedep->id_inoupdt; 11091 TAILQ_FOREACH(listadp, listhead, ad_next) { 11092 /* found our block */ 11093 if (listadp == adp) 11094 break; 11095 /* continue if ad_oldlbn is not a fragment */ 11096 if (listadp->ad_oldsize == 0 || 11097 listadp->ad_oldsize == bsize) 11098 continue; 11099 /* hit a fragment */ 11100 return; 11101 } 11102 /* 11103 * If we have reached the end of the current list without 11104 * finding the just finished dependency, then it must be 11105 * on the future dependency list. Future dependencies cannot 11106 * be freed until they are moved to the current list. 11107 */ 11108 if (listadp == NULL) { 11109#ifdef DEBUG 11110 if (adp->ad_state & EXTDATA) 11111 listhead = &inodedep->id_newextupdt; 11112 else 11113 listhead = &inodedep->id_newinoupdt; 11114 TAILQ_FOREACH(listadp, listhead, ad_next) 11115 /* found our block */ 11116 if (listadp == adp) 11117 break; 11118 if (listadp == NULL) 11119 panic("handle_allocdirect_partdone: lost dep"); 11120#endif /* DEBUG */ 11121 return; 11122 } 11123 /* 11124 * If we have found the just finished dependency, then queue 11125 * it along with anything that follows it that is complete. 11126 * Since the pointer has not yet been written in the inode 11127 * as the dependency prevents it, place the allocdirect on the 11128 * bufwait list where it will be freed once the pointer is 11129 * valid. 11130 */ 11131 if (wkhd == NULL) 11132 wkhd = &inodedep->id_bufwait; 11133 for (; adp; adp = listadp) { 11134 listadp = TAILQ_NEXT(adp, ad_next); 11135 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 11136 return; 11137 TAILQ_REMOVE(listhead, adp, ad_next); 11138 WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list); 11139 } 11140} 11141 11142/* 11143 * Called from within softdep_disk_write_complete above. This routine 11144 * completes successfully written allocindirs. 11145 */ 11146static void 11147handle_allocindir_partdone(aip) 11148 struct allocindir *aip; /* the completed allocindir */ 11149{ 11150 struct indirdep *indirdep; 11151 11152 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 11153 return; 11154 indirdep = aip->ai_indirdep; 11155 LIST_REMOVE(aip, ai_next); 11156 /* 11157 * Don't set a pointer while the buffer is undergoing IO or while 11158 * we have active truncations. 11159 */ 11160 if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) { 11161 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 11162 return; 11163 } 11164 if (indirdep->ir_state & UFS1FMT) 11165 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 11166 aip->ai_newblkno; 11167 else 11168 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 11169 aip->ai_newblkno; 11170 /* 11171 * Await the pointer write before freeing the allocindir. 11172 */ 11173 LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next); 11174} 11175 11176/* 11177 * Release segments held on a jwork list. 
11178 */ 11179static void 11180handle_jwork(wkhd) 11181 struct workhead *wkhd; 11182{ 11183 struct worklist *wk; 11184 11185 while ((wk = LIST_FIRST(wkhd)) != NULL) { 11186 WORKLIST_REMOVE(wk); 11187 switch (wk->wk_type) { 11188 case D_JSEGDEP: 11189 free_jsegdep(WK_JSEGDEP(wk)); 11190 continue; 11191 case D_FREEDEP: 11192 free_freedep(WK_FREEDEP(wk)); 11193 continue; 11194 case D_FREEFRAG: 11195 rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep)); 11196 WORKITEM_FREE(wk, D_FREEFRAG); 11197 continue; 11198 case D_FREEWORK: 11199 handle_written_freework(WK_FREEWORK(wk)); 11200 continue; 11201 default: 11202 panic("handle_jwork: Unknown type %s\n", 11203 TYPENAME(wk->wk_type)); 11204 } 11205 } 11206} 11207 11208/* 11209 * Handle the bufwait list on an inode when it is safe to release items 11210 * held there. This normally happens after an inode block is written but 11211 * may be delayed and handled later if there are pending journal items that 11212 * are not yet safe to be released. 11213 */ 11214static struct freefile * 11215handle_bufwait(inodedep, refhd) 11216 struct inodedep *inodedep; 11217 struct workhead *refhd; 11218{ 11219 struct jaddref *jaddref; 11220 struct freefile *freefile; 11221 struct worklist *wk; 11222 11223 freefile = NULL; 11224 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 11225 WORKLIST_REMOVE(wk); 11226 switch (wk->wk_type) { 11227 case D_FREEFILE: 11228 /* 11229 * We defer adding freefile to the worklist 11230 * until all other additions have been made to 11231 * ensure that it will be done after all the 11232 * old blocks have been freed. 11233 */ 11234 if (freefile != NULL) 11235 panic("handle_bufwait: freefile"); 11236 freefile = WK_FREEFILE(wk); 11237 continue; 11238 11239 case D_MKDIR: 11240 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 11241 continue; 11242 11243 case D_DIRADD: 11244 diradd_inode_written(WK_DIRADD(wk), inodedep); 11245 continue; 11246 11247 case D_FREEFRAG: 11248 wk->wk_state |= COMPLETE; 11249 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE) 11250 add_to_worklist(wk, 0); 11251 continue; 11252 11253 case D_DIRREM: 11254 wk->wk_state |= COMPLETE; 11255 add_to_worklist(wk, 0); 11256 continue; 11257 11258 case D_ALLOCDIRECT: 11259 case D_ALLOCINDIR: 11260 free_newblk(WK_NEWBLK(wk)); 11261 continue; 11262 11263 case D_JNEWBLK: 11264 wk->wk_state |= COMPLETE; 11265 free_jnewblk(WK_JNEWBLK(wk)); 11266 continue; 11267 11268 /* 11269 * Save freed journal segments and add references on 11270 * the supplied list which will delay their release 11271 * until the cg bitmap is cleared on disk. 11272 */ 11273 case D_JSEGDEP: 11274 if (refhd == NULL) 11275 free_jsegdep(WK_JSEGDEP(wk)); 11276 else 11277 WORKLIST_INSERT(refhd, wk); 11278 continue; 11279 11280 case D_JADDREF: 11281 jaddref = WK_JADDREF(wk); 11282 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 11283 if_deps); 11284 /* 11285 * Transfer any jaddrefs to the list to be freed with 11286 * the bitmap if we're handling a removed file. 11287 */ 11288 if (refhd == NULL) { 11289 wk->wk_state |= COMPLETE; 11290 free_jaddref(jaddref); 11291 } else 11292 WORKLIST_INSERT(refhd, wk); 11293 continue; 11294 11295 default: 11296 panic("handle_bufwait: Unknown type %p(%s)", 11297 wk, TYPENAME(wk->wk_type)); 11298 /* NOTREACHED */ 11299 } 11300 } 11301 return (freefile); 11302} 11303/* 11304 * Called from within softdep_disk_write_complete above to restore 11305 * in-memory inode block contents to their most up-to-date state. 
Note 11306 * that this routine is always called from interrupt level with further 11307 * splbio interrupts blocked. 11308 */ 11309static int 11310handle_written_inodeblock(inodedep, bp) 11311 struct inodedep *inodedep; 11312 struct buf *bp; /* buffer containing the inode block */ 11313{ 11314 struct freefile *freefile; 11315 struct allocdirect *adp, *nextadp; 11316 struct ufs1_dinode *dp1 = NULL; 11317 struct ufs2_dinode *dp2 = NULL; 11318 struct workhead wkhd; 11319 int hadchanges, fstype; 11320 ino_t freelink; 11321 11322 LIST_INIT(&wkhd); 11323 hadchanges = 0; 11324 freefile = NULL; 11325 if ((inodedep->id_state & IOSTARTED) == 0) 11326 panic("handle_written_inodeblock: not started"); 11327 inodedep->id_state &= ~IOSTARTED; 11328 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 11329 fstype = UFS1; 11330 dp1 = (struct ufs1_dinode *)bp->b_data + 11331 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11332 freelink = dp1->di_freelink; 11333 } else { 11334 fstype = UFS2; 11335 dp2 = (struct ufs2_dinode *)bp->b_data + 11336 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11337 freelink = dp2->di_freelink; 11338 } 11339 /* 11340 * Leave this inodeblock dirty until it's in the list. 11341 */ 11342 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) { 11343 struct inodedep *inon; 11344 11345 inon = TAILQ_NEXT(inodedep, id_unlinked); 11346 if ((inon == NULL && freelink == 0) || 11347 (inon && inon->id_ino == freelink)) { 11348 if (inon) 11349 inon->id_state |= UNLINKPREV; 11350 inodedep->id_state |= UNLINKNEXT; 11351 } 11352 hadchanges = 1; 11353 } 11354 /* 11355 * If we had to rollback the inode allocation because of 11356 * bitmaps being incomplete, then simply restore it. 11357 * Keep the block dirty so that it will not be reclaimed until 11358 * all associated dependencies have been cleared and the 11359 * corresponding updates written to disk. 11360 */ 11361 if (inodedep->id_savedino1 != NULL) { 11362 hadchanges = 1; 11363 if (fstype == UFS1) 11364 *dp1 = *inodedep->id_savedino1; 11365 else 11366 *dp2 = *inodedep->id_savedino2; 11367 free(inodedep->id_savedino1, M_SAVEDINO); 11368 inodedep->id_savedino1 = NULL; 11369 if ((bp->b_flags & B_DELWRI) == 0) 11370 stat_inode_bitmap++; 11371 bdirty(bp); 11372 /* 11373 * If the inode is clear here and GOINGAWAY it will never 11374 * be written. Process the bufwait and clear any pending 11375 * work which may include the freefile. 11376 */ 11377 if (inodedep->id_state & GOINGAWAY) 11378 goto bufwait; 11379 return (1); 11380 } 11381 inodedep->id_state |= COMPLETE; 11382 /* 11383 * Roll forward anything that had to be rolled back before 11384 * the inode could be updated. 
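 *
 * For illustration (slot number assumed): if di_db[3] had been
 * rolled back to its ad_oldblkno so this inode block could be
 * written early, the loops below verify the on-disk slot still
 * holds ad_oldblkno, install ad_newblkno in its place, and move
 * the allocdirect from UNDONE to ATTACHED.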
11385 */ 11386 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 11387 nextadp = TAILQ_NEXT(adp, ad_next); 11388 if (adp->ad_state & ATTACHED) 11389 panic("handle_written_inodeblock: new entry"); 11390 if (fstype == UFS1) { 11391 if (adp->ad_offset < NDADDR) { 11392 if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11393 panic("%s %s #%jd mismatch %d != %jd", 11394 "handle_written_inodeblock:", 11395 "direct pointer", 11396 (intmax_t)adp->ad_offset, 11397 dp1->di_db[adp->ad_offset], 11398 (intmax_t)adp->ad_oldblkno); 11399 dp1->di_db[adp->ad_offset] = adp->ad_newblkno; 11400 } else { 11401 if (dp1->di_ib[adp->ad_offset - NDADDR] != 0) 11402 panic("%s: %s #%jd allocated as %d", 11403 "handle_written_inodeblock", 11404 "indirect pointer", 11405 (intmax_t)adp->ad_offset - NDADDR, 11406 dp1->di_ib[adp->ad_offset - NDADDR]); 11407 dp1->di_ib[adp->ad_offset - NDADDR] = 11408 adp->ad_newblkno; 11409 } 11410 } else { 11411 if (adp->ad_offset < NDADDR) { 11412 if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11413 panic("%s: %s #%jd %s %jd != %jd", 11414 "handle_written_inodeblock", 11415 "direct pointer", 11416 (intmax_t)adp->ad_offset, "mismatch", 11417 (intmax_t)dp2->di_db[adp->ad_offset], 11418 (intmax_t)adp->ad_oldblkno); 11419 dp2->di_db[adp->ad_offset] = adp->ad_newblkno; 11420 } else { 11421 if (dp2->di_ib[adp->ad_offset - NDADDR] != 0) 11422 panic("%s: %s #%jd allocated as %jd", 11423 "handle_written_inodeblock", 11424 "indirect pointer", 11425 (intmax_t)adp->ad_offset - NDADDR, 11426 (intmax_t) 11427 dp2->di_ib[adp->ad_offset - NDADDR]); 11428 dp2->di_ib[adp->ad_offset - NDADDR] = 11429 adp->ad_newblkno; 11430 } 11431 } 11432 adp->ad_state &= ~UNDONE; 11433 adp->ad_state |= ATTACHED; 11434 hadchanges = 1; 11435 } 11436 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 11437 nextadp = TAILQ_NEXT(adp, ad_next); 11438 if (adp->ad_state & ATTACHED) 11439 panic("handle_written_inodeblock: new entry"); 11440 if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno) 11441 panic("%s: direct pointers #%jd %s %jd != %jd", 11442 "handle_written_inodeblock", 11443 (intmax_t)adp->ad_offset, "mismatch", 11444 (intmax_t)dp2->di_extb[adp->ad_offset], 11445 (intmax_t)adp->ad_oldblkno); 11446 dp2->di_extb[adp->ad_offset] = adp->ad_newblkno; 11447 adp->ad_state &= ~UNDONE; 11448 adp->ad_state |= ATTACHED; 11449 hadchanges = 1; 11450 } 11451 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 11452 stat_direct_blk_ptrs++; 11453 /* 11454 * Reset the file size to its most up-to-date value. 
11455 */ 11456 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1) 11457 panic("handle_written_inodeblock: bad size"); 11458 if (inodedep->id_savednlink > LINK_MAX) 11459 panic("handle_written_inodeblock: Invalid link count " 11460 "%d for inodedep %p", inodedep->id_savednlink, inodedep); 11461 if (fstype == UFS1) { 11462 if (dp1->di_nlink != inodedep->id_savednlink) { 11463 dp1->di_nlink = inodedep->id_savednlink; 11464 hadchanges = 1; 11465 } 11466 if (dp1->di_size != inodedep->id_savedsize) { 11467 dp1->di_size = inodedep->id_savedsize; 11468 hadchanges = 1; 11469 } 11470 } else { 11471 if (dp2->di_nlink != inodedep->id_savednlink) { 11472 dp2->di_nlink = inodedep->id_savednlink; 11473 hadchanges = 1; 11474 } 11475 if (dp2->di_size != inodedep->id_savedsize) { 11476 dp2->di_size = inodedep->id_savedsize; 11477 hadchanges = 1; 11478 } 11479 if (dp2->di_extsize != inodedep->id_savedextsize) { 11480 dp2->di_extsize = inodedep->id_savedextsize; 11481 hadchanges = 1; 11482 } 11483 } 11484 inodedep->id_savedsize = -1; 11485 inodedep->id_savedextsize = -1; 11486 inodedep->id_savednlink = -1; 11487 /* 11488 * If there were any rollbacks in the inode block, then it must be 11489 * marked dirty so that it will eventually get written back in 11490 * its correct form. 11491 */ 11492 if (hadchanges) 11493 bdirty(bp); 11494bufwait: 11495 /* 11496 * Process any allocdirects that completed during the update. 11497 */ 11498 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 11499 handle_allocdirect_partdone(adp, &wkhd); 11500 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) 11501 handle_allocdirect_partdone(adp, &wkhd); 11502 /* 11503 * Process deallocations that were held pending until the 11504 * inode had been written to disk. Freeing of the inode 11505 * is delayed until after all blocks have been freed to 11506 * avoid creation of new <vfsid, inum, lbn> triples 11507 * before the old ones have been deleted. Completely 11508 * unlinked inodes are not processed until the unlinked 11509 * inode list is written or the last reference is removed. 11510 */ 11511 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) { 11512 freefile = handle_bufwait(inodedep, NULL); 11513 if (freefile && !LIST_EMPTY(&wkhd)) { 11514 WORKLIST_INSERT(&wkhd, &freefile->fx_list); 11515 freefile = NULL; 11516 } 11517 } 11518 /* 11519 * Move rolled forward dependency completions to the bufwait list 11520 * now that those that were already written have been processed. 11521 */ 11522 if (!LIST_EMPTY(&wkhd) && hadchanges == 0) 11523 panic("handle_written_inodeblock: bufwait but no changes"); 11524 jwork_move(&inodedep->id_bufwait, &wkhd); 11525 11526 if (freefile != NULL) { 11527 /* 11528 * If the inode is goingaway it was never written. Fake up 11529 * the state here so free_inodedep() can succeed. 11530 */ 11531 if (inodedep->id_state & GOINGAWAY) 11532 inodedep->id_state |= COMPLETE | DEPCOMPLETE; 11533 if (free_inodedep(inodedep) == 0) 11534 panic("handle_written_inodeblock: live inodedep %p", 11535 inodedep); 11536 add_to_worklist(&freefile->fx_list, 0); 11537 return (0); 11538 } 11539 11540 /* 11541 * If no outstanding dependencies, free it.
11542 */ 11543 if (free_inodedep(inodedep) || 11544 (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 && 11545 TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 11546 TAILQ_FIRST(&inodedep->id_extupdt) == 0 && 11547 LIST_FIRST(&inodedep->id_bufwait) == 0)) 11548 return (0); 11549 return (hadchanges); 11550} 11551 11552static int 11553handle_written_indirdep(indirdep, bp, bpp) 11554 struct indirdep *indirdep; 11555 struct buf *bp; 11556 struct buf **bpp; 11557{ 11558 struct allocindir *aip; 11559 struct buf *sbp; 11560 int chgs; 11561 11562 if (indirdep->ir_state & GOINGAWAY) 11563 panic("handle_written_indirdep: indirdep gone"); 11564 if ((indirdep->ir_state & IOSTARTED) == 0) 11565 panic("handle_written_indirdep: IO not started"); 11566 chgs = 0; 11567 /* 11568 * If there were rollbacks revert them here. 11569 */ 11570 if (indirdep->ir_saveddata) { 11571 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 11572 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11573 free(indirdep->ir_saveddata, M_INDIRDEP); 11574 indirdep->ir_saveddata = NULL; 11575 } 11576 chgs = 1; 11577 } 11578 indirdep->ir_state &= ~(UNDONE | IOSTARTED); 11579 indirdep->ir_state |= ATTACHED; 11580 /* 11581 * Move allocindirs with written pointers to the completehd if 11582 * the indirdep's pointer is not yet written. Otherwise 11583 * free them here. 11584 */ 11585 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) { 11586 LIST_REMOVE(aip, ai_next); 11587 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 11588 LIST_INSERT_HEAD(&indirdep->ir_completehd, aip, 11589 ai_next); 11590 newblk_freefrag(&aip->ai_block); 11591 continue; 11592 } 11593 free_newblk(&aip->ai_block); 11594 } 11595 /* 11596 * Move allocindirs that have finished dependency processing from 11597 * the done list to the write list after updating the pointers. 11598 */ 11599 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11600 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 11601 handle_allocindir_partdone(aip); 11602 if (aip == LIST_FIRST(&indirdep->ir_donehd)) 11603 panic("disk_write_complete: not gone"); 11604 chgs = 1; 11605 } 11606 } 11607 /* 11608 * Preserve the indirdep if there were any changes or if it is not 11609 * yet valid on disk. 11610 */ 11611 if (chgs) { 11612 stat_indir_blk_ptrs++; 11613 bdirty(bp); 11614 return (1); 11615 } 11616 /* 11617 * If there were no changes we can discard the savedbp and detach 11618 * ourselves from the buf. We are only carrying completed pointers 11619 * in this case. 11620 */ 11621 sbp = indirdep->ir_savebp; 11622 sbp->b_flags |= B_INVAL | B_NOCACHE; 11623 indirdep->ir_savebp = NULL; 11624 indirdep->ir_bp = NULL; 11625 if (*bpp != NULL) 11626 panic("handle_written_indirdep: bp already exists."); 11627 *bpp = sbp; 11628 /* 11629 * The indirdep may not be freed until its parent points at it. 11630 */ 11631 if (indirdep->ir_state & DEPCOMPLETE) 11632 free_indirdep(indirdep); 11633 11634 return (0); 11635} 11636 11637/* 11638 * Process a diradd entry after its dependent inode has been written. 11639 * This routine must be called with splbio interrupts blocked. 11640 */ 11641static void 11642diradd_inode_written(dap, inodedep) 11643 struct diradd *dap; 11644 struct inodedep *inodedep; 11645{ 11646 11647 dap->da_state |= COMPLETE; 11648 complete_diradd(dap); 11649 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 11650} 11651 11652/* 11653 * Returns true if the bmsafemap will have rollbacks when written. Must only 11654 * be called with the per-filesystem lock and the buf lock on the cg held. 
11655 */ 11656static int 11657bmsafemap_backgroundwrite(bmsafemap, bp) 11658 struct bmsafemap *bmsafemap; 11659 struct buf *bp; 11660{ 11661 int dirty; 11662 11663 LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp)); 11664 dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) | 11665 !LIST_EMPTY(&bmsafemap->sm_jnewblkhd); 11666 /* 11667 * If we're initiating a background write we need to process the 11668 * rollbacks as they exist now, not as they exist when IO starts. 11669 * No other consumers will look at the contents of the shadowed 11670 * buf so this is safe to do here. 11671 */ 11672 if (bp->b_xflags & BX_BKGRDMARKER) 11673 initiate_write_bmsafemap(bmsafemap, bp); 11674 11675 return (dirty); 11676} 11677 11678/* 11679 * Re-apply an allocation when a cg write is complete. 11680 */ 11681static int 11682jnewblk_rollforward(jnewblk, fs, cgp, blksfree) 11683 struct jnewblk *jnewblk; 11684 struct fs *fs; 11685 struct cg *cgp; 11686 uint8_t *blksfree; 11687{ 11688 ufs1_daddr_t fragno; 11689 ufs2_daddr_t blkno; 11690 long cgbno, bbase; 11691 int frags, blk; 11692 int i; 11693 11694 frags = 0; 11695 cgbno = dtogd(fs, jnewblk->jn_blkno); 11696 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { 11697 if (isclr(blksfree, cgbno + i)) 11698 panic("jnewblk_rollforward: re-allocated fragment"); 11699 frags++; 11700 } 11701 if (frags == fs->fs_frag) { 11702 blkno = fragstoblks(fs, cgbno); 11703 ffs_clrblock(fs, blksfree, (long)blkno); 11704 ffs_clusteracct(fs, cgp, blkno, -1); 11705 cgp->cg_cs.cs_nbfree--; 11706 } else { 11707 bbase = cgbno - fragnum(fs, cgbno); 11708 cgbno += jnewblk->jn_oldfrags; 11709 /* If a complete block had been reassembled, account for it. */ 11710 fragno = fragstoblks(fs, bbase); 11711 if (ffs_isblock(fs, blksfree, fragno)) { 11712 cgp->cg_cs.cs_nffree += fs->fs_frag; 11713 ffs_clusteracct(fs, cgp, fragno, -1); 11714 cgp->cg_cs.cs_nbfree--; 11715 } 11716 /* Decrement the old frags. */ 11717 blk = blkmap(fs, blksfree, bbase); 11718 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 11719 /* Allocate the fragment */ 11720 for (i = 0; i < frags; i++) 11721 clrbit(blksfree, cgbno + i); 11722 cgp->cg_cs.cs_nffree -= frags; 11723 /* Add back in counts associated with the new frags */ 11724 blk = blkmap(fs, blksfree, bbase); 11725 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 11726 } 11727 return (frags); 11728} 11729 11730/* 11731 * Complete a write to a bmsafemap structure. Roll forward any bitmap 11732 * changes if it's not a background write. Set all written dependencies 11733 * to DEPCOMPLETE and free the structure if possible. 11734 */ 11735static int 11736handle_written_bmsafemap(bmsafemap, bp) 11737 struct bmsafemap *bmsafemap; 11738 struct buf *bp; 11739{ 11740 struct newblk *newblk; 11741 struct inodedep *inodedep; 11742 struct jaddref *jaddref, *jatmp; 11743 struct jnewblk *jnewblk, *jntmp; 11744 struct ufsmount *ump; 11745 uint8_t *inosused; 11746 uint8_t *blksfree; 11747 struct cg *cgp; 11748 struct fs *fs; 11749 ino_t ino; 11750 int foreground; 11751 int chgs; 11752 11753 if ((bmsafemap->sm_state & IOSTARTED) == 0) 11754 panic("initiate_write_bmsafemap: Not started\n"); 11755 ump = VFSTOUFS(bmsafemap->sm_list.wk_mp); 11756 chgs = 0; 11757 bmsafemap->sm_state &= ~IOSTARTED; 11758 foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0; 11759 /* 11760 * Release journal work that was waiting on the write. 11761 */ 11762 handle_jwork(&bmsafemap->sm_freewr); 11763 11764 /* 11765 * Restore unwritten inode allocation pending jaddref writes. 
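 *
 * This is the inverse of the rollback in initiate_write_bmsafemap().
 * Reusing the assumed inode 37 from that sketch: once the cg write
 * completes, a foreground (real) buffer gets setbit(inosused, 37)
 * and cgp->cg_cs.cs_nifree-- re-applied; a background write only
 * operated on a shadow copy, so its bits are left alone.  Either
 * way the jaddref moves from UNDONE back to ATTACHED and is freed.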
11766 */ 11767 if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) { 11768 cgp = (struct cg *)bp->b_data; 11769 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11770 inosused = cg_inosused(cgp); 11771 LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd, 11772 ja_bmdeps, jatmp) { 11773 if ((jaddref->ja_state & UNDONE) == 0) 11774 continue; 11775 ino = jaddref->ja_ino % fs->fs_ipg; 11776 if (isset(inosused, ino)) 11777 panic("handle_written_bmsafemap: " 11778 "re-allocated inode"); 11779 /* Do the roll-forward only if it's a real copy. */ 11780 if (foreground) { 11781 if ((jaddref->ja_mode & IFMT) == IFDIR) 11782 cgp->cg_cs.cs_ndir++; 11783 cgp->cg_cs.cs_nifree--; 11784 setbit(inosused, ino); 11785 chgs = 1; 11786 } 11787 jaddref->ja_state &= ~UNDONE; 11788 jaddref->ja_state |= ATTACHED; 11789 free_jaddref(jaddref); 11790 } 11791 } 11792 /* 11793 * Restore any block allocations which are pending journal writes. 11794 */ 11795 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 11796 cgp = (struct cg *)bp->b_data; 11797 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11798 blksfree = cg_blksfree(cgp); 11799 LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps, 11800 jntmp) { 11801 if ((jnewblk->jn_state & UNDONE) == 0) 11802 continue; 11803 /* Do the roll-forward only if it's a real copy. */ 11804 if (foreground && 11805 jnewblk_rollforward(jnewblk, fs, cgp, blksfree)) 11806 chgs = 1; 11807 jnewblk->jn_state &= ~(UNDONE | NEWBLOCK); 11808 jnewblk->jn_state |= ATTACHED; 11809 free_jnewblk(jnewblk); 11810 } 11811 } 11812 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) { 11813 newblk->nb_state |= DEPCOMPLETE; 11814 newblk->nb_state &= ~ONDEPLIST; 11815 newblk->nb_bmsafemap = NULL; 11816 LIST_REMOVE(newblk, nb_deps); 11817 if (newblk->nb_list.wk_type == D_ALLOCDIRECT) 11818 handle_allocdirect_partdone( 11819 WK_ALLOCDIRECT(&newblk->nb_list), NULL); 11820 else if (newblk->nb_list.wk_type == D_ALLOCINDIR) 11821 handle_allocindir_partdone( 11822 WK_ALLOCINDIR(&newblk->nb_list)); 11823 else if (newblk->nb_list.wk_type != D_NEWBLK) 11824 panic("handle_written_bmsafemap: Unexpected type: %s", 11825 TYPENAME(newblk->nb_list.wk_type)); 11826 } 11827 while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) { 11828 inodedep->id_state |= DEPCOMPLETE; 11829 inodedep->id_state &= ~ONDEPLIST; 11830 LIST_REMOVE(inodedep, id_deps); 11831 inodedep->id_bmsafemap = NULL; 11832 } 11833 LIST_REMOVE(bmsafemap, sm_next); 11834 if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) && 11835 LIST_EMPTY(&bmsafemap->sm_jnewblkhd) && 11836 LIST_EMPTY(&bmsafemap->sm_newblkhd) && 11837 LIST_EMPTY(&bmsafemap->sm_inodedephd) && 11838 LIST_EMPTY(&bmsafemap->sm_freehd)) { 11839 LIST_REMOVE(bmsafemap, sm_hash); 11840 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 11841 return (0); 11842 } 11843 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 11844 if (foreground) 11845 bdirty(bp); 11846 return (1); 11847} 11848 11849/* 11850 * Try to free a mkdir dependency. 
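 *
 * A sketch of the state machine the two routines below implement:
 * a new directory created by mkdir carries two mkdir dependencies,
 *
 *	MKDIR_BODY    - the block holding "." and ".." must be written
 *	MKDIR_PARENT  - the parent's updated link count must be written
 *
 * handle_written_mkdir() marks the matching mkdir COMPLETE, and
 * complete_mkdir() strips that bit from the diradd; when neither
 * bit remains the diradd gains DEPCOMPLETE and the new name may
 * be committed.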
11851 */ 11852static void 11853complete_mkdir(mkdir) 11854 struct mkdir *mkdir; 11855{ 11856 struct diradd *dap; 11857 11858 if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE) 11859 return; 11860 LIST_REMOVE(mkdir, md_mkdirs); 11861 dap = mkdir->md_diradd; 11862 dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 11863 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) { 11864 dap->da_state |= DEPCOMPLETE; 11865 complete_diradd(dap); 11866 } 11867 WORKITEM_FREE(mkdir, D_MKDIR); 11868} 11869 11870/* 11871 * Handle the completion of a mkdir dependency. 11872 */ 11873static void 11874handle_written_mkdir(mkdir, type) 11875 struct mkdir *mkdir; 11876 int type; 11877{ 11878 11879 if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type) 11880 panic("handle_written_mkdir: bad type"); 11881 mkdir->md_state |= COMPLETE; 11882 complete_mkdir(mkdir); 11883} 11884 11885static int 11886free_pagedep(pagedep) 11887 struct pagedep *pagedep; 11888{ 11889 int i; 11890 11891 if (pagedep->pd_state & NEWBLOCK) 11892 return (0); 11893 if (!LIST_EMPTY(&pagedep->pd_dirremhd)) 11894 return (0); 11895 for (i = 0; i < DAHASHSZ; i++) 11896 if (!LIST_EMPTY(&pagedep->pd_diraddhd[i])) 11897 return (0); 11898 if (!LIST_EMPTY(&pagedep->pd_pendinghd)) 11899 return (0); 11900 if (!LIST_EMPTY(&pagedep->pd_jmvrefhd)) 11901 return (0); 11902 if (pagedep->pd_state & ONWORKLIST) 11903 WORKLIST_REMOVE(&pagedep->pd_list); 11904 LIST_REMOVE(pagedep, pd_hash); 11905 WORKITEM_FREE(pagedep, D_PAGEDEP); 11906 11907 return (1); 11908} 11909 11910/* 11911 * Called from within softdep_disk_write_complete above. 11912 * A write operation was just completed. Removed inodes can 11913 * now be freed and associated block pointers may be committed. 11914 * Note that this routine is always called from interrupt level 11915 * with further splbio interrupts blocked. 11916 */ 11917static int 11918handle_written_filepage(pagedep, bp) 11919 struct pagedep *pagedep; 11920 struct buf *bp; /* buffer containing the written page */ 11921{ 11922 struct dirrem *dirrem; 11923 struct diradd *dap, *nextdap; 11924 struct direct *ep; 11925 int i, chgs; 11926 11927 if ((pagedep->pd_state & IOSTARTED) == 0) 11928 panic("handle_written_filepage: not started"); 11929 pagedep->pd_state &= ~IOSTARTED; 11930 /* 11931 * Process any directory removals that have been committed. 11932 */ 11933 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 11934 LIST_REMOVE(dirrem, dm_next); 11935 dirrem->dm_state |= COMPLETE; 11936 dirrem->dm_dirinum = pagedep->pd_ino; 11937 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 11938 ("handle_written_filepage: Journal entries not written.")); 11939 add_to_worklist(&dirrem->dm_list, 0); 11940 } 11941 /* 11942 * Free any directory additions that have been committed. 11943 * If it is a newly allocated block, we have to wait until 11944 * the on-disk directory inode claims the new block. 11945 */ 11946 if ((pagedep->pd_state & NEWBLOCK) == 0) 11947 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 11948 free_diradd(dap, NULL); 11949 /* 11950 * Uncommitted directory entries must be restored. 
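 *
 * For example (inode number assumed): an entry for inode 831 whose
 * inode has not yet reached the disk was rolled back to d_ino = 0
 * before this page was written, so a crash could not leave a name
 * pointing at an uninitialized inode.  The loop below restores
 * d_ino from da_newinum, marks the diradd ATTACHED, and redirties
 * the buffer so the real contents eventually get written.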
11951 */ 11952 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 11953 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 11954 dap = nextdap) { 11955 nextdap = LIST_NEXT(dap, da_pdlist); 11956 if (dap->da_state & ATTACHED) 11957 panic("handle_written_filepage: attached"); 11958 ep = (struct direct *) 11959 ((char *)bp->b_data + dap->da_offset); 11960 ep->d_ino = dap->da_newinum; 11961 dap->da_state &= ~UNDONE; 11962 dap->da_state |= ATTACHED; 11963 chgs = 1; 11964 /* 11965 * If the inode referenced by the directory has 11966 * been written out, then the dependency can be 11967 * moved to the pending list. 11968 */ 11969 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 11970 LIST_REMOVE(dap, da_pdlist); 11971 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 11972 da_pdlist); 11973 } 11974 } 11975 } 11976 /* 11977 * If there were any rollbacks in the directory, then it must be 11978 * marked dirty so that it will eventually get written back in 11979 * its correct form. 11980 */ 11981 if (chgs) { 11982 if ((bp->b_flags & B_DELWRI) == 0) 11983 stat_dir_entry++; 11984 bdirty(bp); 11985 return (1); 11986 } 11987 /* 11988 * If we are not waiting for a new directory block to be 11989 * claimed by its inode, then the pagedep will be freed. 11990 * Otherwise it will remain to track any new entries on 11991 * the page in case they are fsync'ed. 11992 */ 11993 free_pagedep(pagedep); 11994 return (0); 11995} 11996 11997/* 11998 * Writing back in-core inode structures. 11999 * 12000 * The filesystem only accesses an inode's contents when it occupies an 12001 * "in-core" inode structure. These "in-core" structures are separate from 12002 * the page frames used to cache inode blocks. Only the latter are 12003 * transferred to/from the disk. So, when the updated contents of the 12004 * "in-core" inode structure are copied to the corresponding in-memory inode 12005 * block, the dependencies are also transferred. The following procedure is 12006 * called when copying a dirty "in-core" inode to a cached inode block. 12007 */ 12008 12009/* 12010 * Called when an inode is loaded from disk. If the effective link count 12011 * differed from the actual link count when it was last flushed, then we 12012 * need to ensure that the correct effective link count is put back. 12013 */ 12014void 12015softdep_load_inodeblock(ip) 12016 struct inode *ip; /* the "in_core" copy of the inode */ 12017{ 12018 struct inodedep *inodedep; 12019 12020 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 12021 ("softdep_load_inodeblock called on non-softdep filesystem")); 12022 /* 12023 * Check for alternate nlink count. 12024 */ 12025 ip->i_effnlink = ip->i_nlink; 12026 ACQUIRE_LOCK(ip->i_ump); 12027 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 12028 &inodedep) == 0) { 12029 FREE_LOCK(ip->i_ump); 12030 return; 12031 } 12032 ip->i_effnlink -= inodedep->id_nlinkdelta; 12033 FREE_LOCK(ip->i_ump); 12034} 12035 12036/* 12037 * This routine is called just before the "in-core" inode 12038 * information is to be copied to the in-memory inode block. 12039 * Recall that an inode block contains several inodes. If 12040 * the force flag is set, then the dependencies will be 12041 * cleared so that the update can always be made. Note that 12042 * the buffer is locked when this routine is called, so we 12043 * will never be in the middle of writing the inode block 12044 * to disk.
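 *
 * A small worked example (numbers assumed): if the on-disk link
 * count is 3 while one remove is still being tracked
 * (id_nlinkdelta == 1), softdep_load_inodeblock() above leaves
 * i_effnlink = 3 - 1 = 2, and the consistency check below requires
 * id_nlinkdelta == i_nlink - i_effnlink to still hold.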
12045 */ 12046void 12047softdep_update_inodeblock(ip, bp, waitfor) 12048 struct inode *ip; /* the "in_core" copy of the inode */ 12049 struct buf *bp; /* the buffer containing the inode block */ 12050 int waitfor; /* nonzero => update must be allowed */ 12051{ 12052 struct inodedep *inodedep; 12053 struct inoref *inoref; 12054 struct ufsmount *ump; 12055 struct worklist *wk; 12056 struct mount *mp; 12057 struct buf *ibp; 12058 struct fs *fs; 12059 int error; 12060 12061 ump = ip->i_ump; 12062 mp = UFSTOVFS(ump); 12063 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 12064 ("softdep_update_inodeblock called on non-softdep filesystem")); 12065 fs = ip->i_fs; 12066 /* 12067 * Preserve the freelink that is on disk. clear_unlinked_inodedep() 12068 * does not have access to the in-core ip so must write directly into 12069 * the inode block buffer when setting freelink. 12070 */ 12071 if (fs->fs_magic == FS_UFS1_MAGIC) 12072 DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data + 12073 ino_to_fsbo(fs, ip->i_number))->di_freelink); 12074 else 12075 DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data + 12076 ino_to_fsbo(fs, ip->i_number))->di_freelink); 12077 /* 12078 * If the effective link count is not equal to the actual link 12079 * count, then we must track the difference in an inodedep while 12080 * the inode is (potentially) tossed out of the cache. Otherwise, 12081 * if there is no existing inodedep, then there are no dependencies 12082 * to track. 12083 */ 12084 ACQUIRE_LOCK(ump); 12085again: 12086 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { 12087 FREE_LOCK(ump); 12088 if (ip->i_effnlink != ip->i_nlink) 12089 panic("softdep_update_inodeblock: bad link count"); 12090 return; 12091 } 12092 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) 12093 panic("softdep_update_inodeblock: bad delta"); 12094 /* 12095 * If we're flushing all dependencies we must also move any waiting 12096 * for journal writes onto the bufwait list prior to I/O. 12097 */ 12098 if (waitfor) { 12099 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12100 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12101 == DEPCOMPLETE) { 12102 jwait(&inoref->if_list, MNT_WAIT); 12103 goto again; 12104 } 12105 } 12106 } 12107 /* 12108 * Changes have been initiated. Anything depending on these 12109 * changes cannot occur until this inode has been written. 12110 */ 12111 inodedep->id_state &= ~COMPLETE; 12112 if ((inodedep->id_state & ONWORKLIST) == 0) 12113 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 12114 /* 12115 * Any new dependencies associated with the incore inode must 12116 * now be moved to the list associated with the buffer holding 12117 * the in-memory copy of the inode. Once merged process any 12118 * allocdirects that are completed by the merger. 12119 */ 12120 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); 12121 if (!TAILQ_EMPTY(&inodedep->id_inoupdt)) 12122 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt), 12123 NULL); 12124 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); 12125 if (!TAILQ_EMPTY(&inodedep->id_extupdt)) 12126 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt), 12127 NULL); 12128 /* 12129 * Now that the inode has been pushed into the buffer, the 12130 * operations dependent on the inode being written to disk 12131 * can be moved to the id_bufwait so that they will be 12132 * processed when the buffer I/O completes. 
12133 */ 12134 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { 12135 WORKLIST_REMOVE(wk); 12136 WORKLIST_INSERT(&inodedep->id_bufwait, wk); 12137 } 12138 /* 12139 * Newly allocated inodes cannot be written until the bitmap 12140 * that allocates them has been written (indicated by 12141 * DEPCOMPLETE being set in id_state). If we are doing a 12142 * forced sync (e.g., an fsync on a file), we force the bitmap 12143 * to be written so that the update can be done. 12144 */ 12145 if (waitfor == 0) { 12146 FREE_LOCK(ump); 12147 return; 12148 } 12149retry: 12150 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) { 12151 FREE_LOCK(ump); 12152 return; 12153 } 12154 ibp = inodedep->id_bmsafemap->sm_buf; 12155 ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT); 12156 if (ibp == NULL) { 12157 /* 12158 * If ibp came back as NULL, the dependency could have been 12159 * freed while we slept. Look it up again, and check to see 12160 * that it has completed. 12161 */ 12162 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) 12163 goto retry; 12164 FREE_LOCK(ump); 12165 return; 12166 } 12167 FREE_LOCK(ump); 12168 if ((error = bwrite(ibp)) != 0) 12169 softdep_error("softdep_update_inodeblock: bwrite", error); 12170} 12171 12172/* 12173 * Merge a new inode dependency list (such as id_newinoupdt) into an 12174 * old inode dependency list (such as id_inoupdt). This routine must be 12175 * called with splbio interrupts blocked. 12176 */ 12177static void 12178merge_inode_lists(newlisthead, oldlisthead) 12179 struct allocdirectlst *newlisthead; 12180 struct allocdirectlst *oldlisthead; 12181{ 12182 struct allocdirect *listadp, *newadp; 12183 12184 newadp = TAILQ_FIRST(newlisthead); 12185 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) { 12186 if (listadp->ad_offset < newadp->ad_offset) { 12187 listadp = TAILQ_NEXT(listadp, ad_next); 12188 continue; 12189 } 12190 TAILQ_REMOVE(newlisthead, newadp, ad_next); 12191 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 12192 if (listadp->ad_offset == newadp->ad_offset) { 12193 allocdirect_merge(oldlisthead, newadp, 12194 listadp); 12195 listadp = newadp; 12196 } 12197 newadp = TAILQ_FIRST(newlisthead); 12198 } 12199 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) { 12200 TAILQ_REMOVE(newlisthead, newadp, ad_next); 12201 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next); 12202 } 12203} 12204 12205/* 12206 * If we are doing an fsync, then we must ensure that any directory 12207 * entries for the inode have been written after the inode gets to disk.
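 *
 * A motivating scenario (a sketch, not original commentary): after
 * creating "d/f" and calling fsync() on "f", having f's inode on
 * disk is not enough; if the entry for "f" in d's directory page
 * were lost in a crash, the fsync guarantee would be broken.  The
 * routine below therefore walks the diradd list in id_pendinghd
 * and writes each parent directory page (and, where MKDIR_PARENT
 * or NEWBLOCK applies, flushes the parent itself) before
 * returning.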
12208 */ 12209int 12210softdep_fsync(vp) 12211 struct vnode *vp; /* the "in_core" copy of the inode */ 12212{ 12213 struct inodedep *inodedep; 12214 struct pagedep *pagedep; 12215 struct inoref *inoref; 12216 struct ufsmount *ump; 12217 struct worklist *wk; 12218 struct diradd *dap; 12219 struct mount *mp; 12220 struct vnode *pvp; 12221 struct inode *ip; 12222 struct buf *bp; 12223 struct fs *fs; 12224 struct thread *td = curthread; 12225 int error, flushparent, pagedep_new_block; 12226 ino_t parentino; 12227 ufs_lbn_t lbn; 12228 12229 ip = VTOI(vp); 12230 fs = ip->i_fs; 12231 ump = ip->i_ump; 12232 mp = vp->v_mount; 12233 if (MOUNTEDSOFTDEP(mp) == 0) 12234 return (0); 12235 ACQUIRE_LOCK(ump); 12236restart: 12237 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { 12238 FREE_LOCK(ump); 12239 return (0); 12240 } 12241 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12242 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12243 == DEPCOMPLETE) { 12244 jwait(&inoref->if_list, MNT_WAIT); 12245 goto restart; 12246 } 12247 } 12248 if (!LIST_EMPTY(&inodedep->id_inowait) || 12249 !TAILQ_EMPTY(&inodedep->id_extupdt) || 12250 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 12251 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 12252 !TAILQ_EMPTY(&inodedep->id_newinoupdt)) 12253 panic("softdep_fsync: pending ops %p", inodedep); 12254 for (error = 0, flushparent = 0; ; ) { 12255 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 12256 break; 12257 if (wk->wk_type != D_DIRADD) 12258 panic("softdep_fsync: Unexpected type %s", 12259 TYPENAME(wk->wk_type)); 12260 dap = WK_DIRADD(wk); 12261 /* 12262 * Flush our parent if this directory entry has a MKDIR_PARENT 12263 * dependency or is contained in a newly allocated block. 12264 */ 12265 if (dap->da_state & DIRCHG) 12266 pagedep = dap->da_previous->dm_pagedep; 12267 else 12268 pagedep = dap->da_pagedep; 12269 parentino = pagedep->pd_ino; 12270 lbn = pagedep->pd_lbn; 12271 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) 12272 panic("softdep_fsync: dirty"); 12273 if ((dap->da_state & MKDIR_PARENT) || 12274 (pagedep->pd_state & NEWBLOCK)) 12275 flushparent = 1; 12276 else 12277 flushparent = 0; 12278 /* 12279 * If we are being fsync'ed as part of vgone'ing this vnode, 12280 * then we will not be able to release and recover the 12281 * vnode below, so we just have to give up on writing its 12282 * directory entry out. It will eventually be written, just 12283 * not now, but then the user was not asking to have it 12284 * written, so we are not breaking any promises. 12285 */ 12286 if (vp->v_iflag & VI_DOOMED) 12287 break; 12288 /* 12289 * We prevent deadlock by always fetching inodes from the 12290 * root, moving down the directory tree. Thus, when fetching 12291 * our parent directory, we first try to get the lock. If 12292 * that fails, we must unlock ourselves before requesting 12293 * the lock on our parent. See the comment in ufs_lookup 12294 * for details on possible races. 
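 *
 * In outline, the code below does the following (a simplified
 * sketch; the full error handling is in the real code):
 *
 *	if (ffs_vgetf(parent, LK_NOWAIT) fails) {
 *		vfs_busy(mp);		keep the mount from going away
 *		VOP_UNLOCK(vp);		drop the child's lock
 *		ffs_vgetf(parent);	acquire the parent first
 *		vn_lock(vp);		then re-lock the child
 *		if (vp->v_iflag & VI_DOOMED)	give up, vnode recycled
 *	}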
12295 */ 12296 FREE_LOCK(ump); 12297 if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp, 12298 FFSV_FORCEINSMQ)) { 12299 error = vfs_busy(mp, MBF_NOWAIT); 12300 if (error != 0) { 12301 vfs_ref(mp); 12302 VOP_UNLOCK(vp, 0); 12303 error = vfs_busy(mp, 0); 12304 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12305 vfs_rel(mp); 12306 if (error != 0) 12307 return (ENOENT); 12308 if (vp->v_iflag & VI_DOOMED) { 12309 vfs_unbusy(mp); 12310 return (ENOENT); 12311 } 12312 } 12313 VOP_UNLOCK(vp, 0); 12314 error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, 12315 &pvp, FFSV_FORCEINSMQ); 12316 vfs_unbusy(mp); 12317 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12318 if (vp->v_iflag & VI_DOOMED) { 12319 if (error == 0) 12320 vput(pvp); 12321 error = ENOENT; 12322 } 12323 if (error != 0) 12324 return (error); 12325 } 12326 /* 12327 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 12328 * that are contained in direct blocks will be resolved by 12329 * doing a ffs_update. Pagedeps contained in indirect blocks 12330 * may require a complete sync'ing of the directory. So, we 12331 * try the cheap and fast ffs_update first, and if that fails, 12332 * then we do the slower ffs_syncvnode of the directory. 12333 */ 12334 if (flushparent) { 12335 int locked; 12336 12337 if ((error = ffs_update(pvp, 1)) != 0) { 12338 vput(pvp); 12339 return (error); 12340 } 12341 ACQUIRE_LOCK(ump); 12342 locked = 1; 12343 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) { 12344 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) { 12345 if (wk->wk_type != D_DIRADD) 12346 panic("softdep_fsync: Unexpected type %s", 12347 TYPENAME(wk->wk_type)); 12348 dap = WK_DIRADD(wk); 12349 if (dap->da_state & DIRCHG) 12350 pagedep = dap->da_previous->dm_pagedep; 12351 else 12352 pagedep = dap->da_pagedep; 12353 pagedep_new_block = pagedep->pd_state & NEWBLOCK; 12354 FREE_LOCK(ump); 12355 locked = 0; 12356 if (pagedep_new_block && (error = 12357 ffs_syncvnode(pvp, MNT_WAIT, 0))) { 12358 vput(pvp); 12359 return (error); 12360 } 12361 } 12362 } 12363 if (locked) 12364 FREE_LOCK(ump); 12365 } 12366 /* 12367 * Flush directory page containing the inode's name. 12368 */ 12369 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 12370 &bp); 12371 if (error == 0) 12372 error = bwrite(bp); 12373 else 12374 brelse(bp); 12375 vput(pvp); 12376 if (error != 0) 12377 return (error); 12378 ACQUIRE_LOCK(ump); 12379 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) 12380 break; 12381 } 12382 FREE_LOCK(ump); 12383 return (0); 12384} 12385 12386/* 12387 * Flush all the dirty bitmaps associated with the block device 12388 * before flushing the rest of the dirty blocks so as to reduce 12389 * the number of dependencies that will have to be rolled back. 12390 * 12391 * XXX Unused? 12392 */ 12393void 12394softdep_fsync_mountdev(vp) 12395 struct vnode *vp; 12396{ 12397 struct buf *bp, *nbp; 12398 struct worklist *wk; 12399 struct bufobj *bo; 12400 12401 if (!vn_isdisk(vp, NULL)) 12402 panic("softdep_fsync_mountdev: vnode not a disk"); 12403 bo = &vp->v_bufobj; 12404restart: 12405 BO_LOCK(bo); 12406 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 12407 /* 12408 * If it is already scheduled, skip to the next buffer. 12409 */ 12410 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) 12411 continue; 12412 12413 if ((bp->b_flags & B_DELWRI) == 0) 12414 panic("softdep_fsync_mountdev: not dirty"); 12415 /* 12416 * We are only interested in bitmaps with outstanding 12417 * dependencies. 
12418 */ 12419 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 12420 wk->wk_type != D_BMSAFEMAP || 12421 (bp->b_vflags & BV_BKGRDINPROG)) { 12422 BUF_UNLOCK(bp); 12423 continue; 12424 } 12425 BO_UNLOCK(bo); 12426 bremfree(bp); 12427 (void) bawrite(bp); 12428 goto restart; 12429 } 12430 drain_output(vp); 12431 BO_UNLOCK(bo); 12432} 12433 12434/* 12435 * Sync all cylinder groups that were dirty at the time this function is 12436 * called. Newly dirtied cgs will be inserted before the sentinel. This 12437 * is used to flush freedep activity that may be holding up writes to an 12438 * indirect block. 12439 */ 12440static int 12441sync_cgs(mp, waitfor) 12442 struct mount *mp; 12443 int waitfor; 12444{ 12445 struct bmsafemap *bmsafemap; 12446 struct bmsafemap *sentinel; 12447 struct ufsmount *ump; 12448 struct buf *bp; 12449 int error; 12450 12451 sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK); 12452 sentinel->sm_cg = -1; 12453 ump = VFSTOUFS(mp); 12454 error = 0; 12455 ACQUIRE_LOCK(ump); 12456 LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next); 12457 for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL; 12458 bmsafemap = LIST_NEXT(sentinel, sm_next)) { 12459 /* Skip sentinels and cgs with no work to release. */ 12460 if (bmsafemap->sm_cg == -1 || 12461 (LIST_EMPTY(&bmsafemap->sm_freehd) && 12462 LIST_EMPTY(&bmsafemap->sm_freewr))) { 12463 LIST_REMOVE(sentinel, sm_next); 12464 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); 12465 continue; 12466 } 12467 /* 12468 * If we don't get the lock and we're waiting, try again; if 12469 * not, move on to the next buf and try to sync it. 12470 */ 12471 bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor); 12472 if (bp == NULL && waitfor == MNT_WAIT) 12473 continue; 12474 LIST_REMOVE(sentinel, sm_next); 12475 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); 12476 if (bp == NULL) 12477 continue; 12478 FREE_LOCK(ump); 12479 if (waitfor == MNT_NOWAIT) 12480 bawrite(bp); 12481 else 12482 error = bwrite(bp); 12483 ACQUIRE_LOCK(ump); 12484 if (error) 12485 break; 12486 } 12487 LIST_REMOVE(sentinel, sm_next); 12488 FREE_LOCK(ump); 12489 free(sentinel, M_BMSAFEMAP); 12490 return (error); 12491} 12492 12493/* 12494 * This routine is called when we are trying to synchronously flush a 12495 * file. This routine must eliminate any filesystem metadata dependencies 12496 * so that the syncing routine can succeed. 12497 */ 12498int 12499softdep_sync_metadata(struct vnode *vp) 12500{ 12501 struct inode *ip; 12502 int error; 12503 12504 ip = VTOI(vp); 12505 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 12506 ("softdep_sync_metadata called on non-softdep filesystem")); 12507 /* 12508 * Ensure that any direct block dependencies have been cleared, 12509 * truncations are started, and inode references are journaled. 12510 */ 12511 ACQUIRE_LOCK(ip->i_ump); 12512 /* 12513 * Write all journal records to prevent rollbacks on devvp. 12514 */ 12515 if (vp->v_type == VCHR) 12516 softdep_flushjournal(vp->v_mount); 12517 error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number); 12518 /* 12519 * Ensure that all truncates are written so we won't find deps on 12520 * indirect blocks. 12521 */ 12522 process_truncates(vp); 12523 FREE_LOCK(ip->i_ump); 12524 12525 return (error); 12526} 12527 12528/* 12529 * This routine is called when we are attempting to sync a buf with 12530 * dependencies. If waitfor is MNT_NOWAIT it attempts to schedule any 12531 * other IO it can but returns EBUSY if the buffer is not yet able to 12532 * be written.
For dependencies that will not cause rollbacks it always 12533 * returns 0. 12534 */ 12535int 12536softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) 12537{ 12538 struct indirdep *indirdep; 12539 struct pagedep *pagedep; 12540 struct allocindir *aip; 12541 struct newblk *newblk; 12542 struct ufsmount *ump; 12543 struct buf *nbp; 12544 struct worklist *wk; 12545 int i, error; 12546 12547 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 12548 ("softdep_sync_buf called on non-softdep filesystem")); 12549 /* 12550 * For VCHR we just don't want to force flush any dependencies that 12551 * will cause rollbacks. 12552 */ 12553 if (vp->v_type == VCHR) { 12554 if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0)) 12555 return (EBUSY); 12556 return (0); 12557 } 12558 ump = VTOI(vp)->i_ump; 12559 ACQUIRE_LOCK(ump); 12560 /* 12561 * As we hold the buffer locked, none of its dependencies 12562 * will disappear. 12563 */ 12564 error = 0; 12565top: 12566 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 12567 switch (wk->wk_type) { 12568 12569 case D_ALLOCDIRECT: 12570 case D_ALLOCINDIR: 12571 newblk = WK_NEWBLK(wk); 12572 if (newblk->nb_jnewblk != NULL) { 12573 if (waitfor == MNT_NOWAIT) { 12574 error = EBUSY; 12575 goto out_unlock; 12576 } 12577 jwait(&newblk->nb_jnewblk->jn_list, waitfor); 12578 goto top; 12579 } 12580 if (newblk->nb_state & DEPCOMPLETE || 12581 waitfor == MNT_NOWAIT) 12582 continue; 12583 nbp = newblk->nb_bmsafemap->sm_buf; 12584 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12585 if (nbp == NULL) 12586 goto top; 12587 FREE_LOCK(ump); 12588 if ((error = bwrite(nbp)) != 0) 12589 goto out; 12590 ACQUIRE_LOCK(ump); 12591 continue; 12592 12593 case D_INDIRDEP: 12594 indirdep = WK_INDIRDEP(wk); 12595 if (waitfor == MNT_NOWAIT) { 12596 if (!TAILQ_EMPTY(&indirdep->ir_trunc) || 12597 !LIST_EMPTY(&indirdep->ir_deplisthd)) { 12598 error = EBUSY; 12599 goto out_unlock; 12600 } 12601 } 12602 if (!TAILQ_EMPTY(&indirdep->ir_trunc)) 12603 panic("softdep_sync_buf: truncation pending."); 12604 restart: 12605 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 12606 newblk = (struct newblk *)aip; 12607 if (newblk->nb_jnewblk != NULL) { 12608 jwait(&newblk->nb_jnewblk->jn_list, 12609 waitfor); 12610 goto restart; 12611 } 12612 if (newblk->nb_state & DEPCOMPLETE) 12613 continue; 12614 nbp = newblk->nb_bmsafemap->sm_buf; 12615 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12616 if (nbp == NULL) 12617 goto restart; 12618 FREE_LOCK(ump); 12619 if ((error = bwrite(nbp)) != 0) 12620 goto out; 12621 ACQUIRE_LOCK(ump); 12622 goto restart; 12623 } 12624 continue; 12625 12626 case D_PAGEDEP: 12627 /* 12628 * Only flush directory entries in synchronous passes. 12629 */ 12630 if (waitfor != MNT_WAIT) { 12631 error = EBUSY; 12632 goto out_unlock; 12633 } 12634 /* 12635 * While syncing snapshots, we must allow recursive 12636 * lookups. 12637 */ 12638 BUF_AREC(bp); 12639 /* 12640 * We are trying to sync a directory that may 12641 * have dependencies both on its own metadata 12642 * and on the inodes of any 12643 * recently allocated files. We walk its diradd 12644 * lists pushing out the associated inode.
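 * (The diradd entries live in DAHASHSZ hash buckets on the pagedep; each non-empty bucket is handed to flush_pagedep_deps() below.)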
12645 */ 12646 pagedep = WK_PAGEDEP(wk); 12647 for (i = 0; i < DAHASHSZ; i++) { 12648 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 12649 continue; 12650 if ((error = flush_pagedep_deps(vp, wk->wk_mp, 12651 &pagedep->pd_diraddhd[i]))) { 12652 BUF_NOREC(bp); 12653 goto out_unlock; 12654 } 12655 } 12656 BUF_NOREC(bp); 12657 continue; 12658 12659 case D_FREEWORK: 12660 case D_FREEDEP: 12661 case D_JSEGDEP: 12662 case D_JNEWBLK: 12663 continue; 12664 12665 default: 12666 panic("softdep_sync_buf: Unknown type %s", 12667 TYPENAME(wk->wk_type)); 12668 /* NOTREACHED */ 12669 } 12670 } 12671out_unlock: 12672 FREE_LOCK(ump); 12673out: 12674 return (error); 12675} 12676 12677/* 12678 * Flush the dependencies associated with an inodedep. 12679 * Called with the soft updates lock held. 12680 */ 12681static int 12682flush_inodedep_deps(vp, mp, ino) 12683 struct vnode *vp; 12684 struct mount *mp; 12685 ino_t ino; 12686{ 12687 struct inodedep *inodedep; 12688 struct inoref *inoref; 12689 struct ufsmount *ump; 12690 int error, waitfor; 12691 12692 /* 12693 * This work is done in two passes. The first pass grabs most 12694 * of the buffers and begins asynchronously writing them. The 12695 * only way to wait for these asynchronous writes is to sleep 12696 * on the filesystem vnode which may stay busy for a long time 12697 * if the filesystem is active. So, instead, we make a second 12698 * pass over the dependencies blocking on each write. In the 12699 * usual case we will be blocking against a write that we 12700 * initiated, so when it is done the dependency will have been 12701 * resolved. Thus the second pass is expected to end quickly. 12702 * We give a brief window at the top of the loop to allow 12703 * any pending I/O to complete. 12704 */ 12705 ump = VFSTOUFS(mp); 12706 LOCK_OWNED(ump); 12707 for (error = 0, waitfor = MNT_NOWAIT; ; ) { 12708 if (error) 12709 return (error); 12710 FREE_LOCK(ump); 12711 ACQUIRE_LOCK(ump); 12712restart: 12713 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) 12714 return (0); 12715 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12716 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12717 == DEPCOMPLETE) { 12718 jwait(&inoref->if_list, MNT_WAIT); 12719 goto restart; 12720 } 12721 } 12722 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) || 12723 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) || 12724 flush_deplist(&inodedep->id_extupdt, waitfor, &error) || 12725 flush_deplist(&inodedep->id_newextupdt, waitfor, &error)) 12726 continue; 12727 /* 12728 * If we have just completed pass 2, we are done; otherwise begin pass 2. 12729 */ 12730 if (waitfor == MNT_WAIT) 12731 break; 12732 waitfor = MNT_WAIT; 12733 } 12734 /* 12735 * Try freeing inodedep in case all dependencies have been removed. 12736 */ 12737 if (inodedep_lookup(mp, ino, 0, &inodedep) != 0) 12738 (void) free_inodedep(inodedep); 12739 return (0); 12740} 12741 12742/* 12743 * Flush an inode dependency list. 12744 * Called with the soft updates lock held.
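 * (Returns non-zero whenever it has slept or initiated I/O, so that the caller rescans its lists from the top.)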
12745 */ 12746static int 12747flush_deplist(listhead, waitfor, errorp) 12748 struct allocdirectlst *listhead; 12749 int waitfor; 12750 int *errorp; 12751{ 12752 struct allocdirect *adp; 12753 struct newblk *newblk; 12754 struct ufsmount *ump; 12755 struct buf *bp; 12756 12757 if ((adp = TAILQ_FIRST(listhead)) == NULL) 12758 return (0); 12759 ump = VFSTOUFS(adp->ad_list.wk_mp); 12760 LOCK_OWNED(ump); 12761 TAILQ_FOREACH(adp, listhead, ad_next) { 12762 newblk = (struct newblk *)adp; 12763 if (newblk->nb_jnewblk != NULL) { 12764 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 12765 return (1); 12766 } 12767 if (newblk->nb_state & DEPCOMPLETE) 12768 continue; 12769 bp = newblk->nb_bmsafemap->sm_buf; 12770 bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor); 12771 if (bp == NULL) { 12772 if (waitfor == MNT_NOWAIT) 12773 continue; 12774 return (1); 12775 } 12776 FREE_LOCK(ump); 12777 if (waitfor == MNT_NOWAIT) 12778 bawrite(bp); 12779 else 12780 *errorp = bwrite(bp); 12781 ACQUIRE_LOCK(ump); 12782 return (1); 12783 } 12784 return (0); 12785} 12786 12787/* 12788 * Flush dependencies associated with an allocdirect block. 12789 */ 12790static int 12791flush_newblk_dep(vp, mp, lbn) 12792 struct vnode *vp; 12793 struct mount *mp; 12794 ufs_lbn_t lbn; 12795{ 12796 struct newblk *newblk; 12797 struct ufsmount *ump; 12798 struct bufobj *bo; 12799 struct inode *ip; 12800 struct buf *bp; 12801 ufs2_daddr_t blkno; 12802 int error; 12803 12804 error = 0; 12805 bo = &vp->v_bufobj; 12806 ip = VTOI(vp); 12807 blkno = DIP(ip, i_db[lbn]); 12808 if (blkno == 0) 12809 panic("flush_newblk_dep: Missing block"); 12810 ump = VFSTOUFS(mp); 12811 ACQUIRE_LOCK(ump); 12812 /* 12813 * Loop until all dependencies related to this block are satisfied. 12814 * We must be careful to restart after each sleep in case a write 12815 * completes some part of this process for us. 12816 */ 12817 for (;;) { 12818 if (newblk_lookup(mp, blkno, 0, &newblk) == 0) { 12819 FREE_LOCK(ump); 12820 break; 12821 } 12822 if (newblk->nb_list.wk_type != D_ALLOCDIRECT) 12823 panic("flush_newblk_deps: Bad newblk %p", newblk); 12824 /* 12825 * Flush the journal. 12826 */ 12827 if (newblk->nb_jnewblk != NULL) { 12828 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 12829 continue; 12830 } 12831 /* 12832 * Write the bitmap dependency. 12833 */ 12834 if ((newblk->nb_state & DEPCOMPLETE) == 0) { 12835 bp = newblk->nb_bmsafemap->sm_buf; 12836 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); 12837 if (bp == NULL) 12838 continue; 12839 FREE_LOCK(ump); 12840 error = bwrite(bp); 12841 if (error) 12842 break; 12843 ACQUIRE_LOCK(ump); 12844 continue; 12845 } 12846 /* 12847 * Write the buffer. 12848 */ 12849 FREE_LOCK(ump); 12850 BO_LOCK(bo); 12851 bp = gbincore(bo, lbn); 12852 if (bp != NULL) { 12853 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 12854 LK_INTERLOCK, BO_LOCKPTR(bo)); 12855 if (error == ENOLCK) { 12856 ACQUIRE_LOCK(ump); 12857 continue; /* Slept, retry */ 12858 } 12859 if (error != 0) 12860 break; /* Failed */ 12861 if (bp->b_flags & B_DELWRI) { 12862 bremfree(bp); 12863 error = bwrite(bp); 12864 if (error) 12865 break; 12866 } else 12867 BUF_UNLOCK(bp); 12868 } else 12869 BO_UNLOCK(bo); 12870 /* 12871 * We have to wait for the direct pointers to 12872 * point at the newdirblk before the dependency 12873 * will go away. 12874 */ 12875 error = ffs_update(vp, 1); 12876 if (error) 12877 break; 12878 ACQUIRE_LOCK(ump); 12879 } 12880 return (error); 12881} 12882 12883/* 12884 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 
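 * MKDIR_PARENT entries that survive the ffs_update() below are parked on a local "unfinished" list and put back on diraddhdp before returning; the caller's directory sync resolves them.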
12885 * Called with the soft updates lock held. 12886 */ 12887static int 12888flush_pagedep_deps(pvp, mp, diraddhdp) 12889 struct vnode *pvp; 12890 struct mount *mp; 12891 struct diraddhd *diraddhdp; 12892{ 12893 struct inodedep *inodedep; 12894 struct inoref *inoref; 12895 struct ufsmount *ump; 12896 struct diradd *dap; 12897 struct vnode *vp; 12898 int error = 0; 12899 struct buf *bp; 12900 ino_t inum; 12901 struct diraddhd unfinished; 12902 12903 LIST_INIT(&unfinished); 12904 ump = VFSTOUFS(mp); 12905 LOCK_OWNED(ump); 12906restart: 12907 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 12908 /* 12909 * Flush ourselves if this directory entry 12910 * has a MKDIR_PARENT dependency. 12911 */ 12912 if (dap->da_state & MKDIR_PARENT) { 12913 FREE_LOCK(ump); 12914 if ((error = ffs_update(pvp, 1)) != 0) 12915 break; 12916 ACQUIRE_LOCK(ump); 12917 /* 12918 * If that cleared dependencies, go on to next. 12919 */ 12920 if (dap != LIST_FIRST(diraddhdp)) 12921 continue; 12922 /* 12923 * All MKDIR_PARENT dependencies and all the 12924 * NEWBLOCK pagedeps that are contained in direct 12925 * blocks were resolved by doing the above ffs_update. 12926 * Pagedeps contained in indirect blocks may 12927 * require a complete sync'ing of the directory. 12928 * We are in the midst of doing a complete sync, 12929 * so if they are not resolved in this pass we 12930 * defer them for now as they will be sync'ed by 12931 * our caller shortly. 12932 */ 12933 LIST_REMOVE(dap, da_pdlist); 12934 LIST_INSERT_HEAD(&unfinished, dap, da_pdlist); 12935 continue; 12936 } 12937 /* 12938 * A newly allocated directory must have its "." and 12939 * ".." entries written out before its name can be 12940 * committed in its parent. 12941 */ 12942 inum = dap->da_newinum; 12943 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) 12944 panic("flush_pagedep_deps: lost inode1"); 12945 /* 12946 * Wait for any pending journal adds to complete so we don't 12947 * cause rollbacks while syncing. 12948 */ 12949 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12950 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12951 == DEPCOMPLETE) { 12952 jwait(&inoref->if_list, MNT_WAIT); 12953 goto restart; 12954 } 12955 } 12956 if (dap->da_state & MKDIR_BODY) { 12957 FREE_LOCK(ump); 12958 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, 12959 FFSV_FORCEINSMQ))) 12960 break; 12961 error = flush_newblk_dep(vp, mp, 0); 12962 /* 12963 * If we still have the dependency we might need to 12964 * update the vnode to sync the new link count to 12965 * disk. 12966 */ 12967 if (error == 0 && dap == LIST_FIRST(diraddhdp)) 12968 error = ffs_update(vp, 1); 12969 vput(vp); 12970 if (error != 0) 12971 break; 12972 ACQUIRE_LOCK(ump); 12973 /* 12974 * If that cleared dependencies, go on to next. 12975 */ 12976 if (dap != LIST_FIRST(diraddhdp)) 12977 continue; 12978 if (dap->da_state & MKDIR_BODY) { 12979 inodedep_lookup(UFSTOVFS(ump), inum, 0, 12980 &inodedep); 12981 panic("flush_pagedep_deps: MKDIR_BODY " 12982 "inodedep %p dap %p vp %p", 12983 inodedep, dap, vp); 12984 } 12985 } 12986 /* 12987 * Flush the inode on which the directory entry depends. 12988 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 12989 * the only remaining dependency is that the updated inode 12990 * count must get pushed to disk. The inode has already 12991 * been pushed into its inode buffer (via VOP_UPDATE) at 12992 * the time of the reference count change.
So we need only 12993 * locate that buffer, ensure that there will be no rollback 12994 * caused by a bitmap dependency, then write the inode buffer. 12995 */ 12996retry: 12997 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) 12998 panic("flush_pagedep_deps: lost inode"); 12999 /* 13000 * If the inode still has bitmap dependencies, 13001 * push them to disk. 13002 */ 13003 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) { 13004 bp = inodedep->id_bmsafemap->sm_buf; 13005 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); 13006 if (bp == NULL) 13007 goto retry; 13008 FREE_LOCK(ump); 13009 if ((error = bwrite(bp)) != 0) 13010 break; 13011 ACQUIRE_LOCK(ump); 13012 if (dap != LIST_FIRST(diraddhdp)) 13013 continue; 13014 } 13015 /* 13016 * If the inode is still sitting in a buffer waiting 13017 * to be written or waiting for the link count to be 13018 * adjusted, update it here to flush it to disk. 13019 */ 13020 if (dap == LIST_FIRST(diraddhdp)) { 13021 FREE_LOCK(ump); 13022 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, 13023 FFSV_FORCEINSMQ))) 13024 break; 13025 error = ffs_update(vp, 1); 13026 vput(vp); 13027 if (error) 13028 break; 13029 ACQUIRE_LOCK(ump); 13030 } 13031 /* 13032 * If we have failed to get rid of all the dependencies 13033 * then something is seriously wrong. 13034 */ 13035 if (dap == LIST_FIRST(diraddhdp)) { 13036 inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep); 13037 panic("flush_pagedep_deps: failed to flush " 13038 "inodedep %p ino %ju dap %p", 13039 inodedep, (uintmax_t)inum, dap); 13040 } 13041 } 13042 if (error) 13043 ACQUIRE_LOCK(ump); 13044 while ((dap = LIST_FIRST(&unfinished)) != NULL) { 13045 LIST_REMOVE(dap, da_pdlist); 13046 LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist); 13047 } 13048 return (error); 13049} 13050 13051/* 13052 * A large burst of file addition or deletion activity can drive the 13053 * memory load excessively high. First attempt to slow things down 13054 * using the techniques below. If that fails, this routine requests 13055 * the offending operations to fall back to running synchronously 13056 * until the memory load returns to a reasonable level. 13057 */ 13058int 13059softdep_slowdown(vp) 13060 struct vnode *vp; 13061{ 13062 struct ufsmount *ump; 13063 int jlow; 13064 int max_softdeps_hard; 13065 13066 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 13067 ("softdep_slowdown called on non-softdep filesystem")); 13068 ump = VFSTOUFS(vp->v_mount); 13069 ACQUIRE_LOCK(ump); 13070 jlow = 0; 13071 /* 13072 * Check for journal space if needed. 13073 */ 13074 if (DOINGSUJ(vp)) { 13075 if (journal_space(ump, 0) == 0) 13076 jlow = 1; 13077 } 13078 /* 13079 * If the system is under its limits and our filesystem is 13080 * not responsible for more than our share of the usage and 13081 * we are not low on journal space, then no need to slow down.
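 * (max_softdeps_hard below is max_softdeps plus 10% of slop; the per-filesystem comparisons divide that limit among the stat_flush_threads flushing threads.)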
13082 */ 13083 max_softdeps_hard = max_softdeps * 11 / 10; 13084 if (dep_current[D_DIRREM] < max_softdeps_hard / 2 && 13085 dep_current[D_INODEDEP] < max_softdeps_hard && 13086 dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 && 13087 dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 && 13088 ump->softdep_curdeps[D_DIRREM] < 13089 (max_softdeps_hard / 2) / stat_flush_threads && 13090 ump->softdep_curdeps[D_INODEDEP] < 13091 max_softdeps_hard / stat_flush_threads && 13092 ump->softdep_curdeps[D_INDIRDEP] < 13093 (max_softdeps_hard / 1000) / stat_flush_threads && 13094 ump->softdep_curdeps[D_FREEBLKS] < 13095 max_softdeps_hard / stat_flush_threads) { 13096 FREE_LOCK(ump); 13097 return (0); 13098 } 13099 /* 13100 * If the journal is low or our filesystem is over its limit 13101 * then speed up the cleanup. 13102 */ 13103 if (ump->softdep_curdeps[D_INDIRDEP] < 13104 (max_softdeps_hard / 1000) / stat_flush_threads || jlow) 13105 softdep_speedup(ump); 13106 stat_sync_limit_hit += 1; 13107 FREE_LOCK(ump); 13108 /* 13109 * We only slow down the rate at which new dependencies are 13110 * generated if we are not using journaling. With journaling, 13111 * the cleanup should always be sufficient to keep things 13112 * under control. 13113 */ 13114 if (DOINGSUJ(vp)) 13115 return (0); 13116 return (1); 13117} 13118 13119/* 13120 * Called by the allocation routines when they are about to fail 13121 * in the hope that we can free up the requested resource (inodes 13122 * or disk space). 13123 * 13124 * First check to see if the work list has anything on it. If it has, 13125 * clean up entries until we successfully free the requested resource. 13126 * Because this process holds inodes locked, we cannot handle any remove 13127 * requests that might block on a locked inode as that could lead to 13128 * deadlock. If the worklist yields none of the requested resource, 13129 * start syncing out vnodes to free up the needed space. 13130 */ 13131int 13132softdep_request_cleanup(fs, vp, cred, resource) 13133 struct fs *fs; 13134 struct vnode *vp; 13135 struct ucred *cred; 13136 int resource; 13137{ 13138 struct ufsmount *ump; 13139 struct mount *mp; 13140 struct vnode *lvp, *mvp; 13141 long starttime; 13142 ufs2_daddr_t needed; 13143 int error; 13144 13145 /* 13146 * If we are being called because of a process doing a 13147 * copy-on-write, then it is not safe to process any 13148 * worklist items as we will recurse into the copyonwrite 13149 * routine. This will result in an incoherent snapshot. 13150 * If the vnode that we hold is a snapshot, we must avoid 13151 * handling other resources that could cause deadlock. 13152 */ 13153 if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp))) 13154 return (0); 13155 13156 if (resource == FLUSH_BLOCKS_WAIT) 13157 stat_cleanup_blkrequests += 1; 13158 else 13159 stat_cleanup_inorequests += 1; 13160 13161 mp = vp->v_mount; 13162 ump = VFSTOUFS(mp); 13163 mtx_assert(UFS_MTX(ump), MA_OWNED); 13164 UFS_UNLOCK(ump); 13165 error = ffs_update(vp, 1); 13166 if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) { 13167 UFS_LOCK(ump); 13168 return (0); 13169 } 13170 /* 13171 * If we are in need of resources, start by cleaning up 13172 * any block removals associated with our inode. 13173 */ 13174 ACQUIRE_LOCK(ump); 13175 process_removes(vp); 13176 process_truncates(vp); 13177 FREE_LOCK(ump); 13178 /* 13179 * Now clean up at least as many resources as we will need.
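 * (For example, with the hypothetical values mnt_writeopcount == 4 and fs_contigsumsize == 16, the code below asks for 4 + 2 == 6 inodes, or (4 + 2) * 16 == 96 blocks plus, for unprivileged users, the fs_minfree reserve.)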
13180 * 13181 * When requested to clean up inodes, the number that is needed 13182 * is set by the number of simultaneous writers (mnt_writeopcount) 13183 * plus a bit of slop (2) in case some more writers show up while 13184 * we are cleaning. 13185 * 13186 * When requested to free up space, the amount of space that 13187 * we need is enough blocks to allocate a full-sized segment 13188 * (fs_contigsumsize). The number of such segments that will 13189 * be needed is set by the number of simultaneous writers 13190 * (mnt_writeopcount) plus a bit of slop (2) in case some more 13191 * writers show up while we are cleaning. 13192 * 13193 * Additionally, if we are unprivileged and allocating space, 13194 * we need to ensure that we clean up enough blocks to get the 13195 * needed number of blocks over the threshold of the minimum 13196 * number of blocks required to be kept free by the filesystem 13197 * (fs_minfree). 13198 */ 13199 if (resource == FLUSH_INODES_WAIT) { 13200 needed = vp->v_mount->mnt_writeopcount + 2; 13201 } else if (resource == FLUSH_BLOCKS_WAIT) { 13202 needed = (vp->v_mount->mnt_writeopcount + 2) * 13203 fs->fs_contigsumsize; 13204 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0)) 13205 needed += fragstoblks(fs, 13206 roundup((fs->fs_dsize * fs->fs_minfree / 100) - 13207 fs->fs_cstotal.cs_nffree, fs->fs_frag)); 13208 } else { 13209 UFS_LOCK(ump); 13210 printf("softdep_request_cleanup: Unknown resource type %d\n", 13211 resource); 13212 return (0); 13213 } 13214 starttime = time_second; 13215retry: 13216 if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 && 13217 fs->fs_cstotal.cs_nbfree <= needed) || 13218 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 && 13219 fs->fs_cstotal.cs_nifree <= needed)) { 13220 ACQUIRE_LOCK(ump); 13221 if (ump->softdep_on_worklist > 0 && 13222 process_worklist_item(UFSTOVFS(ump), 13223 ump->softdep_on_worklist, LK_NOWAIT) != 0) 13224 stat_worklist_push += 1; 13225 FREE_LOCK(ump); 13226 } 13227 /* 13228 * If we still need resources and there are no more worklist 13229 * entries to process to obtain them, we have to start flushing 13230 * the dirty vnodes to force the release of additional requests 13231 * to the worklist that we can then process to reap additional 13232 * resources. We walk the vnodes associated with the mount point 13233 * until we get the needed worklist requests that we can reap.
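 * As a last resort the device vnode itself is fsync'ed, and the scan is retried for as long as worklist items remain to be processed.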
13234 */ 13235 if ((resource == FLUSH_BLOCKS_WAIT && 13236 fs->fs_cstotal.cs_nbfree <= needed) || 13237 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 && 13238 fs->fs_cstotal.cs_nifree <= needed)) { 13239 MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) { 13240 if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) { 13241 VI_UNLOCK(lvp); 13242 continue; 13243 } 13244 if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT, 13245 curthread)) 13246 continue; 13247 if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */ 13248 vput(lvp); 13249 continue; 13250 } 13251 (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0); 13252 vput(lvp); 13253 } 13254 lvp = ump->um_devvp; 13255 if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 13256 VOP_FSYNC(lvp, MNT_NOWAIT, curthread); 13257 VOP_UNLOCK(lvp, 0); 13258 } 13259 if (ump->softdep_on_worklist > 0) { 13260 stat_cleanup_retries += 1; 13261 goto retry; 13262 } 13263 stat_cleanup_failures += 1; 13264 } 13265 if (time_second - starttime > stat_cleanup_high_delay) 13266 stat_cleanup_high_delay = time_second - starttime; 13267 UFS_LOCK(ump); 13268 return (1); 13269} 13270 13271/* 13272 * If memory utilization has gotten too high, deliberately slow things 13273 * down and speed up the I/O processing. 13274 */ 13275static int 13276request_cleanup(mp, resource) 13277 struct mount *mp; 13278 int resource; 13279{ 13280 struct thread *td = curthread; 13281 struct ufsmount *ump; 13282 13283 ump = VFSTOUFS(mp); 13284 LOCK_OWNED(ump); 13285 /* 13286 * We never hold up the filesystem syncer or buf daemon. 13287 */ 13288 if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF)) 13289 return (0); 13290 /* 13291 * First check to see if the work list has gotten backlogged. 13292 * If it has, co-opt this process to help clean up two entries. 13293 * Because this process may hold inodes locked, we cannot 13294 * handle any remove requests that might block on a locked 13295 * inode as that could lead to deadlock. We set TDP_SOFTDEP 13296 * to avoid recursively processing the worklist. 13297 */ 13298 if (ump->softdep_on_worklist > max_softdeps / 10) { 13299 td->td_pflags |= TDP_SOFTDEP; 13300 process_worklist_item(mp, 2, LK_NOWAIT); 13301 td->td_pflags &= ~TDP_SOFTDEP; 13302 stat_worklist_push += 2; 13303 return(1); 13304 } 13305 /* 13306 * Next, we attempt to speed up the syncer process. If that 13307 * is successful, then we allow the process to continue. 13308 */ 13309 if (softdep_speedup(ump) && 13310 resource != FLUSH_BLOCKS_WAIT && 13311 resource != FLUSH_INODES_WAIT) 13312 return(0); 13313 /* 13314 * If we are resource constrained on inode dependencies, try 13315 * flushing some dirty inodes. Otherwise, we are constrained 13316 * by file deletions, so try accelerating flushes of directories 13317 * with removal dependencies. We would like to do the cleanup 13318 * here, but we probably hold an inode locked at this point and 13319 * that might deadlock against one that we try to clean. So, 13320 * the best that we can do is request the syncer daemon to do 13321 * the cleanup for us. 
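 * (The request is made below by bumping req_clear_inodedeps or req_clear_remove; check_clear_deps() picks these up during worklist processing and wakes us via proc_waiting.)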
13322 */ 13323 switch (resource) { 13324 13325 case FLUSH_INODES: 13326 case FLUSH_INODES_WAIT: 13327 ACQUIRE_GBLLOCK(&lk); 13328 stat_ino_limit_push += 1; 13329 req_clear_inodedeps += 1; 13330 FREE_GBLLOCK(&lk); 13331 stat_countp = &stat_ino_limit_hit; 13332 break; 13333 13334 case FLUSH_BLOCKS: 13335 case FLUSH_BLOCKS_WAIT: 13336 ACQUIRE_GBLLOCK(&lk); 13337 stat_blk_limit_push += 1; 13338 req_clear_remove += 1; 13339 FREE_GBLLOCK(&lk); 13340 stat_countp = &stat_blk_limit_hit; 13341 break; 13342 13343 default: 13344 panic("request_cleanup: unknown type"); 13345 } 13346 /* 13347 * Hopefully the syncer daemon will catch up and awaken us. 13348 * We wait at most tickdelay before proceeding in any case. 13349 */ 13350 ACQUIRE_GBLLOCK(&lk); 13351 FREE_LOCK(ump); 13352 proc_waiting += 1; 13353 if (callout_pending(&softdep_callout) == FALSE) 13354 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2, 13355 pause_timer, 0); 13356 13357 msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0); 13358 proc_waiting -= 1; 13359 FREE_GBLLOCK(&lk); 13360 ACQUIRE_LOCK(ump); 13361 return (1); 13362} 13363 13364/* 13365 * Awaken processes pausing in request_cleanup once the timer that 13366 * they are waiting on has expired. Pause_timer 13367 * will be called with the global softdep mutex (&lk) locked. 13368 */ 13369static void 13370pause_timer(arg) 13371 void *arg; 13372{ 13373 13374 GBLLOCK_OWNED(&lk); 13375 /* 13376 * The callout_* API has acquired the mutex and will hold it around 13377 * this function call. 13378 */ 13379 *stat_countp += proc_waiting; 13380 wakeup(&proc_waiting); 13381} 13382 13383/* 13384 * If requested, try removing inode or removal dependencies. 13385 */ 13386static void 13387check_clear_deps(mp) 13388 struct mount *mp; 13389{ 13390 13391 /* 13392 * If we are suspended, it may be because of our using 13393 * too many inodedeps, so help clear them out. 13394 */ 13395 if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended) 13396 clear_inodedeps(mp); 13397 /* 13398 * General requests for cleanup of backed up dependencies. 13399 */ 13400 ACQUIRE_GBLLOCK(&lk); 13401 if (req_clear_inodedeps) { 13402 req_clear_inodedeps -= 1; 13403 FREE_GBLLOCK(&lk); 13404 clear_inodedeps(mp); 13405 ACQUIRE_GBLLOCK(&lk); 13406 wakeup(&proc_waiting); 13407 } 13408 if (req_clear_remove) { 13409 req_clear_remove -= 1; 13410 FREE_GBLLOCK(&lk); 13411 clear_remove(mp); 13412 ACQUIRE_GBLLOCK(&lk); 13413 wakeup(&proc_waiting); 13414 } 13415 FREE_GBLLOCK(&lk); 13416} 13417 13418/* 13419 * Flush out a directory with at least one removal dependency in an effort to 13420 * reduce the number of dirrem, freefile, and freeblks dependency structures.
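 * The pagedep hash is scanned round-robin starting at pagedep_nextclean, and at most one directory is flushed per call.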
13421 */ 13422static void 13423clear_remove(mp) 13424 struct mount *mp; 13425{ 13426 struct pagedep_hashhead *pagedephd; 13427 struct pagedep *pagedep; 13428 struct ufsmount *ump; 13429 struct vnode *vp; 13430 struct bufobj *bo; 13431 int error, cnt; 13432 ino_t ino; 13433 13434 ump = VFSTOUFS(mp); 13435 LOCK_OWNED(ump); 13436 13437 for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) { 13438 pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++]; 13439 if (ump->pagedep_nextclean > ump->pagedep_hash_size) 13440 ump->pagedep_nextclean = 0; 13441 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 13442 if (LIST_EMPTY(&pagedep->pd_dirremhd)) 13443 continue; 13444 ino = pagedep->pd_ino; 13445 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13446 continue; 13447 FREE_LOCK(ump); 13448 13449 /* 13450 * Let unmount clear deps 13451 */ 13452 error = vfs_busy(mp, MBF_NOWAIT); 13453 if (error != 0) 13454 goto finish_write; 13455 error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13456 FFSV_FORCEINSMQ); 13457 vfs_unbusy(mp); 13458 if (error != 0) { 13459 softdep_error("clear_remove: vget", error); 13460 goto finish_write; 13461 } 13462 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13463 softdep_error("clear_remove: fsync", error); 13464 bo = &vp->v_bufobj; 13465 BO_LOCK(bo); 13466 drain_output(vp); 13467 BO_UNLOCK(bo); 13468 vput(vp); 13469 finish_write: 13470 vn_finished_write(mp); 13471 ACQUIRE_LOCK(ump); 13472 return; 13473 } 13474 } 13475} 13476 13477/* 13478 * Clear out a block of dirty inodes in an effort to reduce 13479 * the number of inodedep dependency structures. 13480 */ 13481static void 13482clear_inodedeps(mp) 13483 struct mount *mp; 13484{ 13485 struct inodedep_hashhead *inodedephd; 13486 struct inodedep *inodedep; 13487 struct ufsmount *ump; 13488 struct vnode *vp; 13489 struct fs *fs; 13490 int error, cnt; 13491 ino_t firstino, lastino, ino; 13492 13493 ump = VFSTOUFS(mp); 13494 fs = ump->um_fs; 13495 LOCK_OWNED(ump); 13496 /* 13497 * Pick a random inode dependency to be cleared. 13498 * We will then gather up all the inodes in its block 13499 * that have dependencies and flush them out. 13500 */ 13501 for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) { 13502 inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++]; 13503 if (ump->inodedep_nextclean > ump->inodedep_hash_size) 13504 ump->inodedep_nextclean = 0; 13505 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 13506 break; 13507 } 13508 if (inodedep == NULL) 13509 return; 13510 /* 13511 * Find the last inode in the block with dependencies. 13512 */ 13513 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 13514 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 13515 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0) 13516 break; 13517 /* 13518 * Asynchronously push all but the last inode with dependencies. 13519 * Synchronously push the last inode with dependencies to ensure 13520 * that the inode block gets written to free up the inodedeps. 
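 * (firstino below is id_ino rounded down to an inode-block boundary: with INOPB(fs) == 64, for example, an inodedep for inode 1000 yields the range 960..1023.)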
13521 */ 13522 for (ino = firstino; ino <= lastino; ino++) { 13523 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) 13524 continue; 13525 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13526 continue; 13527 FREE_LOCK(ump); 13528 error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */ 13529 if (error != 0) { 13530 vn_finished_write(mp); 13531 ACQUIRE_LOCK(ump); 13532 return; 13533 } 13534 if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13535 FFSV_FORCEINSMQ)) != 0) { 13536 softdep_error("clear_inodedeps: vget", error); 13537 vfs_unbusy(mp); 13538 vn_finished_write(mp); 13539 ACQUIRE_LOCK(ump); 13540 return; 13541 } 13542 vfs_unbusy(mp); 13543 if (ino == lastino) { 13544 if ((error = ffs_syncvnode(vp, MNT_WAIT, 0))) 13545 softdep_error("clear_inodedeps: fsync1", error); 13546 } else { 13547 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13548 softdep_error("clear_inodedeps: fsync2", error); 13549 BO_LOCK(&vp->v_bufobj); 13550 drain_output(vp); 13551 BO_UNLOCK(&vp->v_bufobj); 13552 } 13553 vput(vp); 13554 vn_finished_write(mp); 13555 ACQUIRE_LOCK(ump); 13556 } 13557} 13558 13559void 13560softdep_buf_append(bp, wkhd) 13561 struct buf *bp; 13562 struct workhead *wkhd; 13563{ 13564 struct worklist *wk; 13565 struct ufsmount *ump; 13566 13567 if ((wk = LIST_FIRST(wkhd)) == NULL) 13568 return; 13569 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 13570 ("softdep_buf_append called on non-softdep filesystem")); 13571 ump = VFSTOUFS(wk->wk_mp); 13572 ACQUIRE_LOCK(ump); 13573 while ((wk = LIST_FIRST(wkhd)) != NULL) { 13574 WORKLIST_REMOVE(wk); 13575 WORKLIST_INSERT(&bp->b_dep, wk); 13576 } 13577 FREE_LOCK(ump); 13578 13579} 13580 13581void 13582softdep_inode_append(ip, cred, wkhd) 13583 struct inode *ip; 13584 struct ucred *cred; 13585 struct workhead *wkhd; 13586{ 13587 struct buf *bp; 13588 struct fs *fs; 13589 int error; 13590 13591 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 13592 ("softdep_inode_append called on non-softdep filesystem")); 13593 fs = ip->i_fs; 13594 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 13595 (int)fs->fs_bsize, cred, &bp); 13596 if (error) { 13597 bqrelse(bp); 13598 softdep_freework(wkhd); 13599 return; 13600 } 13601 softdep_buf_append(bp, wkhd); 13602 bqrelse(bp); 13603} 13604 13605void 13606softdep_freework(wkhd) 13607 struct workhead *wkhd; 13608{ 13609 struct worklist *wk; 13610 struct ufsmount *ump; 13611 13612 if ((wk = LIST_FIRST(wkhd)) == NULL) 13613 return; 13614 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 13615 ("softdep_freework called on non-softdep filesystem")); 13616 ump = VFSTOUFS(wk->wk_mp); 13617 ACQUIRE_LOCK(ump); 13618 handle_jwork(wkhd); 13619 FREE_LOCK(ump); 13620} 13621 13622/* 13623 * Function to determine if the buffer has outstanding dependencies 13624 * that will cause a roll-back if the buffer is written. If wantcount 13625 * is set, return number of dependencies, otherwise just yes or no. 
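 * (softdep_sync_buf() uses the yes/no form to decide whether writing a VCHR buffer would trigger rollbacks.)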
13626 */ 13627static int 13628softdep_count_dependencies(bp, wantcount) 13629 struct buf *bp; 13630 int wantcount; 13631{ 13632 struct worklist *wk; 13633 struct ufsmount *ump; 13634 struct bmsafemap *bmsafemap; 13635 struct freework *freework; 13636 struct inodedep *inodedep; 13637 struct indirdep *indirdep; 13638 struct freeblks *freeblks; 13639 struct allocindir *aip; 13640 struct pagedep *pagedep; 13641 struct dirrem *dirrem; 13642 struct newblk *newblk; 13643 struct mkdir *mkdir; 13644 struct diradd *dap; 13645 int i, retval; 13646 13647 retval = 0; 13648 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 13649 return (0); 13650 ump = VFSTOUFS(wk->wk_mp); 13651 ACQUIRE_LOCK(ump); 13652 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 13653 switch (wk->wk_type) { 13654 13655 case D_INODEDEP: 13656 inodedep = WK_INODEDEP(wk); 13657 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 13658 /* bitmap allocation dependency */ 13659 retval += 1; 13660 if (!wantcount) 13661 goto out; 13662 } 13663 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 13664 /* direct block pointer dependency */ 13665 retval += 1; 13666 if (!wantcount) 13667 goto out; 13668 } 13669 if (TAILQ_FIRST(&inodedep->id_extupdt)) { 13670 /* direct block pointer dependency */ 13671 retval += 1; 13672 if (!wantcount) 13673 goto out; 13674 } 13675 if (TAILQ_FIRST(&inodedep->id_inoreflst)) { 13676 /* Add reference dependency. */ 13677 retval += 1; 13678 if (!wantcount) 13679 goto out; 13680 } 13681 continue; 13682 13683 case D_INDIRDEP: 13684 indirdep = WK_INDIRDEP(wk); 13685 13686 TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) { 13687 /* indirect truncation dependency */ 13688 retval += 1; 13689 if (!wantcount) 13690 goto out; 13691 } 13692 13693 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 13694 /* indirect block pointer dependency */ 13695 retval += 1; 13696 if (!wantcount) 13697 goto out; 13698 } 13699 continue; 13700 13701 case D_PAGEDEP: 13702 pagedep = WK_PAGEDEP(wk); 13703 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 13704 if (LIST_FIRST(&dirrem->dm_jremrefhd)) { 13705 /* Journal remove ref dependency. */ 13706 retval += 1; 13707 if (!wantcount) 13708 goto out; 13709 } 13710 } 13711 for (i = 0; i < DAHASHSZ; i++) { 13712 13713 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 13714 /* directory entry dependency */ 13715 retval += 1; 13716 if (!wantcount) 13717 goto out; 13718 } 13719 } 13720 continue; 13721 13722 case D_BMSAFEMAP: 13723 bmsafemap = WK_BMSAFEMAP(wk); 13724 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) { 13725 /* Add reference dependency. */ 13726 retval += 1; 13727 if (!wantcount) 13728 goto out; 13729 } 13730 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) { 13731 /* Allocate block dependency. */ 13732 retval += 1; 13733 if (!wantcount) 13734 goto out; 13735 } 13736 continue; 13737 13738 case D_FREEBLKS: 13739 freeblks = WK_FREEBLKS(wk); 13740 if (LIST_FIRST(&freeblks->fb_jblkdephd)) { 13741 /* Freeblk journal dependency. */ 13742 retval += 1; 13743 if (!wantcount) 13744 goto out; 13745 } 13746 continue; 13747 13748 case D_ALLOCDIRECT: 13749 case D_ALLOCINDIR: 13750 newblk = WK_NEWBLK(wk); 13751 if (newblk->nb_jnewblk) { 13752 /* Journal allocate dependency. */ 13753 retval += 1; 13754 if (!wantcount) 13755 goto out; 13756 } 13757 continue; 13758 13759 case D_MKDIR: 13760 mkdir = WK_MKDIR(wk); 13761 if (mkdir->md_jaddref) { 13762 /* Journal reference dependency. 
*/ 13763 retval += 1; 13764 if (!wantcount) 13765 goto out; 13766 } 13767 continue; 13768 13769 case D_FREEWORK: 13770 case D_FREEDEP: 13771 case D_JSEGDEP: 13772 case D_JSEG: 13773 case D_SBDEP: 13774 /* never a dependency on these blocks */ 13775 continue; 13776 13777 default: 13778 panic("softdep_count_dependencies: Unexpected type %s", 13779 TYPENAME(wk->wk_type)); 13780 /* NOTREACHED */ 13781 } 13782 } 13783out: 13784 FREE_LOCK(ump); 13785 return (retval); 13786} 13787 13788/* 13789 * Acquire exclusive access to a buffer. 13790 * Must be called with the lock parameter held. 13791 * Return acquired buffer or NULL on failure. 13792 */ 13793static struct buf * 13794getdirtybuf(bp, lock, waitfor) 13795 struct buf *bp; 13796 struct rwlock *lock; 13797 int waitfor; 13798{ 13799 int error; 13800 13801 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) { 13802 if (waitfor != MNT_WAIT) 13803 return (NULL); 13804 error = BUF_LOCK(bp, 13805 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock); 13806 /* 13807 * Even if we successfully acquire bp here, we have dropped 13808 * the lock, which may violate our guarantee. 13809 */ 13810 if (error == 0) 13811 BUF_UNLOCK(bp); 13812 else if (error != ENOLCK) 13813 panic("getdirtybuf: inconsistent lock: %d", error); 13814 rw_wlock(lock); 13815 return (NULL); 13816 } 13817 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { 13818 if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) { 13819 rw_wunlock(lock); 13820 BO_LOCK(bp->b_bufobj); 13821 BUF_UNLOCK(bp); 13822 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { 13823 bp->b_vflags |= BV_BKGRDWAIT; 13824 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), 13825 PRIBIO | PDROP, "getbuf", 0); 13826 } else 13827 BO_UNLOCK(bp->b_bufobj); 13828 rw_wlock(lock); 13829 return (NULL); 13830 } 13831 BUF_UNLOCK(bp); 13832 if (waitfor != MNT_WAIT) 13833 return (NULL); 13834 /* 13835 * The lock argument must be bp->b_vp's mutex in 13836 * this case. 13837 */ 13838#ifdef DEBUG_VFS_LOCKS 13839 if (bp->b_vp->v_type != VCHR) 13840 ASSERT_BO_WLOCKED(bp->b_bufobj); 13841#endif 13842 bp->b_vflags |= BV_BKGRDWAIT; 13843 rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0); 13844 return (NULL); 13845 } 13846 if ((bp->b_flags & B_DELWRI) == 0) { 13847 BUF_UNLOCK(bp); 13848 return (NULL); 13849 } 13850 bremfree(bp); 13851 return (bp); 13852} 13853 13854 13855/* 13856 * Check if it is safe to suspend the file system now. On entry, 13857 * the vnode interlock for devvp should be held. Return 0 with 13858 * the mount interlock held if the file system can be suspended now, 13859 * otherwise return EAGAIN with the mount interlock held. 13860 */ 13861int 13862softdep_check_suspend(struct mount *mp, 13863 struct vnode *devvp, 13864 int softdep_depcnt, 13865 int softdep_accdepcnt, 13866 int secondary_writes, 13867 int secondary_accwrites) 13868{ 13869 struct bufobj *bo; 13870 struct ufsmount *ump; 13871 struct inodedep *inodedep; 13872 int error, unlinked; 13873 13874 bo = &devvp->v_bufobj; 13875 ASSERT_BO_WLOCKED(bo); 13876 13877 /* 13878 * If we are not running with soft updates, then we need only 13879 * deal with secondary writes as we try to suspend. 13880 */ 13881 if (MOUNTEDSOFTDEP(mp) == 0) { 13882 MNT_ILOCK(mp); 13883 while (mp->mnt_secondary_writes != 0) { 13884 BO_UNLOCK(bo); 13885 msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), 13886 (PUSER - 1) | PDROP, "secwr", 0); 13887 BO_LOCK(bo); 13888 MNT_ILOCK(mp); 13889 } 13890 13891 /* 13892 * Reasons for needing more work before suspend: 13893 * - Dirty buffers on devvp.
13894 * - Secondary writes occurred after start of vnode sync loop 13895 */ 13896 error = 0; 13897 if (bo->bo_numoutput > 0 || 13898 bo->bo_dirty.bv_cnt > 0 || 13899 secondary_writes != 0 || 13900 mp->mnt_secondary_writes != 0 || 13901 secondary_accwrites != mp->mnt_secondary_accwrites) 13902 error = EAGAIN; 13903 BO_UNLOCK(bo); 13904 return (error); 13905 } 13906 13907 /* 13908 * If we are running with soft updates, then we need to coordinate 13909 * with them as we try to suspend. 13910 */ 13911 ump = VFSTOUFS(mp); 13912 for (;;) { 13913 if (!TRY_ACQUIRE_LOCK(ump)) { 13914 BO_UNLOCK(bo); 13915 ACQUIRE_LOCK(ump); 13916 FREE_LOCK(ump); 13917 BO_LOCK(bo); 13918 continue; 13919 } 13920 MNT_ILOCK(mp); 13921 if (mp->mnt_secondary_writes != 0) { 13922 FREE_LOCK(ump); 13923 BO_UNLOCK(bo); 13924 msleep(&mp->mnt_secondary_writes, 13925 MNT_MTX(mp), 13926 (PUSER - 1) | PDROP, "secwr", 0); 13927 BO_LOCK(bo); 13928 continue; 13929 } 13930 break; 13931 } 13932 13933 unlinked = 0; 13934 if (MOUNTEDSUJ(mp)) { 13935 for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked); 13936 inodedep != NULL; 13937 inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { 13938 if ((inodedep->id_state & (UNLINKED | UNLINKLINKS | 13939 UNLINKONLIST)) != (UNLINKED | UNLINKLINKS | 13940 UNLINKONLIST) || 13941 !check_inodedep_free(inodedep)) 13942 continue; 13943 unlinked++; 13944 } 13945 } 13946 13947 /* 13948 * Reasons for needing more work before suspend: 13949 * - Dirty buffers on devvp. 13950 * - Softdep activity occurred after start of vnode sync loop 13951 * - Secondary writes occurred after start of vnode sync loop 13952 */ 13953 error = 0; 13954 if (bo->bo_numoutput > 0 || 13955 bo->bo_dirty.bv_cnt > 0 || 13956 softdep_depcnt != unlinked || 13957 ump->softdep_deps != unlinked || 13958 softdep_accdepcnt != ump->softdep_accdeps || 13959 secondary_writes != 0 || 13960 mp->mnt_secondary_writes != 0 || 13961 secondary_accwrites != mp->mnt_secondary_accwrites) 13962 error = EAGAIN; 13963 FREE_LOCK(ump); 13964 BO_UNLOCK(bo); 13965 return (error); 13966} 13967 13968 13969/* 13970 * Get the number of dependency structures for the file system, both 13971 * the current number and the total number allocated. These will 13972 * later be used to detect that softdep processing has occurred. 13973 */ 13974void 13975softdep_get_depcounts(struct mount *mp, 13976 int *softdep_depsp, 13977 int *softdep_accdepsp) 13978{ 13979 struct ufsmount *ump; 13980 13981 if (MOUNTEDSOFTDEP(mp) == 0) { 13982 *softdep_depsp = 0; 13983 *softdep_accdepsp = 0; 13984 return; 13985 } 13986 ump = VFSTOUFS(mp); 13987 ACQUIRE_LOCK(ump); 13988 *softdep_depsp = ump->softdep_deps; 13989 *softdep_accdepsp = ump->softdep_accdeps; 13990 FREE_LOCK(ump); 13991} 13992 13993/* 13994 * Wait for pending output on a vnode to complete. 13995 * Must be called with vnode lock and interlock locked. 13996 * 13997 * XXX: Should just be a call to bufobj_wwait(). 13998 */ 13999static void 14000drain_output(vp) 14001 struct vnode *vp; 14002{ 14003 struct bufobj *bo; 14004 14005 bo = &vp->v_bufobj; 14006 ASSERT_VOP_LOCKED(vp, "drain_output"); 14007 ASSERT_BO_WLOCKED(bo); 14008 14009 while (bo->bo_numoutput) { 14010 bo->bo_flag |= BO_WWAIT; 14011 msleep((caddr_t)&bo->bo_numoutput, 14012 BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0); 14013 } 14014} 14015 14016/* 14017 * Called whenever a buffer that is being invalidated or reallocated 14018 * contains dependencies. This should only happen if an I/O error has 14019 * occurred. The routine is called with the buffer locked. 
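 * Only an ENXIO error (the underlying device has gone away) is tolerated here; any other unrecovered I/O error panics, since the dependencies can no longer be resolved safely.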
14020 */ 14021static void 14022softdep_deallocate_dependencies(bp) 14023 struct buf *bp; 14024{ 14025 14026 if ((bp->b_ioflags & BIO_ERROR) == 0) 14027 panic("softdep_deallocate_dependencies: dangling deps"); 14028 if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL) 14029 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 14030 else 14031 printf("softdep_deallocate_dependencies: " 14032 "got error %d while accessing filesystem\n", bp->b_error); 14033 if (bp->b_error != ENXIO) 14034 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 14035} 14036 14037/* 14038 * Function to handle asynchronous write errors in the filesystem. 14039 */ 14040static void 14041softdep_error(func, error) 14042 char *func; 14043 int error; 14044{ 14045 14046 /* XXX should do something better! */ 14047 printf("%s: got error %d while accessing filesystem\n", func, error); 14048} 14049 14050#ifdef DDB 14051 14052static void 14053inodedep_print(struct inodedep *inodedep, int verbose) 14054{ 14055 db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d" 14056 " saveino %p\n", 14057 inodedep, inodedep->id_fs, inodedep->id_state, 14058 (intmax_t)inodedep->id_ino, 14059 (intmax_t)fsbtodb(inodedep->id_fs, 14060 ino_to_fsba(inodedep->id_fs, inodedep->id_ino)), 14061 inodedep->id_nlinkdelta, inodedep->id_savednlink, 14062 inodedep->id_savedino1); 14063 14064 if (verbose == 0) 14065 return; 14066 14067 db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, " 14068 "mkdiradd %p\n", 14069 LIST_FIRST(&inodedep->id_pendinghd), 14070 LIST_FIRST(&inodedep->id_bufwait), 14071 LIST_FIRST(&inodedep->id_inowait), 14072 TAILQ_FIRST(&inodedep->id_inoreflst), 14073 inodedep->id_mkdiradd); 14074 db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n", 14075 TAILQ_FIRST(&inodedep->id_inoupdt), 14076 TAILQ_FIRST(&inodedep->id_newinoupdt), 14077 TAILQ_FIRST(&inodedep->id_extupdt), 14078 TAILQ_FIRST(&inodedep->id_newextupdt)); 14079} 14080 14081DB_SHOW_COMMAND(inodedep, db_show_inodedep) 14082{ 14083 14084 if (have_addr == 0) { 14085 db_printf("Address required\n"); 14086 return; 14087 } 14088 inodedep_print((struct inodedep*)addr, 1); 14089} 14090 14091DB_SHOW_COMMAND(inodedeps, db_show_inodedeps) 14092{ 14093 struct inodedep_hashhead *inodedephd; 14094 struct inodedep *inodedep; 14095 struct ufsmount *ump; 14096 int cnt; 14097 14098 if (have_addr == 0) { 14099 db_printf("Address required\n"); 14100 return; 14101 } 14102 ump = (struct ufsmount *)addr; 14103 for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) { 14104 inodedephd = &ump->inodedep_hashtbl[cnt]; 14105 LIST_FOREACH(inodedep, inodedephd, id_hash) { 14106 inodedep_print(inodedep, 0); 14107 } 14108 } 14109} 14110 14111DB_SHOW_COMMAND(worklist, db_show_worklist) 14112{ 14113 struct worklist *wk; 14114 14115 if (have_addr == 0) { 14116 db_printf("Address required\n"); 14117 return; 14118 } 14119 wk = (struct worklist *)addr; 14120 db_printf("worklist: %p type %s state 0x%X\n", 14121 wk, TYPENAME(wk->wk_type), wk->wk_state); 14122} 14123 14124DB_SHOW_COMMAND(workhead, db_show_workhead) 14125{ 14126 struct workhead *wkhd; 14127 struct worklist *wk; 14128 int i; 14129 14130 if (have_addr == 0) { 14131 db_printf("Address required\n"); 14132 return; 14133 } 14134 wkhd = (struct workhead *)addr; 14135 wk = LIST_FIRST(wkhd); 14136 for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list)) 14137 db_printf("worklist: %p type %s state 0x%X", 14138 wk, TYPENAME(wk->wk_type), wk->wk_state); 14139 if (i == 100) 14140
db_printf("workhead overflow"); 14141 printf("\n"); 14142} 14143 14144 14145DB_SHOW_COMMAND(mkdirs, db_show_mkdirs) 14146{ 14147 struct mkdirlist *mkdirlisthd; 14148 struct jaddref *jaddref; 14149 struct diradd *diradd; 14150 struct mkdir *mkdir; 14151 14152 if (have_addr == 0) { 14153 db_printf("Address required\n"); 14154 return; 14155 } 14156 mkdirlisthd = (struct mkdirlist *)addr; 14157 LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) { 14158 diradd = mkdir->md_diradd; 14159 db_printf("mkdir: %p state 0x%X dap %p state 0x%X", 14160 mkdir, mkdir->md_state, diradd, diradd->da_state); 14161 if ((jaddref = mkdir->md_jaddref) != NULL) 14162 db_printf(" jaddref %p jaddref state 0x%X", 14163 jaddref, jaddref->ja_state); 14164 db_printf("\n"); 14165 } 14166} 14167 14168/* exported to ffs_vfsops.c */ 14169extern void db_print_ffs(struct ufsmount *ump); 14170void 14171db_print_ffs(struct ufsmount *ump) 14172{ 14173 db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n", 14174 ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname, 14175 ump->um_devvp, ump->um_fs, ump->softdep_on_worklist, 14176 ump->softdep_deps, ump->softdep_req); 14177} 14178 14179#endif /* DDB */ 14180 14181#endif /* SOFTUPDATES */ 14182