ffs_softdep.c revision 103946
/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/ufs/ffs/ffs_softdep.c 103946 2002-09-25 02:49:48Z jeff $");

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/stdint.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
static MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
static MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
static MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
static MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
static MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
static MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
static MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
static MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
static MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
static MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
static MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
static MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
static MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");
static MALLOC_DEFINE(M_NEWDIRBLK, "newdirblk", "Unclaimed new directory block");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_NEWDIRBLK	13
#define	D_LAST		D_NEWDIRBLK

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
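/*
 * Example (illustrative only): because memtype[] mirrors the D_XXX
 * constants above, a workitem can be released through its type tag
 * alone, e.g. FREE(item, DtoM(D_DIRADD)) is equivalent to
 * FREE(item, M_DIRADD). The WORKITEM_FREE() macro below relies on this.
 */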
/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *, struct vnode *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	void free_newdirblk(struct newdirblk *);
static	int indir_trunc(struct freeblks *, ufs2_daddr_t, int, ufs_lbn_t,
	    ufs2_daddr_t *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *, int);
static	void merge_inode_lists(struct allocdirectlst *, struct allocdirectlst *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int, struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(int, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);
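/*
 * Note: these routines are not called directly by the rest of the
 * kernel; softdep_initialize() below wires them into the bioops vector
 * (e.g. bioops.io_start = softdep_disk_io_initiation) so that the
 * buffer cache invokes them at I/O initiation, completion, and so on.
 */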
/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts. For a multiprocessor, this lock would have to be
 * a mutex. A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete. In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls. Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
#ifndef /* NOT */ DEBUG
static struct lockit {
	int	lkt_spl;
} lk = { 0 };
#define ACQUIRE_LOCK(lk)		(lk)->lkt_spl = splbio()
#define FREE_LOCK(lk)			splx((lk)->lkt_spl)

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct	thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static	void acquire_lock(struct lockit *);
static	void free_lock(struct lockit *);
void	softdep_panic(char *);

#define ACQUIRE_LOCK(lk)		acquire_lock(lk)
#define FREE_LOCK(lk)			free_lock(lk)

static void
acquire_lock(lk)
	struct lockit *lk;
{
	struct thread *holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	lk->lkt_spl = splbio();
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(lk)
	struct lockit *lk;
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	splx(lk->lkt_spl);
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(msg)
	char *msg;
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */
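/*
 * Usage sketch (illustrative): every manipulation of the dependency
 * lists brackets its work with the single lock, e.g.
 *
 *	ACQUIRE_LOCK(&lk);
 *	... examine or modify dependency structures ...
 *	FREE_LOCK(&lk);
 *
 * and the DEBUG variants additionally verify, via lkt_held, that the
 * lock is neither recursed on nor released by a non-owner.
 */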
static	int interlocked_sleep(struct lockit *, int, void *, struct mtx *, int,
	    const char *, int);

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping. We restore it after we have slept. This routine
 * wraps the interlocking with functions that sleep. The list
 * below enumerates the available set of operations.
 */
#define	UNKNOWN		0
#define	SLEEP		1
#define	LOCKBUF		2

static int
interlocked_sleep(lk, op, ident, mtx, flags, wmesg, timo)
	struct lockit *lk;
	int op;
	void *ident;
	struct mtx *mtx;
	int flags;
	const char *wmesg;
	int timo;
{
	struct thread *holder;
	int s, retval;

	s = lk->lkt_spl;
#	ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
#	endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = msleep(ident, mtx, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
#	ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
#	endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}

/*
 * Placeholder for real semaphores.
 */
struct sema {
	int	value;
	struct	thread *holder;
	char	*name;
	int	prio;
	int	timo;
};
static	void sema_init(struct sema *, char *, int, int);
static	int sema_get(struct sema *, struct lockit *);
static	void sema_release(struct sema *);

static void
sema_init(semap, name, prio, timo)
	struct sema *semap;
	char *name;
	int prio, timo;
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(semap, interlock)
	struct sema *semap;
	struct lockit *interlock;
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    NULL, semap->prio, semap->name,
			    semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(semap)
	struct sema *semap;
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}
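/*
 * Usage sketch (illustrative): the lookup routines below use one of
 * these semaphores to serialize hash-table insertions. A failed get
 * means another thread is mid-insertion, so the caller retakes the
 * lock and retries its lookup:
 *
 *	if (sema_get(&pagedep_in_progress, &lk) == 0) {
 *		ACQUIRE_LOCK(&lk);
 *		goto top;
 *	}
 *	... allocate and hash the new entry ...
 *	sema_release(&pagedep_in_progress);
 */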
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	FREE(item, DtoM(type));
}
#endif /* DEBUG */
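/*
 * Typical caller pattern (sketch): a dependency is attached, under the
 * lock, to the buffer whose write it must track, e.g.
 *
 *	ACQUIRE_LOCK(&lk);
 *	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
 *	FREE_LOCK(&lk);
 *
 * and is taken off with WORKLIST_REMOVE() before being released with
 * WORKITEM_FREE().
 */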
/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout_handle handle; /* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
#define FLUSH_REMOVE_WAIT	3
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs; /* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0, "");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}
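/*
 * Note: worklist_tail caches the most recently appended item so the
 * append above stays O(1) even though the LIST_* macros keep no tail
 * pointer; this relies on add_to_worklist() being the sole routine
 * that ever adds items to the list.
 */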
/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
int
softdep_process_worklist(matchmnt)
	struct mount *matchmnt;
{
	struct thread *td = curthread;
	int cnt, matchcnt, loopcount;
	long starttime;

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0)
			return (-1);
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		if ((cnt = process_worklist_item(matchmnt, 0)) == -1)
			break;
		else
			matchcnt += cnt;

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillwrite();
		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		softdep_worklist_busy -= 1;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
	return (matchcnt);
}
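/*
 * The routine above is reached through softdep_process_worklist_hook,
 * installed by softdep_initialize() below: the syncer passes a NULL
 * matchmnt for ordinary background processing, while
 * softdep_flushworklist() passes a mount point so that the items
 * belonging to it are counted.
 */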
/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(matchmnt, flags)
	struct mount *matchmnt;
	int flags;
{
	struct worklist *wk;
	struct mount *mp;
	struct vnode *vp;
	int matchcnt = 0;

	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	vp = NULL;
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if (wk->wk_state & INPROGRESS)
			continue;
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		wk->wk_state |= INPROGRESS;
		FREE_LOCK(&lk);
		VFS_VGET(WK_DIRREM(wk)->dm_mnt, WK_DIRREM(wk)->dm_oldinum,
		    LK_NOWAIT | LK_EXCLUSIVE, &vp);
		ACQUIRE_LOCK(&lk);
		wk->wk_state &= ~INPROGRESS;
		if (vp != NULL)
			break;
	}
	if (wk == 0) {
		FREE_LOCK(&lk);
		return (-1);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		mp = WK_DIRREM(wk)->dm_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: dirrem on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk), vp);
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		mp = WK_FREEBLKS(wk)->fb_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freeblks on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk), flags & LK_NOWAIT);
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		mp = WK_FREEFRAG(wk)->ff_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freefrag on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		mp = WK_FREEFILE(wk)->fx_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freefile on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
static void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = 0;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == 0)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}
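/*
 * Note: the wktail pointer above appends each dependency after the one
 * moved before it, preserving the original relative order of the items
 * on the new buffer's dependency list.
 */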
/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	int count, error = 0;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, PRIBIO, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
	 */
	*countp = 0;
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	while ((count = softdep_process_worklist(oldmnt)) > 0) {
		*countp += count;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(devvp, td->td_ucred, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0, td);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);
	return (error);
}

/*
 * Flush all vnodes and worklist items associated with a specified mount point.
 */
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{
	int error, count, loopcnt;

	error = 0;

	/*
	 * Alternately flush the vnodes associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	for (loopcnt = 10; loopcnt > 0; loopcnt--) {
		/*
		 * Do another flush in case any vnodes were brought in
		 * as part of the cleanup operations.
		 */
		if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
			break;
		if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 ||
		    count == 0)
			break;
	}
	/*
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Look up a pagedep. Return 1 if found, 0 if not found or found
 * when asked to allocate but not associated with any buffer.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	LIST_FOREACH(pagedep, pagedephd, pd_hash)
		if (ip->i_number == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt)
			break;
	if (pagedep) {
		*pagedeppp = pagedep;
		if ((flags & DEPALLOC) != 0 &&
		    (pagedep->pd_state & ONWORKLIST) == 0)
			return (0);
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*pagedeppp = NULL;
		return (0);
	}
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS|M_ZERO);
	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}
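/*
 * Caller sketch (illustrative): a zero return with DEPALLOC set means
 * the pagedep is not yet on any buffer's worklist, which is how the
 * allocation routines below decide to attach it, e.g.
 *
 *	if ((ip->i_mode & IFMT) == IFDIR &&
 *	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
 *		WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
 */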
/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define	INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(fs, inum, flags, inodedeppp)
	struct fs *fs;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			break;
	if (inodedep) {
		*inodedeppp = inodedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*inodedeppp = NULL;
		return (0);
	}
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	num_inodedep += 1;
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS);
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino1 = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	TAILQ_INIT(&inodedep->id_extupdt);
	TAILQ_INIT(&inodedep->id_newextupdt);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}
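/*
 * Note the throttle above: once num_inodedep exceeds max_softdeps, the
 * first allocating lookup asks request_cleanup(FLUSH_INODES, 1) to have
 * the syncer shed some inodedeps before retrying; callers that cannot
 * tolerate the delay pass NODELAY to bypass it.
 */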
/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs2_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	LIST_FOREACH(newblk, newblkhd, nb_hash)
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			break;
	if (newblk) {
		*newblkpp = newblk;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*newblkpp = NULL;
		return (0);
	}
	if (sema_get(&newblk_in_progress, 0) == 0)
		goto top;
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
	    M_NEWBLK, M_SOFTDEP_FLAGS);
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}
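/*
 * Lifecycle note: a newblk is created (DEPALLOC) by
 * softdep_setup_blkmapdep() when a block is taken from the cylinder
 * group map, then looked up and consumed by softdep_setup_allocdirect()
 * or setup_allocindir_phase2(), which transfer its bitmap dependency to
 * an allocdirect or allocindir and free the newblk.
 */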
/*
 * Executed during filesystem initialization before
 * mounting any filesystems.
 */
void
softdep_initialize()
{

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = desiredvnodes * 8;
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", PRIBIO, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", PRIBIO, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", PRIBIO, 0);

	/* hooks through which the main kernel code calls us */
	softdep_process_worklist_hook = softdep_process_worklist;
	softdep_fsync_hook = softdep_fsync;

	/* initialise bioops hack */
	bioops.io_start = softdep_disk_io_initiation;
	bioops.io_complete = softdep_disk_write_complete;
	bioops.io_deallocate = softdep_deallocate_dependencies;
	bioops.io_movedeps = softdep_move_dependencies;
	bioops.io_countdeps = softdep_count_dependencies;
}
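/*
 * Sizing note: the hash tables above scale with desiredvnodes, and
 * max_softdeps (eight dependency structures per desired vnode) is the
 * high-water mark at which the lookup routines start asking
 * request_cleanup() to throttle new allocations.
 */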
/*
 * Executed after all filesystems have been unmounted during
 * filesystem module unload.
 */
void
softdep_uninitialize()
{

	softdep_process_worklist_hook = NULL;
	softdep_fsync_hook = NULL;
	hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash);
	hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash);
	hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{
	struct csum_total cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, cred, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicates that a live inode or block is
 * free. So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers. When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset. The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation. The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated. When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps. These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector. If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not. (2) Some of the counts are located in the
 * superblock rather than the cylinder group block. So, we focus our soft
 * updates implementation on protecting the bitmaps. When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 */
void
softdep_setup_blkmapdep(bp, fs, newblkno)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct fs *fs;		/* filesystem doing allocation */
	ufs2_daddr_t newblkno;	/* number of newly allocated block */
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}
/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one. The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(bp)
	struct buf *bp;
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	FREE_LOCK(&lk);
	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
	return (bmsafemap);
}
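/*
 * Note: when no bmsafemap exists yet, the routine above drops the
 * softdep lock around the MALLOC and retakes it before inserting, so
 * callers must not assume the dependency lists were left unchanged
 * across the call.
 */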
/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them. Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer. These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode. Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures. These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded). All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended). In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated. In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete). The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains. This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t lbn;		/* block pointer within inode */
	ufs2_daddr_t newblkno;	/* disk block number being added */
	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	LIST_INIT(&adp->ad_newdirblk);
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}
/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 */
static void
allocdirect_merge(adphead, newadp, oldadp)
	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
	struct allocdirect *newadp;	/* allocdirect being added */
	struct allocdirect *oldadp;	/* existing allocdirect being checked */
{
	struct worklist *wk;
	struct freefrag *freefrag;
	struct newdirblk *newdirblk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("%s %jd != new %jd || old size %ld != new %ld",
		    "allocdirect_merge: old blkno",
		    (intmax_t)newadp->ad_oldblkno,
		    (intmax_t)oldadp->ad_newblkno,
		    newadp->ad_oldsize, oldadp->ad_newsize);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect. It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free. This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	/*
	 * If we are tracking a new directory-block allocation,
	 * move it from the old allocdirect to the new allocdirect.
	 */
	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
		newdirblk = WK_NEWDIRBLK(wk);
		WORKLIST_REMOVE(&newdirblk->db_list);
		if (LIST_FIRST(&oldadp->ad_newdirblk) != NULL)
			panic("allocdirect_merge: extra newdirblk");
		WORKLIST_INSERT(&newadp->ad_newdirblk, &newdirblk->db_list);
	}
	free_allocdirect(adphead, oldadp, 0);
}
/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(ip, blkno, size)
	struct inode *ip;
	ufs2_daddr_t blkno;
	long size;
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
	    M_FREEFRAG, M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = 0;
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_mnt = ITOV(ip)->v_mount;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{
	struct ufsmount *ump = VFSTOUFS(freefrag->ff_mnt);

	ffs_blkfree(ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
	    freefrag->ff_fragsize, freefrag->ff_inum);
	FREE(freefrag, M_FREEFRAG);
}

/*
 * Set up a dependency structure for an external attributes data block.
 * This routine follows much of the structure of softdep_setup_allocdirect.
 * See the description of softdep_setup_allocdirect above for details.
 */
void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED | EXTDATA;
	LIST_INIT(&adp->ad_newdirblk);
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocext: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NXADDR) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocext: lbn %lld > NXADDR",
		    (long long)lbn);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newextupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocext: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}
Because an indirect block contains many pointers that
1679 * may have dependencies, a second copy of the entire in-memory indirect
1680 * block is kept. The buffer cache copy is always completely up-to-date.
1681 * The second copy, which is used only as a source for disk writes,
1682 * contains only the safe pointers (i.e., those that have no remaining
1683 * update dependencies). The second copy is freed when all pointers
1684 * are safe. The cache is not allowed to replace indirect blocks with
1685 * pending update dependencies. If a buffer containing an indirect
1686 * block with dependencies is written, these routines will mark it
1687 * dirty again. It can only be successfully written once all the
1688 * dependencies are removed. The ffs_fsync routine and
1689 * softdep_sync_metadata work together to get all the dependencies
1690 * removed so that a file can be successfully written to disk. Three
1691 * procedures are used when setting up indirect block pointer
1692 * dependencies. The division is necessary because of the organization
1693 * of the "balloc" routine and because of the distinction between file
1694 * pages and file metadata blocks.
1695 */
1696 
1697/*
1698 * Allocate a new allocindir structure.
1699 */
1700static struct allocindir *
1701newallocindir(ip, ptrno, newblkno, oldblkno)
1702 struct inode *ip; /* inode for file being extended */
1703 int ptrno; /* offset of pointer in indirect block */
1704 ufs2_daddr_t newblkno; /* disk block number being added */
1705 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */
1706{
1707 struct allocindir *aip;
1708 
1709 MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
1710 M_ALLOCINDIR, M_SOFTDEP_FLAGS|M_ZERO);
1711 aip->ai_list.wk_type = D_ALLOCINDIR;
1712 aip->ai_state = ATTACHED;
1713 aip->ai_offset = ptrno;
1714 aip->ai_newblkno = newblkno;
1715 aip->ai_oldblkno = oldblkno;
1716 aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
1717 return (aip);
1718}
1719 
1720/*
1721 * Called just before setting an indirect block pointer
1722 * to a newly allocated file page.
1723 */
1724void
1725softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
1726 struct inode *ip; /* inode for file being extended */
1727 ufs_lbn_t lbn; /* allocated block number within file */
1728 struct buf *bp; /* buffer with indirect blk referencing page */
1729 int ptrno; /* offset of pointer in indirect block */
1730 ufs2_daddr_t newblkno; /* disk block number being added */
1731 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */
1732 struct buf *nbp; /* buffer holding allocated page */
1733{
1734 struct allocindir *aip;
1735 struct pagedep *pagedep;
1736 
1737 aip = newallocindir(ip, ptrno, newblkno, oldblkno);
1738 ACQUIRE_LOCK(&lk);
1739 /*
1740 * If we are allocating a directory page, then we must
1741 * allocate an associated pagedep to track additions and
1742 * deletions.
1743 */
1744 if ((ip->i_mode & IFMT) == IFDIR &&
1745 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
1746 WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
1747 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
1748 FREE_LOCK(&lk);
1749 setup_allocindir_phase2(bp, ip, aip);
1750}
1751 
1752/*
1753 * Called just before setting an indirect block pointer to a
1754 * newly allocated indirect block.
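 * Unlike softdep_setup_allocindir_page() above, no pagedep is needed
 * here, as an indirect block never holds directory entries, and the
 * old block number passed to newallocindir() is always zero because a
 * newly allocated indirect block never replaces an existing fragment.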
1755 */ 1756void 1757softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 1758 struct buf *nbp; /* newly allocated indirect block */ 1759 struct inode *ip; /* inode for file being extended */ 1760 struct buf *bp; /* indirect block referencing allocated block */ 1761 int ptrno; /* offset of pointer in indirect block */ 1762 ufs2_daddr_t newblkno; /* disk block number being added */ 1763{ 1764 struct allocindir *aip; 1765 1766 aip = newallocindir(ip, ptrno, newblkno, 0); 1767 ACQUIRE_LOCK(&lk); 1768 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list); 1769 FREE_LOCK(&lk); 1770 setup_allocindir_phase2(bp, ip, aip); 1771} 1772 1773/* 1774 * Called to finish the allocation of the "aip" allocated 1775 * by one of the two routines above. 1776 */ 1777static void 1778setup_allocindir_phase2(bp, ip, aip) 1779 struct buf *bp; /* in-memory copy of the indirect block */ 1780 struct inode *ip; /* inode for file being extended */ 1781 struct allocindir *aip; /* allocindir allocated by the above routines */ 1782{ 1783 struct worklist *wk; 1784 struct indirdep *indirdep, *newindirdep; 1785 struct bmsafemap *bmsafemap; 1786 struct allocindir *oldaip; 1787 struct freefrag *freefrag; 1788 struct newblk *newblk; 1789 ufs2_daddr_t blkno; 1790 1791 if (bp->b_lblkno >= 0) 1792 panic("setup_allocindir_phase2: not indir blk"); 1793 for (indirdep = NULL, newindirdep = NULL; ; ) { 1794 ACQUIRE_LOCK(&lk); 1795 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1796 if (wk->wk_type != D_INDIRDEP) 1797 continue; 1798 indirdep = WK_INDIRDEP(wk); 1799 break; 1800 } 1801 if (indirdep == NULL && newindirdep) { 1802 indirdep = newindirdep; 1803 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 1804 newindirdep = NULL; 1805 } 1806 FREE_LOCK(&lk); 1807 if (indirdep) { 1808 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1809 &newblk) == 0) 1810 panic("setup_allocindir: lost block"); 1811 ACQUIRE_LOCK(&lk); 1812 if (newblk->nb_state == DEPCOMPLETE) { 1813 aip->ai_state |= DEPCOMPLETE; 1814 aip->ai_buf = NULL; 1815 } else { 1816 bmsafemap = newblk->nb_bmsafemap; 1817 aip->ai_buf = bmsafemap->sm_buf; 1818 LIST_REMOVE(newblk, nb_deps); 1819 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1820 aip, ai_deps); 1821 } 1822 LIST_REMOVE(newblk, nb_hash); 1823 FREE(newblk, M_NEWBLK); 1824 aip->ai_indirdep = indirdep; 1825 /* 1826 * Check to see if there is an existing dependency 1827 * for this block. If there is, merge the old 1828 * dependency into the new one. 
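 * Merging leaves the new allocindir carrying the old one's original
 * block number (and any associated freefrag), so only a single
 * rollback value remains for the pointer; the superseded allocindir
 * is then freed immediately.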
1829 */ 1830 if (aip->ai_oldblkno == 0) 1831 oldaip = NULL; 1832 else 1833 1834 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1835 if (oldaip->ai_offset == aip->ai_offset) 1836 break; 1837 freefrag = NULL; 1838 if (oldaip != NULL) { 1839 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1840 FREE_LOCK(&lk); 1841 panic("setup_allocindir_phase2: blkno"); 1842 } 1843 aip->ai_oldblkno = oldaip->ai_oldblkno; 1844 freefrag = aip->ai_freefrag; 1845 aip->ai_freefrag = oldaip->ai_freefrag; 1846 oldaip->ai_freefrag = NULL; 1847 free_allocindir(oldaip, NULL); 1848 } 1849 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1850 if (ip->i_ump->um_fstype == UFS1) 1851 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data) 1852 [aip->ai_offset] = aip->ai_oldblkno; 1853 else 1854 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data) 1855 [aip->ai_offset] = aip->ai_oldblkno; 1856 FREE_LOCK(&lk); 1857 if (freefrag != NULL) 1858 handle_workitem_freefrag(freefrag); 1859 } 1860 if (newindirdep) { 1861 if (indirdep->ir_savebp != NULL) 1862 brelse(newindirdep->ir_savebp); 1863 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1864 } 1865 if (indirdep) 1866 break; 1867 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1868 M_INDIRDEP, M_SOFTDEP_FLAGS); 1869 newindirdep->ir_list.wk_type = D_INDIRDEP; 1870 newindirdep->ir_state = ATTACHED; 1871 if (ip->i_ump->um_fstype == UFS1) 1872 newindirdep->ir_state |= UFS1FMT; 1873 LIST_INIT(&newindirdep->ir_deplisthd); 1874 LIST_INIT(&newindirdep->ir_donehd); 1875 if (bp->b_blkno == bp->b_lblkno) { 1876 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 1877 NULL, NULL); 1878 bp->b_blkno = blkno; 1879 } 1880 newindirdep->ir_savebp = 1881 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0); 1882 BUF_KERNPROC(newindirdep->ir_savebp); 1883 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1884 } 1885} 1886 1887/* 1888 * Block de-allocation dependencies. 1889 * 1890 * When blocks are de-allocated, the on-disk pointers must be nullified before 1891 * the blocks are made available for use by other files. (The true 1892 * requirement is that old pointers must be nullified before new on-disk 1893 * pointers are set. We chose this slightly more stringent requirement to 1894 * reduce complexity.) Our implementation handles this dependency by updating 1895 * the inode (or indirect block) appropriately but delaying the actual block 1896 * de-allocation (i.e., freemap and free space count manipulation) until 1897 * after the updated versions reach stable storage. After the disk is 1898 * updated, the blocks can be safely de-allocated whenever it is convenient. 1899 * This implementation handles only the common case of reducing a file's 1900 * length to zero. Other cases are handled by the conventional synchronous 1901 * write approach. 1902 * 1903 * The ffs implementation with which we worked double-checks 1904 * the state of the block pointers and file size as it reduces 1905 * a file's length. Some of this code is replicated here in our 1906 * soft updates implementation. The freeblks->fb_chkcnt field is 1907 * used to transfer a part of this information to the procedure 1908 * that eventually de-allocates the blocks. 1909 * 1910 * This routine should be called from the routine that shortens 1911 * a file's length, before the inode's size or block pointers 1912 * are modified. It will save the block pointer information for 1913 * later release and zero the inode so that the calling routine 1914 * can release it. 
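 * A truncate-to-zero caller is expected to use this routine roughly
 * as follows (a sketch only, not copied from any particular caller;
 * the IO_EXT flag applies only to UFS2 file systems):
 *
 *	if (DOINGSOFTDEP(vp)) {
 *		softdep_setup_freeblocks(ip, (off_t)0, IO_NORMAL | IO_EXT);
 *		... the zero'ed inode may now be released for reuse ...
 *	}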
1915 */
1916 void
1917 softdep_setup_freeblocks(ip, length, flags)
1918 struct inode *ip; /* The inode whose length is to be reduced */
1919 off_t length; /* The new length for the file */
1920 int flags; /* IO_EXT and/or IO_NORMAL */
1921 {
1922 struct freeblks *freeblks;
1923 struct inodedep *inodedep;
1924 struct allocdirect *adp;
1925 struct vnode *vp;
1926 struct buf *bp;
1927 struct fs *fs;
1928 ufs2_daddr_t extblocks, datablocks;
1929 int i, delay, error;
1930 
1931 fs = ip->i_fs;
1932 if (length != 0)
1933 panic("softdep_setup_freeblocks: non-zero length");
1934 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks),
1935 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
1936 freeblks->fb_list.wk_type = D_FREEBLKS;
1937 freeblks->fb_uid = ip->i_uid;
1938 freeblks->fb_previousinum = ip->i_number;
1939 freeblks->fb_devvp = ip->i_devvp;
1940 freeblks->fb_mnt = ITOV(ip)->v_mount;
1941 extblocks = 0;
1942 if (fs->fs_magic == FS_UFS2_MAGIC)
1943 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
1944 datablocks = DIP(ip, i_blocks) - extblocks;
1945 if ((flags & IO_NORMAL) == 0) {
1946 freeblks->fb_oldsize = 0;
1947 freeblks->fb_chkcnt = 0;
1948 } else {
1949 freeblks->fb_oldsize = ip->i_size;
1950 ip->i_size = 0;
1951 DIP(ip, i_size) = 0;
1952 freeblks->fb_chkcnt = datablocks;
1953 for (i = 0; i < NDADDR; i++) {
1954 freeblks->fb_dblks[i] = DIP(ip, i_db[i]);
1955 DIP(ip, i_db[i]) = 0;
1956 }
1957 for (i = 0; i < NIADDR; i++) {
1958 freeblks->fb_iblks[i] = DIP(ip, i_ib[i]);
1959 DIP(ip, i_ib[i]) = 0;
1960 }
1961 /*
1962 * If the file was removed, then the space being freed was
1963 * accounted for then (see softdep_releasefile()). If the
1964 * file is merely being truncated, then we account for it now.
1965 */
1966 if ((ip->i_flag & IN_SPACECOUNTED) == 0)
1967 fs->fs_pendingblocks += datablocks;
1968 }
1969 if ((flags & IO_EXT) == 0) {
1970 freeblks->fb_oldextsize = 0;
1971 } else {
1972 freeblks->fb_oldextsize = ip->i_din2->di_extsize;
1973 ip->i_din2->di_extsize = 0;
1974 freeblks->fb_chkcnt += extblocks;
1975 for (i = 0; i < NXADDR; i++) {
1976 freeblks->fb_eblks[i] = ip->i_din2->di_extb[i];
1977 ip->i_din2->di_extb[i] = 0;
1978 }
1979 }
1980 DIP(ip, i_blocks) -= freeblks->fb_chkcnt;
1981 /*
1982 * Push the zero'ed inode to its disk buffer so that we are free
1983 * to delete its dependencies below. Once the dependencies are gone
1984 * the buffer can be safely released.
1985 */
1986 if ((error = bread(ip->i_devvp,
1987 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
1988 (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
1989 brelse(bp);
1990 softdep_error("softdep_setup_freeblocks", error);
1991 }
1992 if (ip->i_ump->um_fstype == UFS1)
1993 *((struct ufs1_dinode *)bp->b_data +
1994 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
1995 else
1996 *((struct ufs2_dinode *)bp->b_data +
1997 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
1998 /*
1999 * Find and eliminate any inode dependencies.
2000 */
2001 ACQUIRE_LOCK(&lk);
2002 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep);
2003 if ((inodedep->id_state & IOSTARTED) != 0) {
2004 FREE_LOCK(&lk);
2005 panic("softdep_setup_freeblocks: inode busy");
2006 }
2007 /*
2008 * Add the freeblks structure to the list of operations that
2009 * must await the zero'ed inode being written to disk. If we
2010 * still have a bitmap dependency (delay == 0), then the inode
2011 * has never been written to disk, so we can process the
2012 * freeblks below once we have deleted the dependencies.
2013 */ 2014 delay = (inodedep->id_state & DEPCOMPLETE); 2015 if (delay) 2016 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 2017 /* 2018 * Because the file length has been truncated to zero, any 2019 * pending block allocation dependency structures associated 2020 * with this inode are obsolete and can simply be de-allocated. 2021 * We must first merge the two dependency lists to get rid of 2022 * any duplicate freefrag structures, then purge the merged list. 2023 * If we still have a bitmap dependency, then the inode has never 2024 * been written to disk, so we can free any fragments without delay. 2025 */ 2026 if (flags & IO_NORMAL) { 2027 merge_inode_lists(&inodedep->id_newinoupdt, 2028 &inodedep->id_inoupdt); 2029 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 2030 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 2031 } 2032 if (flags & IO_EXT) { 2033 merge_inode_lists(&inodedep->id_newextupdt, 2034 &inodedep->id_extupdt); 2035 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 2036 free_allocdirect(&inodedep->id_extupdt, adp, delay); 2037 } 2038 FREE_LOCK(&lk); 2039 bdwrite(bp); 2040 /* 2041 * We must wait for any I/O in progress to finish so that 2042 * all potential buffers on the dirty list will be visible. 2043 * Once they are all there, walk the list and get rid of 2044 * any dependencies. 2045 */ 2046 vp = ITOV(ip); 2047 ACQUIRE_LOCK(&lk); 2048 drain_output(vp, 1); 2049restart: 2050 VI_LOCK(vp); 2051 TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) { 2052 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 2053 ((flags & IO_NORMAL) == 0 && 2054 (bp->b_xflags & BX_ALTDATA) == 0)) 2055 continue; 2056 VI_UNLOCK(vp); 2057 if (getdirtybuf(&bp, MNT_WAIT) == 0) 2058 goto restart; 2059 (void) inodedep_lookup(fs, ip->i_number, 0, &inodedep); 2060 deallocate_dependencies(bp, inodedep); 2061 bp->b_flags |= B_INVAL | B_NOCACHE; 2062 FREE_LOCK(&lk); 2063 brelse(bp); 2064 ACQUIRE_LOCK(&lk); 2065 goto restart; 2066 } 2067 VI_UNLOCK(vp); 2068 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 2069 (void) free_inodedep(inodedep); 2070 FREE_LOCK(&lk); 2071 /* 2072 * If the inode has never been written to disk (delay == 0), 2073 * then we can process the freeblks now that we have deleted 2074 * the dependencies. 2075 */ 2076 if (!delay) 2077 handle_workitem_freeblocks(freeblks, 0); 2078} 2079 2080/* 2081 * Reclaim any dependency structures from a buffer that is about to 2082 * be reallocated to a new vnode. The buffer must be locked, thus, 2083 * no I/O completion operations can occur while we are manipulating 2084 * its associated dependencies. The mutex is held so that other I/O's 2085 * associated with related dependencies do not occur. 2086 */ 2087static void 2088deallocate_dependencies(bp, inodedep) 2089 struct buf *bp; 2090 struct inodedep *inodedep; 2091{ 2092 struct worklist *wk; 2093 struct indirdep *indirdep; 2094 struct allocindir *aip; 2095 struct pagedep *pagedep; 2096 struct dirrem *dirrem; 2097 struct diradd *dap; 2098 int i; 2099 2100 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2101 switch (wk->wk_type) { 2102 2103 case D_INDIRDEP: 2104 indirdep = WK_INDIRDEP(wk); 2105 /* 2106 * None of the indirect pointers will ever be visible, 2107 * so they can simply be tossed. GOINGAWAY ensures 2108 * that allocated pointers will be saved in the buffer 2109 * cache until they are freed. Note that they will 2110 * only be able to be found by their physical address 2111 * since the inode mapping the logical address will 2112 * be gone. 
The save buffer used for the safe copy 2113 * was allocated in setup_allocindir_phase2 using 2114 * the physical address so it could be used for this 2115 * purpose. Hence we swap the safe copy with the real 2116 * copy, allowing the safe copy to be freed and holding 2117 * on to the real copy for later use in indir_trunc. 2118 */ 2119 if (indirdep->ir_state & GOINGAWAY) { 2120 FREE_LOCK(&lk); 2121 panic("deallocate_dependencies: already gone"); 2122 } 2123 indirdep->ir_state |= GOINGAWAY; 2124 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 2125 free_allocindir(aip, inodedep); 2126 if (bp->b_lblkno >= 0 || 2127 bp->b_blkno != indirdep->ir_savebp->b_lblkno) { 2128 FREE_LOCK(&lk); 2129 panic("deallocate_dependencies: not indir"); 2130 } 2131 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 2132 bp->b_bcount); 2133 WORKLIST_REMOVE(wk); 2134 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 2135 continue; 2136 2137 case D_PAGEDEP: 2138 pagedep = WK_PAGEDEP(wk); 2139 /* 2140 * None of the directory additions will ever be 2141 * visible, so they can simply be tossed. 2142 */ 2143 for (i = 0; i < DAHASHSZ; i++) 2144 while ((dap = 2145 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 2146 free_diradd(dap); 2147 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 2148 free_diradd(dap); 2149 /* 2150 * Copy any directory remove dependencies to the list 2151 * to be processed after the zero'ed inode is written. 2152 * If the inode has already been written, then they 2153 * can be dumped directly onto the work list. 2154 */ 2155 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 2156 LIST_REMOVE(dirrem, dm_next); 2157 dirrem->dm_dirinum = pagedep->pd_ino; 2158 if (inodedep == NULL || 2159 (inodedep->id_state & ALLCOMPLETE) == 2160 ALLCOMPLETE) 2161 add_to_worklist(&dirrem->dm_list); 2162 else 2163 WORKLIST_INSERT(&inodedep->id_bufwait, 2164 &dirrem->dm_list); 2165 } 2166 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2167 LIST_FOREACH(wk, &inodedep->id_bufwait, wk_list) 2168 if (wk->wk_type == D_NEWDIRBLK && 2169 WK_NEWDIRBLK(wk)->db_pagedep == 2170 pagedep) 2171 break; 2172 if (wk != NULL) { 2173 WORKLIST_REMOVE(wk); 2174 free_newdirblk(WK_NEWDIRBLK(wk)); 2175 } else { 2176 FREE_LOCK(&lk); 2177 panic("deallocate_dependencies: " 2178 "lost pagedep"); 2179 } 2180 } 2181 WORKLIST_REMOVE(&pagedep->pd_list); 2182 LIST_REMOVE(pagedep, pd_hash); 2183 WORKITEM_FREE(pagedep, D_PAGEDEP); 2184 continue; 2185 2186 case D_ALLOCINDIR: 2187 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2188 continue; 2189 2190 case D_ALLOCDIRECT: 2191 case D_INODEDEP: 2192 FREE_LOCK(&lk); 2193 panic("deallocate_dependencies: Unexpected type %s", 2194 TYPENAME(wk->wk_type)); 2195 /* NOTREACHED */ 2196 2197 default: 2198 FREE_LOCK(&lk); 2199 panic("deallocate_dependencies: Unknown type %s", 2200 TYPENAME(wk->wk_type)); 2201 /* NOTREACHED */ 2202 } 2203 } 2204} 2205 2206/* 2207 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2208 * This routine must be called with splbio interrupts blocked. 
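 * If "delay" is non-zero, any freefrag or newdirblk work items carried
 * by the allocdirect are queued on the inodedep's id_bufwait list so
 * that they are not processed until the zero'ed inode reaches the
 * disk; otherwise the freefrag is added to the work list and the
 * newdirblk is released directly.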
2209 */ 2210static void 2211free_allocdirect(adphead, adp, delay) 2212 struct allocdirectlst *adphead; 2213 struct allocdirect *adp; 2214 int delay; 2215{ 2216 struct newdirblk *newdirblk; 2217 struct worklist *wk; 2218 2219#ifdef DEBUG 2220 if (lk.lkt_held == NOHOLDER) 2221 panic("free_allocdirect: lock not held"); 2222#endif 2223 if ((adp->ad_state & DEPCOMPLETE) == 0) 2224 LIST_REMOVE(adp, ad_deps); 2225 TAILQ_REMOVE(adphead, adp, ad_next); 2226 if ((adp->ad_state & COMPLETE) == 0) 2227 WORKLIST_REMOVE(&adp->ad_list); 2228 if (adp->ad_freefrag != NULL) { 2229 if (delay) 2230 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2231 &adp->ad_freefrag->ff_list); 2232 else 2233 add_to_worklist(&adp->ad_freefrag->ff_list); 2234 } 2235 if ((wk = LIST_FIRST(&adp->ad_newdirblk)) != NULL) { 2236 newdirblk = WK_NEWDIRBLK(wk); 2237 WORKLIST_REMOVE(&newdirblk->db_list); 2238 if (LIST_FIRST(&adp->ad_newdirblk) != NULL) 2239 panic("free_allocdirect: extra newdirblk"); 2240 if (delay) 2241 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2242 &newdirblk->db_list); 2243 else 2244 free_newdirblk(newdirblk); 2245 } 2246 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2247} 2248 2249/* 2250 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 2251 * This routine must be called with splbio interrupts blocked. 2252 */ 2253static void 2254free_newdirblk(newdirblk) 2255 struct newdirblk *newdirblk; 2256{ 2257 struct pagedep *pagedep; 2258 struct diradd *dap; 2259 int i; 2260 2261#ifdef DEBUG 2262 if (lk.lkt_held == NOHOLDER) 2263 panic("free_newdirblk: lock not held"); 2264#endif 2265 /* 2266 * If the pagedep is still linked onto the directory buffer 2267 * dependency chain, then some of the entries on the 2268 * pd_pendinghd list may not be committed to disk yet. In 2269 * this case, we will simply clear the NEWBLOCK flag and 2270 * let the pd_pendinghd list be processed when the pagedep 2271 * is next written. If the pagedep is no longer on the buffer 2272 * dependency chain, then all the entries on the pd_pending 2273 * list are committed to disk and we can free them here. 2274 */ 2275 pagedep = newdirblk->db_pagedep; 2276 pagedep->pd_state &= ~NEWBLOCK; 2277 if ((pagedep->pd_state & ONWORKLIST) == 0) 2278 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 2279 free_diradd(dap); 2280 /* 2281 * If no dependencies remain, the pagedep will be freed. 2282 */ 2283 for (i = 0; i < DAHASHSZ; i++) 2284 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 2285 break; 2286 if (i == DAHASHSZ && (pagedep->pd_state & ONWORKLIST) == 0) { 2287 LIST_REMOVE(pagedep, pd_hash); 2288 WORKITEM_FREE(pagedep, D_PAGEDEP); 2289 } 2290 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2291} 2292 2293/* 2294 * Prepare an inode to be freed. The actual free operation is not 2295 * done until the zero'ed inode has been written to disk. 2296 */ 2297void 2298softdep_freefile(pvp, ino, mode) 2299 struct vnode *pvp; 2300 ino_t ino; 2301 int mode; 2302{ 2303 struct inode *ip = VTOI(pvp); 2304 struct inodedep *inodedep; 2305 struct freefile *freefile; 2306 2307 /* 2308 * This sets up the inode de-allocation dependency. 
2309 */ 2310 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2311 M_FREEFILE, M_SOFTDEP_FLAGS); 2312 freefile->fx_list.wk_type = D_FREEFILE; 2313 freefile->fx_list.wk_state = 0; 2314 freefile->fx_mode = mode; 2315 freefile->fx_oldinum = ino; 2316 freefile->fx_devvp = ip->i_devvp; 2317 freefile->fx_mnt = ITOV(ip)->v_mount; 2318 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 2319 ip->i_fs->fs_pendinginodes += 1; 2320 2321 /* 2322 * If the inodedep does not exist, then the zero'ed inode has 2323 * been written to disk. If the allocated inode has never been 2324 * written to disk, then the on-disk inode is zero'ed. In either 2325 * case we can free the file immediately. 2326 */ 2327 ACQUIRE_LOCK(&lk); 2328 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2329 check_inode_unwritten(inodedep)) { 2330 FREE_LOCK(&lk); 2331 handle_workitem_freefile(freefile); 2332 return; 2333 } 2334 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2335 FREE_LOCK(&lk); 2336} 2337 2338/* 2339 * Check to see if an inode has never been written to disk. If 2340 * so free the inodedep and return success, otherwise return failure. 2341 * This routine must be called with splbio interrupts blocked. 2342 * 2343 * If we still have a bitmap dependency, then the inode has never 2344 * been written to disk. Drop the dependency as it is no longer 2345 * necessary since the inode is being deallocated. We set the 2346 * ALLCOMPLETE flags since the bitmap now properly shows that the 2347 * inode is not allocated. Even if the inode is actively being 2348 * written, it has been rolled back to its zero'ed state, so we 2349 * are ensured that a zero inode is what is on the disk. For short 2350 * lived files, this change will usually result in removing all the 2351 * dependencies from the inode so that it can be freed immediately. 2352 */ 2353static int 2354check_inode_unwritten(inodedep) 2355 struct inodedep *inodedep; 2356{ 2357 2358 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2359 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2360 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2361 LIST_FIRST(&inodedep->id_inowait) != NULL || 2362 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2363 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2364 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 2365 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 2366 inodedep->id_nlinkdelta != 0) 2367 return (0); 2368 inodedep->id_state |= ALLCOMPLETE; 2369 LIST_REMOVE(inodedep, id_deps); 2370 inodedep->id_buf = NULL; 2371 if (inodedep->id_state & ONWORKLIST) 2372 WORKLIST_REMOVE(&inodedep->id_list); 2373 if (inodedep->id_savedino1 != NULL) { 2374 FREE(inodedep->id_savedino1, M_INODEDEP); 2375 inodedep->id_savedino1 = NULL; 2376 } 2377 if (free_inodedep(inodedep) == 0) { 2378 FREE_LOCK(&lk); 2379 panic("check_inode_unwritten: busy inode"); 2380 } 2381 return (1); 2382} 2383 2384/* 2385 * Try to free an inodedep structure. Return 1 if it could be freed. 
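 * An inodedep may be released only when it is off the work list, has
 * reached the ALLCOMPLETE state, and is carrying no pending dependency
 * lists, saved inode, or outstanding link-count delta.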
2386 */
2387 static int
2388 free_inodedep(inodedep)
2389 struct inodedep *inodedep;
2390 {
2391 
2392 if ((inodedep->id_state & ONWORKLIST) != 0 ||
2393 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
2394 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2395 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2396 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2397 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2398 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2399 TAILQ_FIRST(&inodedep->id_extupdt) != NULL ||
2400 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL ||
2401 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino1 != NULL)
2402 return (0);
2403 LIST_REMOVE(inodedep, id_hash);
2404 WORKITEM_FREE(inodedep, D_INODEDEP);
2405 num_inodedep -= 1;
2406 return (1);
2407}
2408 
2409/*
2410 * This workitem routine performs the block de-allocation.
2411 * The workitem is added to the pending list after the updated
2412 * inode block has been written to disk. As mentioned above,
2413 * checks regarding the number of blocks de-allocated (compared
2414 * to the number of blocks allocated for the file) are also
2415 * performed in this function.
2416 */
2417static void
2418handle_workitem_freeblocks(freeblks, flags)
2419 struct freeblks *freeblks;
2420 int flags;
2421{
2422 struct inode *ip;
2423 struct vnode *vp;
2424 struct fs *fs;
2425 int i, nblocks, level, bsize;
2426 ufs2_daddr_t bn, blocksreleased = 0;
2427 int error, allerror = 0;
2428 ufs_lbn_t baselbns[NIADDR], tmpval;
2429 
2430 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs;
2431 tmpval = 1;
2432 baselbns[0] = NDADDR;
2433 for (i = 1; i < NIADDR; i++) {
2434 tmpval *= NINDIR(fs);
2435 baselbns[i] = baselbns[i - 1] + tmpval;
2436 }
2437 nblocks = btodb(fs->fs_bsize);
2438 blocksreleased = 0;
2439 /*
2440 * Release all extended attribute blocks or frags.
2441 */
2442 if (freeblks->fb_oldextsize > 0) {
2443 for (i = (NXADDR - 1); i >= 0; i--) {
2444 if ((bn = freeblks->fb_eblks[i]) == 0)
2445 continue;
2446 bsize = sblksize(fs, freeblks->fb_oldextsize, i);
2447 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize,
2448 freeblks->fb_previousinum);
2449 blocksreleased += btodb(bsize);
2450 }
2451 }
2452 /*
2453 * Release all data blocks or frags.
2454 */
2455 if (freeblks->fb_oldsize > 0) {
2456 /*
2457 * Indirect blocks first.
2458 */
2459 for (level = (NIADDR - 1); level >= 0; level--) {
2460 if ((bn = freeblks->fb_iblks[level]) == 0)
2461 continue;
2462 if ((error = indir_trunc(freeblks, fsbtodb(fs, bn),
2463 level, baselbns[level], &blocksreleased)) != 0)
2464 allerror = error;
2465 ffs_blkfree(fs, freeblks->fb_devvp, bn, fs->fs_bsize,
2466 freeblks->fb_previousinum);
2467 fs->fs_pendingblocks -= nblocks;
2468 blocksreleased += nblocks;
2469 }
2470 /*
2471 * All direct blocks or frags.
2472 */
2473 for (i = (NDADDR - 1); i >= 0; i--) {
2474 if ((bn = freeblks->fb_dblks[i]) == 0)
2475 continue;
2476 bsize = sblksize(fs, freeblks->fb_oldsize, i);
2477 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize,
2478 freeblks->fb_previousinum);
2479 fs->fs_pendingblocks -= btodb(bsize);
2480 blocksreleased += btodb(bsize);
2481 }
2482 }
2483 /*
2484 * If we still have not finished background cleanup, then check
2485 * to see if the block count needs to be adjusted.
2486 */
2487 if (freeblks->fb_chkcnt != blocksreleased &&
2488 (fs->fs_flags & FS_UNCLEAN) != 0 &&
2489 VFS_VGET(freeblks->fb_mnt, freeblks->fb_previousinum,
2490 (flags & LK_NOWAIT) | LK_EXCLUSIVE, &vp) == 0) {
2491 ip = VTOI(vp);
2492 DIP(ip, i_blocks) += freeblks->fb_chkcnt - blocksreleased;
2493 ip->i_flag |= IN_CHANGE;
2494 vput(vp);
2495 }
2496 
2497#ifdef DIAGNOSTIC
2498 if (freeblks->fb_chkcnt != blocksreleased &&
2499 ((fs->fs_flags & FS_UNCLEAN) == 0 || (flags & LK_NOWAIT) != 0))
2500 printf("handle_workitem_freeblocks: block count\n");
2501 if (allerror)
2502 softdep_error("handle_workitem_freeblks", allerror);
2503#endif /* DIAGNOSTIC */
2504 
2505 WORKITEM_FREE(freeblks, D_FREEBLKS);
2506}
2507 
2508/*
2509 * Release blocks associated with the inode ip and stored in the indirect
2510 * block dbn. If level is greater than SINGLE, the block is an indirect block
2511 * and recursive calls to indir_trunc must be used to cleanse other indirect
2512 * blocks.
2513 */
2514static int
2515indir_trunc(freeblks, dbn, level, lbn, countp)
2516 struct freeblks *freeblks;
2517 ufs2_daddr_t dbn;
2518 int level;
2519 ufs_lbn_t lbn;
2520 ufs2_daddr_t *countp;
2521{
2522 struct buf *bp;
2523 struct fs *fs;
2524 struct worklist *wk;
2525 struct indirdep *indirdep;
2526 ufs1_daddr_t *bap1 = 0;
2527 ufs2_daddr_t nb, *bap2 = 0;
2528 ufs_lbn_t lbnadd;
2529 int i, nblocks, ufs1fmt;
2530 int error, allerror = 0;
2531 
2532 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs;
2533 lbnadd = 1;
2534 for (i = level; i > 0; i--)
2535 lbnadd *= NINDIR(fs);
2536 /*
2537 * Get buffer of block pointers to be freed. This routine is not
2538 * called until the zero'ed inode has been written, so it is safe
2539 * to free blocks as they are encountered. Because the inode has
2540 * been zero'ed, calls to bmap on these blocks will fail. So, we
2541 * have to use the on-disk address and the block device for the
2542 * filesystem to look them up. If the file was deleted before its
2543 * indirect blocks were all written to disk, the routine that set
2544 * us up (deallocate_dependencies) will have arranged to leave
2545 * a complete copy of the indirect block in memory for our use.
2546 * Otherwise we have to read the blocks in from the disk.
2547 */
2548 ACQUIRE_LOCK(&lk);
2549 if ((bp = incore(freeblks->fb_devvp, dbn)) != NULL &&
2550 (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2551 if (wk->wk_type != D_INDIRDEP ||
2552 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2553 (indirdep->ir_state & GOINGAWAY) == 0) {
2554 FREE_LOCK(&lk);
2555 panic("indir_trunc: lost indirdep");
2556 }
2557 WORKLIST_REMOVE(wk);
2558 WORKITEM_FREE(indirdep, D_INDIRDEP);
2559 if (LIST_FIRST(&bp->b_dep) != NULL) {
2560 FREE_LOCK(&lk);
2561 panic("indir_trunc: dangling dep");
2562 }
2563 FREE_LOCK(&lk);
2564 } else {
2565 FREE_LOCK(&lk);
2566 error = bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
2567 NOCRED, &bp);
2568 if (error) {
2569 brelse(bp);
2570 return (error);
2571 }
2572 }
2573 /*
2574 * Recursively free indirect blocks.
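 * The block is interpreted as an array of 32-bit (UFS1) or 64-bit
 * (UFS2) disk addresses and is walked from its last entry toward its
 * first, descending one level per recursive call until level zero,
 * whose entries point at data blocks.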
2575 */ 2576 if (VFSTOUFS(freeblks->fb_mnt)->um_fstype == UFS1) { 2577 ufs1fmt = 1; 2578 bap1 = (ufs1_daddr_t *)bp->b_data; 2579 } else { 2580 ufs1fmt = 0; 2581 bap2 = (ufs2_daddr_t *)bp->b_data; 2582 } 2583 nblocks = btodb(fs->fs_bsize); 2584 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2585 if (ufs1fmt) 2586 nb = bap1[i]; 2587 else 2588 nb = bap2[i]; 2589 if (nb == 0) 2590 continue; 2591 if (level != 0) { 2592 if ((error = indir_trunc(freeblks, fsbtodb(fs, nb), 2593 level - 1, lbn + (i * lbnadd), countp)) != 0) 2594 allerror = error; 2595 } 2596 ffs_blkfree(fs, freeblks->fb_devvp, nb, fs->fs_bsize, 2597 freeblks->fb_previousinum); 2598 fs->fs_pendingblocks -= nblocks; 2599 *countp += nblocks; 2600 } 2601 bp->b_flags |= B_INVAL | B_NOCACHE; 2602 brelse(bp); 2603 return (allerror); 2604} 2605 2606/* 2607 * Free an allocindir. 2608 * This routine must be called with splbio interrupts blocked. 2609 */ 2610static void 2611free_allocindir(aip, inodedep) 2612 struct allocindir *aip; 2613 struct inodedep *inodedep; 2614{ 2615 struct freefrag *freefrag; 2616 2617#ifdef DEBUG 2618 if (lk.lkt_held == NOHOLDER) 2619 panic("free_allocindir: lock not held"); 2620#endif 2621 if ((aip->ai_state & DEPCOMPLETE) == 0) 2622 LIST_REMOVE(aip, ai_deps); 2623 if (aip->ai_state & ONWORKLIST) 2624 WORKLIST_REMOVE(&aip->ai_list); 2625 LIST_REMOVE(aip, ai_next); 2626 if ((freefrag = aip->ai_freefrag) != NULL) { 2627 if (inodedep == NULL) 2628 add_to_worklist(&freefrag->ff_list); 2629 else 2630 WORKLIST_INSERT(&inodedep->id_bufwait, 2631 &freefrag->ff_list); 2632 } 2633 WORKITEM_FREE(aip, D_ALLOCINDIR); 2634} 2635 2636/* 2637 * Directory entry addition dependencies. 2638 * 2639 * When adding a new directory entry, the inode (with its incremented link 2640 * count) must be written to disk before the directory entry's pointer to it. 2641 * Also, if the inode is newly allocated, the corresponding freemap must be 2642 * updated (on disk) before the directory entry's pointer. These requirements 2643 * are met via undo/redo on the directory entry's pointer, which consists 2644 * simply of the inode number. 2645 * 2646 * As directory entries are added and deleted, the free space within a 2647 * directory block can become fragmented. The ufs filesystem will compact 2648 * a fragmented directory block to make space for a new entry. When this 2649 * occurs, the offsets of previously added entries change. Any "diradd" 2650 * dependency structures corresponding to these entries must be updated with 2651 * the new offsets. 2652 */ 2653 2654/* 2655 * This routine is called after the in-memory inode's link 2656 * count has been incremented, but before the directory entry's 2657 * pointer to the inode has been set. 
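 * The return value tells the caller whether the new directory entry
 * must be forced to disk: 1 when the entry begins a new block in the
 * indirect range (which is not tracked), 0 otherwise.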
2658 */ 2659int 2660softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 2661 struct buf *bp; /* buffer containing directory block */ 2662 struct inode *dp; /* inode for directory */ 2663 off_t diroffset; /* offset of new entry in directory */ 2664 ino_t newinum; /* inode referenced by new directory entry */ 2665 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2666 int isnewblk; /* entry is in a newly allocated block */ 2667{ 2668 int offset; /* offset of new entry within directory block */ 2669 ufs_lbn_t lbn; /* block in directory containing new entry */ 2670 struct fs *fs; 2671 struct diradd *dap; 2672 struct allocdirect *adp; 2673 struct pagedep *pagedep; 2674 struct inodedep *inodedep; 2675 struct newdirblk *newdirblk = 0; 2676 struct mkdir *mkdir1, *mkdir2; 2677 2678 /* 2679 * Whiteouts have no dependencies. 2680 */ 2681 if (newinum == WINO) { 2682 if (newdirbp != NULL) 2683 bdwrite(newdirbp); 2684 return (0); 2685 } 2686 2687 fs = dp->i_fs; 2688 lbn = lblkno(fs, diroffset); 2689 offset = blkoff(fs, diroffset); 2690 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2691 M_SOFTDEP_FLAGS|M_ZERO); 2692 dap->da_list.wk_type = D_DIRADD; 2693 dap->da_offset = offset; 2694 dap->da_newinum = newinum; 2695 dap->da_state = ATTACHED; 2696 if (isnewblk && lbn < NDADDR && fragoff(fs, diroffset) == 0) { 2697 MALLOC(newdirblk, struct newdirblk *, sizeof(struct newdirblk), 2698 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 2699 newdirblk->db_list.wk_type = D_NEWDIRBLK; 2700 newdirblk->db_state = 0; 2701 } 2702 if (newdirbp == NULL) { 2703 dap->da_state |= DEPCOMPLETE; 2704 ACQUIRE_LOCK(&lk); 2705 } else { 2706 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2707 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2708 M_SOFTDEP_FLAGS); 2709 mkdir1->md_list.wk_type = D_MKDIR; 2710 mkdir1->md_state = MKDIR_BODY; 2711 mkdir1->md_diradd = dap; 2712 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2713 M_SOFTDEP_FLAGS); 2714 mkdir2->md_list.wk_type = D_MKDIR; 2715 mkdir2->md_state = MKDIR_PARENT; 2716 mkdir2->md_diradd = dap; 2717 /* 2718 * Dependency on "." and ".." being written to disk. 2719 */ 2720 mkdir1->md_buf = newdirbp; 2721 ACQUIRE_LOCK(&lk); 2722 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2723 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2724 FREE_LOCK(&lk); 2725 bdwrite(newdirbp); 2726 /* 2727 * Dependency on link count increase for parent directory 2728 */ 2729 ACQUIRE_LOCK(&lk); 2730 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0 2731 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2732 dap->da_state &= ~MKDIR_PARENT; 2733 WORKITEM_FREE(mkdir2, D_MKDIR); 2734 } else { 2735 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2736 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2737 } 2738 } 2739 /* 2740 * Link into parent directory pagedep to await its being written. 2741 */ 2742 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2743 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2744 dap->da_pagedep = pagedep; 2745 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2746 da_pdlist); 2747 /* 2748 * Link into its inodedep. Put it on the id_bufwait list if the inode 2749 * is not yet written. If it is written, do the post-inode write 2750 * processing to put it on the id_pendinghd list. 
2751 */
2752 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep);
2753 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
2754 diradd_inode_written(dap, inodedep);
2755 else
2756 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2757 if (isnewblk) {
2758 /*
2759 * Directories growing into indirect blocks are rare
2760 * enough, and the frequency of new block allocation
2761 * in those cases rarer still, that we choose not
2762 * to bother tracking them. Rather we simply force the
2763 * new directory entry to disk.
2764 */
2765 if (lbn >= NDADDR) {
2766 FREE_LOCK(&lk);
2767 /*
2768 * We only have a new allocation when at the
2769 * beginning of a new block, not when we are
2770 * expanding into an existing block.
2771 */
2772 if (blkoff(fs, diroffset) == 0)
2773 return (1);
2774 return (0);
2775 }
2776 /*
2777 * We only have a new allocation when at the beginning
2778 * of a new fragment, not when we are expanding into an
2779 * existing fragment. Also, there is nothing to do if we
2780 * are already tracking this block.
2781 */
2782 if (fragoff(fs, diroffset) != 0) {
2783 FREE_LOCK(&lk);
2784 return (0);
2785 }
2786 if ((pagedep->pd_state & NEWBLOCK) != 0) {
2787 WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
2788 FREE_LOCK(&lk);
2789 return (0);
2790 }
2791 /*
2792 * Find our associated allocdirect and have it track us.
2793 */
2794 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0)
2795 panic("softdep_setup_directory_add: lost inodedep");
2796 adp = TAILQ_LAST(&inodedep->id_newinoupdt, allocdirectlst);
2797 if (adp == NULL || adp->ad_lbn != lbn) {
2798 FREE_LOCK(&lk);
2799 panic("softdep_setup_directory_add: lost entry");
2800 }
2801 pagedep->pd_state |= NEWBLOCK;
2802 newdirblk->db_pagedep = pagedep;
2803 WORKLIST_INSERT(&adp->ad_newdirblk, &newdirblk->db_list);
2804 }
2805 FREE_LOCK(&lk);
2806 return (0);
2807}
2808 
2809/*
2810 * This procedure is called to change the offset of a directory
2811 * entry when compacting a directory block, which must be owned
2812 * exclusively by the caller. Note that the actual entry movement
2813 * must be done in this procedure to ensure that no I/O completions
2814 * occur while the move is in progress.
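 * The diradd tracking the entry is sought first on the pd_diraddhd
 * hash lists and then on pd_pendinghd, rehashed under its new offset,
 * and only then are the entry's bytes themselves moved with bcopy().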
2815 */ 2816void 2817softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2818 struct inode *dp; /* inode for directory */ 2819 caddr_t base; /* address of dp->i_offset */ 2820 caddr_t oldloc; /* address of old directory location */ 2821 caddr_t newloc; /* address of new directory location */ 2822 int entrysize; /* size of directory entry */ 2823{ 2824 int offset, oldoffset, newoffset; 2825 struct pagedep *pagedep; 2826 struct diradd *dap; 2827 ufs_lbn_t lbn; 2828 2829 ACQUIRE_LOCK(&lk); 2830 lbn = lblkno(dp->i_fs, dp->i_offset); 2831 offset = blkoff(dp->i_fs, dp->i_offset); 2832 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2833 goto done; 2834 oldoffset = offset + (oldloc - base); 2835 newoffset = offset + (newloc - base); 2836 2837 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2838 if (dap->da_offset != oldoffset) 2839 continue; 2840 dap->da_offset = newoffset; 2841 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2842 break; 2843 LIST_REMOVE(dap, da_pdlist); 2844 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2845 dap, da_pdlist); 2846 break; 2847 } 2848 if (dap == NULL) { 2849 2850 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2851 if (dap->da_offset == oldoffset) { 2852 dap->da_offset = newoffset; 2853 break; 2854 } 2855 } 2856 } 2857done: 2858 bcopy(oldloc, newloc, entrysize); 2859 FREE_LOCK(&lk); 2860} 2861 2862/* 2863 * Free a diradd dependency structure. This routine must be called 2864 * with splbio interrupts blocked. 2865 */ 2866static void 2867free_diradd(dap) 2868 struct diradd *dap; 2869{ 2870 struct dirrem *dirrem; 2871 struct pagedep *pagedep; 2872 struct inodedep *inodedep; 2873 struct mkdir *mkdir, *nextmd; 2874 2875#ifdef DEBUG 2876 if (lk.lkt_held == NOHOLDER) 2877 panic("free_diradd: lock not held"); 2878#endif 2879 WORKLIST_REMOVE(&dap->da_list); 2880 LIST_REMOVE(dap, da_pdlist); 2881 if ((dap->da_state & DIRCHG) == 0) { 2882 pagedep = dap->da_pagedep; 2883 } else { 2884 dirrem = dap->da_previous; 2885 pagedep = dirrem->dm_pagedep; 2886 dirrem->dm_dirinum = pagedep->pd_ino; 2887 add_to_worklist(&dirrem->dm_list); 2888 } 2889 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2890 0, &inodedep) != 0) 2891 (void) free_inodedep(inodedep); 2892 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2893 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2894 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2895 if (mkdir->md_diradd != dap) 2896 continue; 2897 dap->da_state &= ~mkdir->md_state; 2898 WORKLIST_REMOVE(&mkdir->md_list); 2899 LIST_REMOVE(mkdir, md_mkdirs); 2900 WORKITEM_FREE(mkdir, D_MKDIR); 2901 } 2902 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2903 FREE_LOCK(&lk); 2904 panic("free_diradd: unfound ref"); 2905 } 2906 } 2907 WORKITEM_FREE(dap, D_DIRADD); 2908} 2909 2910/* 2911 * Directory entry removal dependencies. 2912 * 2913 * When removing a directory entry, the entry's inode pointer must be 2914 * zero'ed on disk before the corresponding inode's link count is decremented 2915 * (possibly freeing the inode for re-use). This dependency is handled by 2916 * updating the directory entry but delaying the inode count reduction until 2917 * after the directory block has been written to disk. After this point, the 2918 * inode count can be decremented whenever it is convenient. 2919 */ 2920 2921/* 2922 * This routine should be called immediately after removing 2923 * a directory entry. 
The inode's link count should not be 2924 * decremented by the calling procedure -- the soft updates 2925 * code will do this task when it is safe. 2926 */ 2927void 2928softdep_setup_remove(bp, dp, ip, isrmdir) 2929 struct buf *bp; /* buffer containing directory block */ 2930 struct inode *dp; /* inode for the directory being modified */ 2931 struct inode *ip; /* inode for directory entry being removed */ 2932 int isrmdir; /* indicates if doing RMDIR */ 2933{ 2934 struct dirrem *dirrem, *prevdirrem; 2935 2936 /* 2937 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2938 */ 2939 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2940 2941 /* 2942 * If the COMPLETE flag is clear, then there were no active 2943 * entries and we want to roll back to a zeroed entry until 2944 * the new inode is committed to disk. If the COMPLETE flag is 2945 * set then we have deleted an entry that never made it to 2946 * disk. If the entry we deleted resulted from a name change, 2947 * then the old name still resides on disk. We cannot delete 2948 * its inode (returned to us in prevdirrem) until the zeroed 2949 * directory entry gets to disk. The new inode has never been 2950 * referenced on the disk, so can be deleted immediately. 2951 */ 2952 if ((dirrem->dm_state & COMPLETE) == 0) { 2953 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2954 dm_next); 2955 FREE_LOCK(&lk); 2956 } else { 2957 if (prevdirrem != NULL) 2958 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2959 prevdirrem, dm_next); 2960 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2961 FREE_LOCK(&lk); 2962 handle_workitem_remove(dirrem, NULL); 2963 } 2964} 2965 2966/* 2967 * Allocate a new dirrem if appropriate and return it along with 2968 * its associated pagedep. Called without a lock, returns with lock. 2969 */ 2970static long num_dirrem; /* number of dirrem allocated */ 2971static struct dirrem * 2972newdirrem(bp, dp, ip, isrmdir, prevdirremp) 2973 struct buf *bp; /* buffer containing directory block */ 2974 struct inode *dp; /* inode for the directory being modified */ 2975 struct inode *ip; /* inode for directory entry being removed */ 2976 int isrmdir; /* indicates if doing RMDIR */ 2977 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 2978{ 2979 int offset; 2980 ufs_lbn_t lbn; 2981 struct diradd *dap; 2982 struct dirrem *dirrem; 2983 struct pagedep *pagedep; 2984 2985 /* 2986 * Whiteouts have no deletion dependencies. 2987 */ 2988 if (ip == NULL) 2989 panic("newdirrem: whiteout"); 2990 /* 2991 * If we are over our limit, try to improve the situation. 2992 * Limiting the number of dirrem structures will also limit 2993 * the number of freefile and freeblks structures. 2994 */ 2995 if (num_dirrem > max_softdeps / 2) 2996 (void) request_cleanup(FLUSH_REMOVE, 0); 2997 num_dirrem += 1; 2998 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 2999 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO); 3000 dirrem->dm_list.wk_type = D_DIRREM; 3001 dirrem->dm_state = isrmdir ? RMDIR : 0; 3002 dirrem->dm_mnt = ITOV(ip)->v_mount; 3003 dirrem->dm_oldinum = ip->i_number; 3004 *prevdirremp = NULL; 3005 3006 ACQUIRE_LOCK(&lk); 3007 lbn = lblkno(dp->i_fs, dp->i_offset); 3008 offset = blkoff(dp->i_fs, dp->i_offset); 3009 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 3010 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 3011 dirrem->dm_pagedep = pagedep; 3012 /* 3013 * Check for a diradd dependency for the same directory entry. 3014 * If present, then both dependencies become obsolete and can 3015 * be de-allocated. 
Check for an entry on both the pd_diraddhd
3016 * list and the pd_pendinghd list.
3017 */
3018 
3019 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
3020 if (dap->da_offset == offset)
3021 break;
3022 if (dap == NULL) {
3023 
3024 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
3025 if (dap->da_offset == offset)
3026 break;
3027 if (dap == NULL)
3028 return (dirrem);
3029 }
3030 /*
3031 * Must be ATTACHED at this point.
3032 */
3033 if ((dap->da_state & ATTACHED) == 0) {
3034 FREE_LOCK(&lk);
3035 panic("newdirrem: not ATTACHED");
3036 }
3037 if (dap->da_newinum != ip->i_number) {
3038 FREE_LOCK(&lk);
3039 panic("newdirrem: inum %d should be %d",
3040 ip->i_number, dap->da_newinum);
3041 }
3042 /*
3043 * If we are deleting a changed name that never made it to disk,
3044 * then return the dirrem describing the previous inode (which
3045 * represents the inode currently referenced from this entry on disk).
3046 */
3047 if ((dap->da_state & DIRCHG) != 0) {
3048 *prevdirremp = dap->da_previous;
3049 dap->da_state &= ~DIRCHG;
3050 dap->da_pagedep = pagedep;
3051 }
3052 /*
3053 * We are deleting an entry that never made it to disk.
3054 * Mark it COMPLETE so we can delete its inode immediately.
3055 */
3056 dirrem->dm_state |= COMPLETE;
3057 free_diradd(dap);
3058 return (dirrem);
3059}
3060 
3061/*
3062 * Directory entry change dependencies.
3063 *
3064 * Changing an existing directory entry requires that an add operation
3065 * be completed first, followed by a deletion. The semantics for the addition
3066 * are identical to the description of adding a new entry above except
3067 * that the rollback is to the old inode number rather than zero. Once
3068 * the addition dependency is completed, the removal is done as described
3069 * in the removal routine above.
3070 */
3071 
3072/*
3073 * This routine should be called immediately after changing
3074 * a directory entry. The inode's link count should not be
3075 * decremented by the calling procedure -- the soft updates
3076 * code will perform this task when it is safe.
3077 */
3078void
3079softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
3080 struct buf *bp; /* buffer containing directory block */
3081 struct inode *dp; /* inode for the directory being modified */
3082 struct inode *ip; /* inode for directory entry being removed */
3083 ino_t newinum; /* new inode number for changed entry */
3084 int isrmdir; /* indicates if doing RMDIR */
3085{
3086 int offset;
3087 struct diradd *dap = NULL;
3088 struct dirrem *dirrem, *prevdirrem;
3089 struct pagedep *pagedep;
3090 struct inodedep *inodedep;
3091 
3092 offset = blkoff(dp->i_fs, dp->i_offset);
3093 
3094 /*
3095 * Whiteouts do not need diradd dependencies.
3096 */
3097 if (newinum != WINO) {
3098 MALLOC(dap, struct diradd *, sizeof(struct diradd),
3099 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
3100 dap->da_list.wk_type = D_DIRADD;
3101 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
3102 dap->da_offset = offset;
3103 dap->da_newinum = newinum;
3104 }
3105 
3106 /*
3107 * Allocate a new dirrem and ACQUIRE_LOCK.
3108 */ 3109 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 3110 pagedep = dirrem->dm_pagedep; 3111 /* 3112 * The possible values for isrmdir: 3113 * 0 - non-directory file rename 3114 * 1 - directory rename within same directory 3115 * inum - directory rename to new directory of given inode number 3116 * When renaming to a new directory, we are both deleting and 3117 * creating a new directory entry, so the link count on the new 3118 * directory should not change. Thus we do not need the followup 3119 * dirrem which is usually done in handle_workitem_remove. We set 3120 * the DIRCHG flag to tell handle_workitem_remove to skip the 3121 * followup dirrem. 3122 */ 3123 if (isrmdir > 1) 3124 dirrem->dm_state |= DIRCHG; 3125 3126 /* 3127 * Whiteouts have no additional dependencies, 3128 * so just put the dirrem on the correct list. 3129 */ 3130 if (newinum == WINO) { 3131 if ((dirrem->dm_state & COMPLETE) == 0) { 3132 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 3133 dm_next); 3134 } else { 3135 dirrem->dm_dirinum = pagedep->pd_ino; 3136 add_to_worklist(&dirrem->dm_list); 3137 } 3138 FREE_LOCK(&lk); 3139 return; 3140 } 3141 3142 /* 3143 * If the COMPLETE flag is clear, then there were no active 3144 * entries and we want to roll back to the previous inode until 3145 * the new inode is committed to disk. If the COMPLETE flag is 3146 * set, then we have deleted an entry that never made it to disk. 3147 * If the entry we deleted resulted from a name change, then the old 3148 * inode reference still resides on disk. Any rollback that we do 3149 * needs to be to that old inode (returned to us in prevdirrem). If 3150 * the entry we deleted resulted from a create, then there is 3151 * no entry on the disk, so we want to roll back to zero rather 3152 * than the uncommitted inode. In either of the COMPLETE cases we 3153 * want to immediately free the unwritten and unreferenced inode. 3154 */ 3155 if ((dirrem->dm_state & COMPLETE) == 0) { 3156 dap->da_previous = dirrem; 3157 } else { 3158 if (prevdirrem != NULL) { 3159 dap->da_previous = prevdirrem; 3160 } else { 3161 dap->da_state &= ~DIRCHG; 3162 dap->da_pagedep = pagedep; 3163 } 3164 dirrem->dm_dirinum = pagedep->pd_ino; 3165 add_to_worklist(&dirrem->dm_list); 3166 } 3167 /* 3168 * Link into its inodedep. Put it on the id_bufwait list if the inode 3169 * is not yet written. If it is written, do the post-inode write 3170 * processing to put it on the id_pendinghd list. 3171 */ 3172 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 3173 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 3174 dap->da_state |= COMPLETE; 3175 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3176 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3177 } else { 3178 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 3179 dap, da_pdlist); 3180 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 3181 } 3182 FREE_LOCK(&lk); 3183} 3184 3185/* 3186 * Called whenever the link count on an inode is changed. 3187 * It creates an inode dependency so that the new reference(s) 3188 * to the inode cannot be committed to disk until the updated 3189 * inode has been written. 
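 * The delta recorded in id_nlinkdelta is i_nlink - i_effnlink, i.e.,
 * the number of link-count decrements that are visible in memory but
 * whose directory updates have not yet been committed to disk.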
3190 */ 3191void 3192softdep_change_linkcnt(ip) 3193 struct inode *ip; /* the inode with the increased link count */ 3194{ 3195 struct inodedep *inodedep; 3196 3197 ACQUIRE_LOCK(&lk); 3198 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep); 3199 if (ip->i_nlink < ip->i_effnlink) { 3200 FREE_LOCK(&lk); 3201 panic("softdep_change_linkcnt: bad delta"); 3202 } 3203 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3204 FREE_LOCK(&lk); 3205} 3206 3207/* 3208 * Called when the effective link count and the reference count 3209 * on an inode drops to zero. At this point there are no names 3210 * referencing the file in the filesystem and no active file 3211 * references. The space associated with the file will be freed 3212 * as soon as the necessary soft dependencies are cleared. 3213 */ 3214void 3215softdep_releasefile(ip) 3216 struct inode *ip; /* inode with the zero effective link count */ 3217{ 3218 struct inodedep *inodedep; 3219 struct fs *fs; 3220 int extblocks; 3221 3222 if (ip->i_effnlink > 0) 3223 panic("softdep_filerelease: file still referenced"); 3224 /* 3225 * We may be called several times as the real reference count 3226 * drops to zero. We only want to account for the space once. 3227 */ 3228 if (ip->i_flag & IN_SPACECOUNTED) 3229 return; 3230 /* 3231 * We have to deactivate a snapshot otherwise copyonwrites may 3232 * add blocks and the cleanup may remove blocks after we have 3233 * tried to account for them. 3234 */ 3235 if ((ip->i_flags & SF_SNAPSHOT) != 0) 3236 ffs_snapremove(ITOV(ip)); 3237 /* 3238 * If we are tracking an nlinkdelta, we have to also remember 3239 * whether we accounted for the freed space yet. 3240 */ 3241 ACQUIRE_LOCK(&lk); 3242 if ((inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep))) 3243 inodedep->id_state |= SPACECOUNTED; 3244 FREE_LOCK(&lk); 3245 fs = ip->i_fs; 3246 extblocks = 0; 3247 if (fs->fs_magic == FS_UFS2_MAGIC) 3248 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 3249 ip->i_fs->fs_pendingblocks += DIP(ip, i_blocks) - extblocks; 3250 ip->i_fs->fs_pendinginodes += 1; 3251 ip->i_flag |= IN_SPACECOUNTED; 3252} 3253 3254/* 3255 * This workitem decrements the inode's link count. 3256 * If the link count reaches zero, the file is removed. 3257 */ 3258static void 3259handle_workitem_remove(dirrem, xp) 3260 struct dirrem *dirrem; 3261 struct vnode *xp; 3262{ 3263 struct thread *td = curthread; 3264 struct inodedep *inodedep; 3265 struct vnode *vp; 3266 struct inode *ip; 3267 ino_t oldinum; 3268 int error; 3269 3270 if ((vp = xp) == NULL && 3271 (error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, LK_EXCLUSIVE, 3272 &vp)) != 0) { 3273 softdep_error("handle_workitem_remove: vget", error); 3274 return; 3275 } 3276 ip = VTOI(vp); 3277 ACQUIRE_LOCK(&lk); 3278 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 3279 FREE_LOCK(&lk); 3280 panic("handle_workitem_remove: lost inodedep"); 3281 } 3282 /* 3283 * Normal file deletion. 3284 */ 3285 if ((dirrem->dm_state & RMDIR) == 0) { 3286 ip->i_nlink--; 3287 DIP(ip, i_nlink) = ip->i_nlink; 3288 ip->i_flag |= IN_CHANGE; 3289 if (ip->i_nlink < ip->i_effnlink) { 3290 FREE_LOCK(&lk); 3291 panic("handle_workitem_remove: bad file delta"); 3292 } 3293 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3294 FREE_LOCK(&lk); 3295 vput(vp); 3296 num_dirrem -= 1; 3297 WORKITEM_FREE(dirrem, D_DIRREM); 3298 return; 3299 } 3300 /* 3301 * Directory deletion. Decrement reference count for both the 3302 * just deleted parent directory entry and the reference for ".". 
3303 * Next truncate the directory to length zero. When the
3304 * truncation completes, arrange to have the reference count on
3305 * the parent decremented to account for the loss of "..".
3306 */
3307 ip->i_nlink -= 2;
3308 DIP(ip, i_nlink) = ip->i_nlink;
3309 ip->i_flag |= IN_CHANGE;
3310 if (ip->i_nlink < ip->i_effnlink) {
3311 FREE_LOCK(&lk);
3312 panic("handle_workitem_remove: bad dir delta");
3313 }
3314 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
3315 FREE_LOCK(&lk);
3316 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, td->td_ucred, td)) != 0)
3317 softdep_error("handle_workitem_remove: truncate", error);
3318 /*
3319 * Rename a directory to a new parent. Since we are both deleting
3320 * and creating a new directory entry, the link count on the new
3321 * directory should not change. Thus we skip the followup dirrem.
3322 */
3323 if (dirrem->dm_state & DIRCHG) {
3324 vput(vp);
3325 num_dirrem -= 1;
3326 WORKITEM_FREE(dirrem, D_DIRREM);
3327 return;
3328 }
3329 /*
3330 * If the inodedep does not exist, then the zero'ed inode has
3331 * been written to disk. If the allocated inode has never been
3332 * written to disk, then the on-disk inode is zero'ed. In either
3333 * case we can remove the file immediately.
3334 */
3335 ACQUIRE_LOCK(&lk);
3336 dirrem->dm_state = 0;
3337 oldinum = dirrem->dm_oldinum;
3338 dirrem->dm_oldinum = dirrem->dm_dirinum;
3339 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
3340 check_inode_unwritten(inodedep)) {
3341 FREE_LOCK(&lk);
3342 vput(vp);
3343 handle_workitem_remove(dirrem, NULL);
3344 return;
3345 }
3346 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
3347 FREE_LOCK(&lk);
3348 vput(vp);
3349}
3350 
3351/*
3352 * Inode de-allocation dependencies.
3353 *
3354 * When an inode's link count is reduced to zero, it can be de-allocated. We
3355 * found it convenient to postpone de-allocation until after the inode is
3356 * written to disk with its new link count (zero). At this point, all of the
3357 * on-disk inode's block pointers are nullified and, with careful dependency
3358 * list ordering, all dependencies related to the inode will be satisfied and
3359 * the corresponding dependency structures de-allocated. So, if/when the
3360 * inode is reused, there will be no mixing of old dependencies with new
3361 * ones. This artificial dependency is set up by the block de-allocation
3362 * procedure above (softdep_setup_freeblocks) and completed by the
3363 * following procedure.
3364 */
3365static void
3366handle_workitem_freefile(freefile)
3367 struct freefile *freefile;
3368{
3369 struct fs *fs;
3370 struct inodedep *idp;
3371 int error;
3372 
3373 fs = VFSTOUFS(freefile->fx_mnt)->um_fs;
3374#ifdef DEBUG
3375 ACQUIRE_LOCK(&lk);
3376 error = inodedep_lookup(fs, freefile->fx_oldinum, 0, &idp);
3377 FREE_LOCK(&lk);
3378 if (error)
3379 panic("handle_workitem_freefile: inodedep survived");
3380#endif
3381 fs->fs_pendinginodes -= 1;
3382 if ((error = ffs_freefile(fs, freefile->fx_devvp, freefile->fx_oldinum,
3383 freefile->fx_mode)) != 0)
3384 softdep_error("handle_workitem_freefile", error);
3385 WORKITEM_FREE(freefile, D_FREEFILE);
3386}
3387 
3388/*
3389 * Disk writes.
3390 *
3391 * The dependency structures constructed above are most actively used when file
3392 * system blocks are written to disk. No constraints are placed on when a
3393 * block can be written, but unsatisfied update dependencies are made safe by
3394 * modifying (or replacing) the source memory for the duration of the disk
3395 * write.
When the disk write completes, the memory block is again brought 3396 * up-to-date. 3397 * 3398 * In-core inode structure reclamation. 3399 * 3400 * Because there are a finite number of "in-core" inode structures, they are 3401 * reused regularly. By transferring all inode-related dependencies to the 3402 * in-memory inode block and indexing them separately (via "inodedep"s), we 3403 * can allow "in-core" inode structures to be reused at any time and avoid 3404 * any increase in contention. 3405 * 3406 * Called just before entering the device driver to initiate a new disk I/O. 3407 * The buffer must be locked, thus, no I/O completion operations can occur 3408 * while we are manipulating its associated dependencies. 3409 */ 3410static void 3411softdep_disk_io_initiation(bp) 3412 struct buf *bp; /* structure describing disk write to occur */ 3413{ 3414 struct worklist *wk, *nextwk; 3415 struct indirdep *indirdep; 3416 struct inodedep *inodedep; 3417 3418 /* 3419 * We only care about write operations. There should never 3420 * be dependencies for reads. 3421 */ 3422 if (bp->b_iocmd == BIO_READ) 3423 panic("softdep_disk_io_initiation: read"); 3424 /* 3425 * Do any necessary pre-I/O processing. 3426 */ 3427 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) { 3428 nextwk = LIST_NEXT(wk, wk_list); 3429 switch (wk->wk_type) { 3430 3431 case D_PAGEDEP: 3432 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3433 continue; 3434 3435 case D_INODEDEP: 3436 inodedep = WK_INODEDEP(wk); 3437 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) 3438 initiate_write_inodeblock_ufs1(inodedep, bp); 3439 else 3440 initiate_write_inodeblock_ufs2(inodedep, bp); 3441 continue; 3442 3443 case D_INDIRDEP: 3444 indirdep = WK_INDIRDEP(wk); 3445 if (indirdep->ir_state & GOINGAWAY) 3446 panic("disk_io_initiation: indirdep gone"); 3447 /* 3448 * If there are no remaining dependencies, this 3449 * will be writing the real pointers, so the 3450 * dependency can be freed. 3451 */ 3452 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3453 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3454 brelse(indirdep->ir_savebp); 3455 /* inline expand WORKLIST_REMOVE(wk); */ 3456 wk->wk_state &= ~ONWORKLIST; 3457 LIST_REMOVE(wk, wk_list); 3458 WORKITEM_FREE(indirdep, D_INDIRDEP); 3459 continue; 3460 } 3461 /* 3462 * Replace up-to-date version with safe version. 3463 */ 3464 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3465 M_INDIRDEP, M_SOFTDEP_FLAGS); 3466 ACQUIRE_LOCK(&lk); 3467 indirdep->ir_state &= ~ATTACHED; 3468 indirdep->ir_state |= UNDONE; 3469 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3470 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3471 bp->b_bcount); 3472 FREE_LOCK(&lk); 3473 continue; 3474 3475 case D_MKDIR: 3476 case D_BMSAFEMAP: 3477 case D_ALLOCDIRECT: 3478 case D_ALLOCINDIR: 3479 continue; 3480 3481 default: 3482 panic("handle_disk_io_initiation: Unexpected type %s", 3483 TYPENAME(wk->wk_type)); 3484 /* NOTREACHED */ 3485 } 3486 } 3487} 3488 3489/* 3490 * Called from within the procedure above to deal with unsatisfied 3491 * allocation dependencies in a directory. The buffer must be locked, 3492 * thus, no I/O completion operations can occur while we are 3493 * manipulating its associated dependencies. 
3494 */ 3495static void 3496initiate_write_filepage(pagedep, bp) 3497 struct pagedep *pagedep; 3498 struct buf *bp; 3499{ 3500 struct diradd *dap; 3501 struct direct *ep; 3502 int i; 3503 3504 if (pagedep->pd_state & IOSTARTED) { 3505 /* 3506 * This can only happen if there is a driver that does not 3507 * understand chaining. Here biodone will reissue the call 3508 * to strategy for the incomplete buffers. 3509 */ 3510 printf("initiate_write_filepage: already started\n"); 3511 return; 3512 } 3513 pagedep->pd_state |= IOSTARTED; 3514 ACQUIRE_LOCK(&lk); 3515 for (i = 0; i < DAHASHSZ; i++) { 3516 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3517 ep = (struct direct *) 3518 ((char *)bp->b_data + dap->da_offset); 3519 if (ep->d_ino != dap->da_newinum) { 3520 FREE_LOCK(&lk); 3521 panic("%s: dir inum %d != new %d", 3522 "initiate_write_filepage", 3523 ep->d_ino, dap->da_newinum); 3524 } 3525 if (dap->da_state & DIRCHG) 3526 ep->d_ino = dap->da_previous->dm_oldinum; 3527 else 3528 ep->d_ino = 0; 3529 dap->da_state &= ~ATTACHED; 3530 dap->da_state |= UNDONE; 3531 } 3532 } 3533 FREE_LOCK(&lk); 3534} 3535 3536/* 3537 * Version of initiate_write_inodeblock that handles UFS1 dinodes. 3538 * Note that any bug fixes made to this routine must be done in the 3539 * version found below. 3540 * 3541 * Called from within the procedure above to deal with unsatisfied 3542 * allocation dependencies in an inodeblock. The buffer must be 3543 * locked, thus, no I/O completion operations can occur while we 3544 * are manipulating its associated dependencies. 3545 */ 3546static void 3547initiate_write_inodeblock_ufs1(inodedep, bp) 3548 struct inodedep *inodedep; 3549 struct buf *bp; /* The inode block */ 3550{ 3551 struct allocdirect *adp, *lastadp; 3552 struct ufs1_dinode *dp; 3553 struct fs *fs; 3554 ufs_lbn_t i, prevlbn = 0; 3555 int deplist; 3556 3557 if (inodedep->id_state & IOSTARTED) 3558 panic("initiate_write_inodeblock_ufs1: already started"); 3559 inodedep->id_state |= IOSTARTED; 3560 fs = inodedep->id_fs; 3561 dp = (struct ufs1_dinode *)bp->b_data + 3562 ino_to_fsbo(fs, inodedep->id_ino); 3563 /* 3564 * If the bitmap is not yet written, then the allocated 3565 * inode cannot be written to disk. 3566 */ 3567 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3568 if (inodedep->id_savedino1 != NULL) 3569 panic("initiate_write_inodeblock_ufs1: I/O underway"); 3570 MALLOC(inodedep->id_savedino1, struct ufs1_dinode *, 3571 sizeof(struct ufs1_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3572 *inodedep->id_savedino1 = *dp; 3573 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3574 return; 3575 } 3576 /* 3577 * If no dependencies, then there is nothing to roll back. 3578 */ 3579 inodedep->id_savedsize = dp->di_size; 3580 inodedep->id_savedextsize = 0; 3581 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3582 return; 3583 /* 3584 * Set the dependencies to busy. 
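 * Each allocdirect on id_inoupdt is moved from ATTACHED to UNDONE for the duration of the write. Under DIAGNOSTIC, the deplist bitmask records one bit per dependent logical block number (the NDADDR direct plus NIADDR indirect pointer slots of a UFS1 inode fit comfortably in an int), which lets the rollback loops below detect a pointer left set without a matching dependency.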
3585 */ 3586 ACQUIRE_LOCK(&lk); 3587 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3588 adp = TAILQ_NEXT(adp, ad_next)) { 3589#ifdef DIAGNOSTIC 3590 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3591 FREE_LOCK(&lk); 3592 panic("softdep_write_inodeblock: lbn order"); 3593 } 3594 prevlbn = adp->ad_lbn; 3595 if (adp->ad_lbn < NDADDR && 3596 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3597 FREE_LOCK(&lk); 3598 panic("%s: direct pointer #%jd mismatch %d != %jd", 3599 "softdep_write_inodeblock", 3600 (intmax_t)adp->ad_lbn, 3601 dp->di_db[adp->ad_lbn], 3602 (intmax_t)adp->ad_newblkno); 3603 } 3604 if (adp->ad_lbn >= NDADDR && 3605 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3606 FREE_LOCK(&lk); 3607 panic("%s: indirect pointer #%jd mismatch %d != %jd", 3608 "softdep_write_inodeblock", 3609 (intmax_t)adp->ad_lbn - NDADDR, 3610 dp->di_ib[adp->ad_lbn - NDADDR], 3611 (intmax_t)adp->ad_newblkno); 3612 } 3613 deplist |= 1 << adp->ad_lbn; 3614 if ((adp->ad_state & ATTACHED) == 0) { 3615 FREE_LOCK(&lk); 3616 panic("softdep_write_inodeblock: Unknown state 0x%x", 3617 adp->ad_state); 3618 } 3619#endif /* DIAGNOSTIC */ 3620 adp->ad_state &= ~ATTACHED; 3621 adp->ad_state |= UNDONE; 3622 } 3623 /* 3624 * The on-disk inode cannot claim to be any larger than the last 3625 * fragment that has been written. Otherwise, the on-disk inode 3626 * might have fragments that were not the last block in the file 3627 * which would corrupt the filesystem. 3628 */ 3629 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3630 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3631 if (adp->ad_lbn >= NDADDR) 3632 break; 3633 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3634 /* keep going until hitting a rollback to a frag */ 3635 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3636 continue; 3637 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3638 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3639#ifdef DIAGNOSTIC 3640 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3641 FREE_LOCK(&lk); 3642 panic("softdep_write_inodeblock: lost dep1"); 3643 } 3644#endif /* DIAGNOSTIC */ 3645 dp->di_db[i] = 0; 3646 } 3647 for (i = 0; i < NIADDR; i++) { 3648#ifdef DIAGNOSTIC 3649 if (dp->di_ib[i] != 0 && 3650 (deplist & ((1 << NDADDR) << i)) == 0) { 3651 FREE_LOCK(&lk); 3652 panic("softdep_write_inodeblock: lost dep2"); 3653 } 3654#endif /* DIAGNOSTIC */ 3655 dp->di_ib[i] = 0; 3656 } 3657 FREE_LOCK(&lk); 3658 return; 3659 } 3660 /* 3661 * If we have zero'ed out the last allocated block of the file, 3662 * roll back the size to the last currently allocated block. 3663 * We know that this last allocated block is full-sized, as 3664 * we already checked for fragments in the loop above. 3665 */ 3666 if (lastadp != NULL && 3667 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3668 for (i = lastadp->ad_lbn; i >= 0; i--) 3669 if (dp->di_db[i] != 0) 3670 break; 3671 dp->di_size = (i + 1) * fs->fs_bsize; 3672 } 3673 /* 3674 * The only dependencies are for indirect blocks. 3675 * 3676 * The file size for indirect block additions is not guaranteed. 3677 * Such a guarantee would be non-trivial to achieve. The conventional 3678 * synchronous write implementation also does not make this guarantee. 3679 * Fsck should catch and fix discrepancies. Arguably, the file size 3680 * can be over-estimated without destroying integrity when the file 3681 * moves into the indirect blocks (i.e., is large). If we want to 3682 * postpone fsck, we are stuck with this argument.
3683 */ 3684 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3685 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3686 FREE_LOCK(&lk); 3687} 3688 3689/* 3690 * Version of initiate_write_inodeblock that handles UFS2 dinodes. 3691 * Note that any bug fixes made to this routine must be done in the 3692 * version found above. 3693 * 3694 * Called from within the procedure above to deal with unsatisfied 3695 * allocation dependencies in an inodeblock. The buffer must be 3696 * locked, thus, no I/O completion operations can occur while we 3697 * are manipulating its associated dependencies. 3698 */ 3699static void 3700initiate_write_inodeblock_ufs2(inodedep, bp) 3701 struct inodedep *inodedep; 3702 struct buf *bp; /* The inode block */ 3703{ 3704 struct allocdirect *adp, *lastadp; 3705 struct ufs2_dinode *dp; 3706 struct fs *fs; 3707 ufs_lbn_t i, prevlbn = 0; 3708 int deplist; 3709 3710 if (inodedep->id_state & IOSTARTED) 3711 panic("initiate_write_inodeblock_ufs2: already started"); 3712 inodedep->id_state |= IOSTARTED; 3713 fs = inodedep->id_fs; 3714 dp = (struct ufs2_dinode *)bp->b_data + 3715 ino_to_fsbo(fs, inodedep->id_ino); 3716 /* 3717 * If the bitmap is not yet written, then the allocated 3718 * inode cannot be written to disk. 3719 */ 3720 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3721 if (inodedep->id_savedino2 != NULL) 3722 panic("initiate_write_inodeblock_ufs2: I/O underway"); 3723 MALLOC(inodedep->id_savedino2, struct ufs2_dinode *, 3724 sizeof(struct ufs2_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3725 *inodedep->id_savedino2 = *dp; 3726 bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); 3727 return; 3728 } 3729 /* 3730 * If no dependencies, then there is nothing to roll back. 3731 */ 3732 inodedep->id_savedsize = dp->di_size; 3733 inodedep->id_savedextsize = dp->di_extsize; 3734 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL && 3735 TAILQ_FIRST(&inodedep->id_extupdt) == NULL) 3736 return; 3737 /* 3738 * Set the ext data dependencies to busy. 3739 */ 3740 ACQUIRE_LOCK(&lk); 3741 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 3742 adp = TAILQ_NEXT(adp, ad_next)) { 3743#ifdef DIAGNOSTIC 3744 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3745 FREE_LOCK(&lk); 3746 panic("softdep_write_inodeblock: lbn order"); 3747 } 3748 prevlbn = adp->ad_lbn; 3749 if (dp->di_extb[adp->ad_lbn] != adp->ad_newblkno) { 3750 FREE_LOCK(&lk); 3751 panic("%s: direct pointer #%jd mismatch %jd != %jd", 3752 "softdep_write_inodeblock", 3753 (intmax_t)adp->ad_lbn, 3754 (intmax_t)dp->di_extb[adp->ad_lbn], 3755 (intmax_t)adp->ad_newblkno); 3756 } 3757 deplist |= 1 << adp->ad_lbn; 3758 if ((adp->ad_state & ATTACHED) == 0) { 3759 FREE_LOCK(&lk); 3760 panic("softdep_write_inodeblock: Unknown state 0x%x", 3761 adp->ad_state); 3762 } 3763#endif /* DIAGNOSTIC */ 3764 adp->ad_state &= ~ATTACHED; 3765 adp->ad_state |= UNDONE; 3766 } 3767 /* 3768 * The on-disk inode cannot claim to be any larger than the last 3769 * fragment that has been written. Otherwise, the on-disk inode 3770 * might have fragments that were not the last block in the ext 3771 * data which would corrupt the filesystem. 
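 * The loop below stops at the first rollback to a fragment: it trims di_extsize to end at that fragment, zeroes the remaining di_extb slots, and clears lastadp so that the full-block size adjustment that follows is skipped.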
3772 */ 3773 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 3774 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3775 dp->di_extb[adp->ad_lbn] = adp->ad_oldblkno; 3776 /* keep going until hitting a rollback to a frag */ 3777 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3778 continue; 3779 dp->di_extsize = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3780 for (i = adp->ad_lbn + 1; i < NXADDR; i++) { 3781#ifdef DIAGNOSTIC 3782 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) { 3783 FREE_LOCK(&lk); 3784 panic("softdep_write_inodeblock: lost dep1"); 3785 } 3786#endif /* DIAGNOSTIC */ 3787 dp->di_extb[i] = 0; 3788 } 3789 lastadp = NULL; 3790 break; 3791 } 3792 /* 3793 * If we have zero'ed out the last allocated block of the ext 3794 * data, roll back the size to the last currently allocated block. 3795 * We know that this last allocated block is full-sized, as 3796 * we already checked for fragments in the loop above. 3797 */ 3798 if (lastadp != NULL && 3799 dp->di_extsize <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3800 for (i = lastadp->ad_lbn; i >= 0; i--) 3801 if (dp->di_extb[i] != 0) 3802 break; 3803 dp->di_extsize = (i + 1) * fs->fs_bsize; 3804 } 3805 /* 3806 * Set the file data dependencies to busy. 3807 */ 3808 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3809 adp = TAILQ_NEXT(adp, ad_next)) { 3810#ifdef DIAGNOSTIC 3811 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3812 FREE_LOCK(&lk); 3813 panic("softdep_write_inodeblock: lbn order"); 3814 } 3815 prevlbn = adp->ad_lbn; 3816 if (adp->ad_lbn < NDADDR && 3817 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3818 FREE_LOCK(&lk); 3819 panic("%s: direct pointer #%jd mismatch %jd != %jd", 3820 "softdep_write_inodeblock", 3821 (intmax_t)adp->ad_lbn, 3822 (intmax_t)dp->di_db[adp->ad_lbn], 3823 (intmax_t)adp->ad_newblkno); 3824 } 3825 if (adp->ad_lbn >= NDADDR && 3826 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3827 FREE_LOCK(&lk); 3828 panic("%s: indirect pointer #%jd mismatch %jd != %jd", 3829 "softdep_write_inodeblock", 3830 (intmax_t)adp->ad_lbn - NDADDR, 3831 (intmax_t)dp->di_ib[adp->ad_lbn - NDADDR], 3832 (intmax_t)adp->ad_newblkno); 3833 } 3834 deplist |= 1 << adp->ad_lbn; 3835 if ((adp->ad_state & ATTACHED) == 0) { 3836 FREE_LOCK(&lk); 3837 panic("softdep_write_inodeblock: Unknown state 0x%x", 3838 adp->ad_state); 3839 } 3840#endif /* DIAGNOSTIC */ 3841 adp->ad_state &= ~ATTACHED; 3842 adp->ad_state |= UNDONE; 3843 } 3844 /* 3845 * The on-disk inode cannot claim to be any larger than the last 3846 * fragment that has been written. Otherwise, the on-disk inode 3847 * might have fragments that were not the last block in the file 3848 * which would corrupt the filesystem.
3849 */ 3850 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3851 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3852 if (adp->ad_lbn >= NDADDR) 3853 break; 3854 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3855 /* keep going until hitting a rollback to a frag */ 3856 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3857 continue; 3858 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3859 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3860#ifdef DIAGNOSTIC 3861 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3862 FREE_LOCK(&lk); 3863 panic("softdep_write_inodeblock: lost dep2"); 3864 } 3865#endif /* DIAGNOSTIC */ 3866 dp->di_db[i] = 0; 3867 } 3868 for (i = 0; i < NIADDR; i++) { 3869#ifdef DIAGNOSTIC 3870 if (dp->di_ib[i] != 0 && 3871 (deplist & ((1 << NDADDR) << i)) == 0) { 3872 FREE_LOCK(&lk); 3873 panic("softdep_write_inodeblock: lost dep3"); 3874 } 3875#endif /* DIAGNOSTIC */ 3876 dp->di_ib[i] = 0; 3877 } 3878 FREE_LOCK(&lk); 3879 return; 3880 } 3881 /* 3882 * If we have zero'ed out the last allocated block of the file, 3883 * roll back the size to the last currently allocated block. 3884 * We know that this last allocated block is full-sized, as 3885 * we already checked for fragments in the loop above. 3886 */ 3887 if (lastadp != NULL && 3888 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3889 for (i = lastadp->ad_lbn; i >= 0; i--) 3890 if (dp->di_db[i] != 0) 3891 break; 3892 dp->di_size = (i + 1) * fs->fs_bsize; 3893 } 3894 /* 3895 * The only dependencies are for indirect blocks. 3896 * 3897 * The file size for indirect block additions is not guaranteed. 3898 * Such a guarantee would be non-trivial to achieve. The conventional 3899 * synchronous write implementation also does not make this guarantee. 3900 * Fsck should catch and fix discrepancies. Arguably, the file size 3901 * can be over-estimated without destroying integrity when the file 3902 * moves into the indirect blocks (i.e., is large). If we want to 3903 * postpone fsck, we are stuck with this argument. 3904 */ 3905 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3906 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3907 FREE_LOCK(&lk); 3908} 3909 3910/* 3911 * This routine is called during the completion interrupt 3912 * service routine for a disk write (from the procedure called 3913 * by the device driver to inform the filesystem caches of 3914 * a request completion). It should be called early in this 3915 * procedure, before the block is made available to other 3916 * processes or other routines are called.
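 * Because it runs in interrupt context, this routine must not sleep or take the softdep lock in the normal way; the DEBUG code below instead tags lk.lkt_held with SPECIAL_FLAG so that the lock assertions keep working while splbio protection is in effect.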
3917 */ 3918static void 3919softdep_disk_write_complete(bp) 3920 struct buf *bp; /* describes the completed disk write */ 3921{ 3922 struct worklist *wk; 3923 struct workhead reattach; 3924 struct newblk *newblk; 3925 struct allocindir *aip; 3926 struct allocdirect *adp; 3927 struct indirdep *indirdep; 3928 struct inodedep *inodedep; 3929 struct bmsafemap *bmsafemap; 3930 3931#ifdef DEBUG 3932 if (lk.lkt_held != NOHOLDER) 3933 panic("softdep_disk_write_complete: lock is held"); 3934 lk.lkt_held = SPECIAL_FLAG; 3935#endif 3936 LIST_INIT(&reattach); 3937 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3938 WORKLIST_REMOVE(wk); 3939 switch (wk->wk_type) { 3940 3941 case D_PAGEDEP: 3942 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3943 WORKLIST_INSERT(&reattach, wk); 3944 continue; 3945 3946 case D_INODEDEP: 3947 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3948 WORKLIST_INSERT(&reattach, wk); 3949 continue; 3950 3951 case D_BMSAFEMAP: 3952 bmsafemap = WK_BMSAFEMAP(wk); 3953 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3954 newblk->nb_state |= DEPCOMPLETE; 3955 newblk->nb_bmsafemap = NULL; 3956 LIST_REMOVE(newblk, nb_deps); 3957 } 3958 while ((adp = 3959 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3960 adp->ad_state |= DEPCOMPLETE; 3961 adp->ad_buf = NULL; 3962 LIST_REMOVE(adp, ad_deps); 3963 handle_allocdirect_partdone(adp); 3964 } 3965 while ((aip = 3966 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3967 aip->ai_state |= DEPCOMPLETE; 3968 aip->ai_buf = NULL; 3969 LIST_REMOVE(aip, ai_deps); 3970 handle_allocindir_partdone(aip); 3971 } 3972 while ((inodedep = 3973 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3974 inodedep->id_state |= DEPCOMPLETE; 3975 LIST_REMOVE(inodedep, id_deps); 3976 inodedep->id_buf = NULL; 3977 } 3978 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3979 continue; 3980 3981 case D_MKDIR: 3982 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3983 continue; 3984 3985 case D_ALLOCDIRECT: 3986 adp = WK_ALLOCDIRECT(wk); 3987 adp->ad_state |= COMPLETE; 3988 handle_allocdirect_partdone(adp); 3989 continue; 3990 3991 case D_ALLOCINDIR: 3992 aip = WK_ALLOCINDIR(wk); 3993 aip->ai_state |= COMPLETE; 3994 handle_allocindir_partdone(aip); 3995 continue; 3996 3997 case D_INDIRDEP: 3998 indirdep = WK_INDIRDEP(wk); 3999 if (indirdep->ir_state & GOINGAWAY) { 4000 lk.lkt_held = NOHOLDER; 4001 panic("disk_write_complete: indirdep gone"); 4002 } 4003 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 4004 FREE(indirdep->ir_saveddata, M_INDIRDEP); 4005 indirdep->ir_saveddata = 0; 4006 indirdep->ir_state &= ~UNDONE; 4007 indirdep->ir_state |= ATTACHED; 4008 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 4009 handle_allocindir_partdone(aip); 4010 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 4011 lk.lkt_held = NOHOLDER; 4012 panic("disk_write_complete: not gone"); 4013 } 4014 } 4015 WORKLIST_INSERT(&reattach, wk); 4016 if ((bp->b_flags & B_DELWRI) == 0) 4017 stat_indir_blk_ptrs++; 4018 bdirty(bp); 4019 continue; 4020 4021 default: 4022 lk.lkt_held = NOHOLDER; 4023 panic("handle_disk_write_complete: Unknown type %s", 4024 TYPENAME(wk->wk_type)); 4025 /* NOTREACHED */ 4026 } 4027 } 4028 /* 4029 * Reattach any requests that must be redone. 
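 * A request must be redone when this write could not fully resolve it, as when handle_written_filepage or handle_written_inodeblock returns nonzero after rolling entries forward, or when an indirect block's true pointers have just been restored. Hanging such items back on bp->b_dep ensures they are reprocessed when the re-dirtied buffer is written again.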
4030 */ 4031 while ((wk = LIST_FIRST(&reattach)) != NULL) { 4032 WORKLIST_REMOVE(wk); 4033 WORKLIST_INSERT(&bp->b_dep, wk); 4034 } 4035#ifdef DEBUG 4036 if (lk.lkt_held != SPECIAL_FLAG) 4037 panic("softdep_disk_write_complete: lock lost"); 4038 lk.lkt_held = NOHOLDER; 4039#endif 4040} 4041 4042/* 4043 * Called from within softdep_disk_write_complete above. Note that 4044 * this routine is always called from interrupt level with further 4045 * splbio interrupts blocked. 4046 */ 4047static void 4048handle_allocdirect_partdone(adp) 4049 struct allocdirect *adp; /* the completed allocdirect */ 4050{ 4051 struct allocdirectlst *listhead; 4052 struct allocdirect *listadp; 4053 struct inodedep *inodedep; 4054 long bsize, delay; 4055 4056 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 4057 return; 4058 if (adp->ad_buf != NULL) { 4059 lk.lkt_held = NOHOLDER; 4060 panic("handle_allocdirect_partdone: dangling dep"); 4061 } 4062 /* 4063 * The on-disk inode cannot claim to be any larger than the last 4064 * fragment that has been written. Otherwise, the on-disk inode 4065 * might have fragments that were not the last block in the file 4066 * which would corrupt the filesystem. Thus, we cannot free any 4067 * allocdirects after one whose ad_oldblkno claims a fragment as 4068 * these blocks must be rolled back to zero before writing the inode. 4069 * We check the currently active set of allocdirects in id_inoupdt 4070 * or id_extupdt as appropriate. 4071 */ 4072 inodedep = adp->ad_inodedep; 4073 bsize = inodedep->id_fs->fs_bsize; 4074 if (adp->ad_state & EXTDATA) 4075 listhead = &inodedep->id_extupdt; 4076 else 4077 listhead = &inodedep->id_inoupdt; 4078 TAILQ_FOREACH(listadp, listhead, ad_next) { 4079 /* found our block */ 4080 if (listadp == adp) 4081 break; 4082 /* continue if the old block is not a fragment */ 4083 if (listadp->ad_oldsize == 0 || 4084 listadp->ad_oldsize == bsize) 4085 continue; 4086 /* hit a fragment */ 4087 return; 4088 } 4089 /* 4090 * If we have reached the end of the current list without 4091 * finding the just finished dependency, then it must be 4092 * on the future dependency list. Future dependencies cannot 4093 * be freed until they are moved to the current list. 4094 */ 4095 if (listadp == NULL) { 4096#ifdef DEBUG 4097 if (adp->ad_state & EXTDATA) 4098 listhead = &inodedep->id_newextupdt; 4099 else 4100 listhead = &inodedep->id_newinoupdt; 4101 TAILQ_FOREACH(listadp, listhead, ad_next) 4102 /* found our block */ 4103 if (listadp == adp) 4104 break; 4105 if (listadp == NULL) { 4106 lk.lkt_held = NOHOLDER; 4107 panic("handle_allocdirect_partdone: lost dep"); 4108 } 4109#endif /* DEBUG */ 4110 return; 4111 } 4112 /* 4113 * If we have found the just finished dependency, then free 4114 * it along with anything that follows it that is complete. 4115 * If the inode still has a bitmap dependency, then it has 4116 * never been written to disk, hence the on-disk inode cannot 4117 * reference the old fragment so we can free it without delay. 4118 */ 4119 delay = (inodedep->id_state & DEPCOMPLETE); 4120 for (; adp; adp = listadp) { 4121 listadp = TAILQ_NEXT(adp, ad_next); 4122 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 4123 return; 4124 free_allocdirect(listhead, adp, delay); 4125 } 4126} 4127 4128/* 4129 * Called from within softdep_disk_write_complete above. Note that 4130 * this routine is always called from interrupt level with further 4131 * splbio interrupts blocked.
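 * If the indirect block is still rolled back (UNDONE), the new pointer cannot be installed yet; the allocindir is parked on ir_donehd and finished from softdep_disk_write_complete once the safe copy of the block has been written.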
4132 */ 4133static void 4134handle_allocindir_partdone(aip) 4135 struct allocindir *aip; /* the completed allocindir */ 4136{ 4137 struct indirdep *indirdep; 4138 4139 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 4140 return; 4141 if (aip->ai_buf != NULL) { 4142 lk.lkt_held = NOHOLDER; 4143 panic("handle_allocindir_partdone: dangling dependency"); 4144 } 4145 indirdep = aip->ai_indirdep; 4146 if (indirdep->ir_state & UNDONE) { 4147 LIST_REMOVE(aip, ai_next); 4148 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 4149 return; 4150 } 4151 if (indirdep->ir_state & UFS1FMT) 4152 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 4153 aip->ai_newblkno; 4154 else 4155 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 4156 aip->ai_newblkno; 4157 LIST_REMOVE(aip, ai_next); 4158 if (aip->ai_freefrag != NULL) 4159 add_to_worklist(&aip->ai_freefrag->ff_list); 4160 WORKITEM_FREE(aip, D_ALLOCINDIR); 4161} 4162 4163/* 4164 * Called from within softdep_disk_write_complete above to restore 4165 * in-memory inode block contents to their most up-to-date state. Note 4166 * that this routine is always called from interrupt level with further 4167 * splbio interrupts blocked. 4168 */ 4169static int 4170handle_written_inodeblock(inodedep, bp) 4171 struct inodedep *inodedep; 4172 struct buf *bp; /* buffer containing the inode block */ 4173{ 4174 struct worklist *wk, *filefree; 4175 struct allocdirect *adp, *nextadp; 4176 struct ufs1_dinode *dp1 = NULL; 4177 struct ufs2_dinode *dp2 = NULL; 4178 int hadchanges, fstype; 4179 4180 if ((inodedep->id_state & IOSTARTED) == 0) { 4181 lk.lkt_held = NOHOLDER; 4182 panic("handle_written_inodeblock: not started"); 4183 } 4184 inodedep->id_state &= ~IOSTARTED; 4185 inodedep->id_state |= COMPLETE; 4186 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 4187 fstype = UFS1; 4188 dp1 = (struct ufs1_dinode *)bp->b_data + 4189 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 4190 } else { 4191 fstype = UFS2; 4192 dp2 = (struct ufs2_dinode *)bp->b_data + 4193 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 4194 } 4195 /* 4196 * If we had to roll back the inode allocation because of 4197 * bitmaps being incomplete, then simply restore it. 4198 * Keep the block dirty so that it will not be reclaimed until 4199 * all associated dependencies have been cleared and the 4200 * corresponding updates written to disk. 4201 */ 4202 if (inodedep->id_savedino1 != NULL) { 4203 if (fstype == UFS1) 4204 *dp1 = *inodedep->id_savedino1; 4205 else 4206 *dp2 = *inodedep->id_savedino2; 4207 FREE(inodedep->id_savedino1, M_INODEDEP); 4208 inodedep->id_savedino1 = NULL; 4209 if ((bp->b_flags & B_DELWRI) == 0) 4210 stat_inode_bitmap++; 4211 bdirty(bp); 4212 return (1); 4213 } 4214 /* 4215 * Roll forward anything that had to be rolled back before 4216 * the inode could be updated.
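 * Rolling forward reinstalls each allocdirect's new block number in the buffered dinode (di_db, di_ib, or di_extb) and sets hadchanges, so the buffer is re-dirtied below and the up-to-date pointers reach the disk in a subsequent write.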
4217 */ 4218 hadchanges = 0; 4219 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 4220 nextadp = TAILQ_NEXT(adp, ad_next); 4221 if (adp->ad_state & ATTACHED) { 4222 lk.lkt_held = NOHOLDER; 4223 panic("handle_written_inodeblock: new entry"); 4224 } 4225 if (fstype == UFS1) { 4226 if (adp->ad_lbn < NDADDR) { 4227 if (dp1->di_db[adp->ad_lbn]!=adp->ad_oldblkno) { 4228 lk.lkt_held = NOHOLDER; 4229 panic("%s: %s #%jd mismatch %d != %jd", 4230 "handle_written_inodeblock", 4231 "direct pointer", 4232 (intmax_t)adp->ad_lbn, 4233 dp1->di_db[adp->ad_lbn], 4234 (intmax_t)adp->ad_oldblkno); 4235 } 4236 dp1->di_db[adp->ad_lbn] = adp->ad_newblkno; 4237 } else { 4238 if (dp1->di_ib[adp->ad_lbn - NDADDR] != 0) { 4239 lk.lkt_held = NOHOLDER; 4240 panic("%s: %s #%jd allocated as %d", 4241 "handle_written_inodeblock", 4242 "indirect pointer", 4243 (intmax_t)adp->ad_lbn - NDADDR, 4244 dp1->di_ib[adp->ad_lbn - NDADDR]); 4245 } 4246 dp1->di_ib[adp->ad_lbn - NDADDR] = 4247 adp->ad_newblkno; 4248 } 4249 } else { 4250 if (adp->ad_lbn < NDADDR) { 4251 if (dp2->di_db[adp->ad_lbn]!=adp->ad_oldblkno) { 4252 lk.lkt_held = NOHOLDER; 4253 panic("%s: %s #%jd %s %jd != %jd", 4254 "handle_written_inodeblock", 4255 "direct pointer", 4256 (intmax_t)adp->ad_lbn, "mismatch", 4257 (intmax_t)dp2->di_db[adp->ad_lbn], 4258 (intmax_t)adp->ad_oldblkno); 4259 } 4260 dp2->di_db[adp->ad_lbn] = adp->ad_newblkno; 4261 } else { 4262 if (dp2->di_ib[adp->ad_lbn - NDADDR] != 0) { 4263 lk.lkt_held = NOHOLDER; 4264 panic("%s: %s #%jd allocated as %jd", 4265 "handle_written_inodeblock", 4266 "indirect pointer", 4267 (intmax_t)adp->ad_lbn - NDADDR, 4268 (intmax_t) 4269 dp2->di_ib[adp->ad_lbn - NDADDR]); 4270 } 4271 dp2->di_ib[adp->ad_lbn - NDADDR] = 4272 adp->ad_newblkno; 4273 } 4274 } 4275 adp->ad_state &= ~UNDONE; 4276 adp->ad_state |= ATTACHED; 4277 hadchanges = 1; 4278 } 4279 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 4280 nextadp = TAILQ_NEXT(adp, ad_next); 4281 if (adp->ad_state & ATTACHED) { 4282 lk.lkt_held = NOHOLDER; 4283 panic("handle_written_inodeblock: new entry"); 4284 } 4285 if (dp2->di_extb[adp->ad_lbn] != adp->ad_oldblkno) { 4286 lk.lkt_held = NOHOLDER; 4287 panic("%s: direct pointer #%jd %s %jd != %jd", 4288 "handle_written_inodeblock", 4289 (intmax_t)adp->ad_lbn, "mismatch", 4290 (intmax_t)dp2->di_extb[adp->ad_lbn], 4291 (intmax_t)adp->ad_oldblkno); 4292 } 4293 dp2->di_extb[adp->ad_lbn] = adp->ad_newblkno; 4294 adp->ad_state &= ~UNDONE; 4295 adp->ad_state |= ATTACHED; 4296 hadchanges = 1; 4297 } 4298 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 4299 stat_direct_blk_ptrs++; 4300 /* 4301 * Reset the file size to its most up-to-date value. 4302 */ 4303 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1) { 4304 lk.lkt_held = NOHOLDER; 4305 panic("handle_written_inodeblock: bad size"); 4306 } 4307 if (fstype == UFS1) { 4308 if (dp1->di_size != inodedep->id_savedsize) { 4309 dp1->di_size = inodedep->id_savedsize; 4310 hadchanges = 1; 4311 } 4312 } else { 4313 if (dp2->di_size != inodedep->id_savedsize) { 4314 dp2->di_size = inodedep->id_savedsize; 4315 hadchanges = 1; 4316 } 4317 if (dp2->di_extsize != inodedep->id_savedextsize) { 4318 dp2->di_extsize = inodedep->id_savedextsize; 4319 hadchanges = 1; 4320 } 4321 } 4322 inodedep->id_savedsize = -1; 4323 inodedep->id_savedextsize = -1; 4324 /* 4325 * If there were any rollbacks in the inode block, then it must be 4326 * marked dirty so that it will eventually get written back in 4327 * its correct form.
4328 */ 4329 if (hadchanges) 4330 bdirty(bp); 4331 /* 4332 * Process any allocdirects that completed during the update. 4333 */ 4334 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 4335 handle_allocdirect_partdone(adp); 4336 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) 4337 handle_allocdirect_partdone(adp); 4338 /* 4339 * Process deallocations that were held pending until the 4340 * inode had been written to disk. Freeing of the inode 4341 * is delayed until after all blocks have been freed to 4342 * avoid creation of new <vfsid, inum, lbn> triples 4343 * before the old ones have been deleted. 4344 */ 4345 filefree = NULL; 4346 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 4347 WORKLIST_REMOVE(wk); 4348 switch (wk->wk_type) { 4349 4350 case D_FREEFILE: 4351 /* 4352 * We defer adding filefree to the worklist until 4353 * all other additions have been made to ensure 4354 * that it will be done after all the old blocks 4355 * have been freed. 4356 */ 4357 if (filefree != NULL) { 4358 lk.lkt_held = NOHOLDER; 4359 panic("handle_written_inodeblock: filefree"); 4360 } 4361 filefree = wk; 4362 continue; 4363 4364 case D_MKDIR: 4365 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 4366 continue; 4367 4368 case D_DIRADD: 4369 diradd_inode_written(WK_DIRADD(wk), inodedep); 4370 continue; 4371 4372 case D_FREEBLKS: 4373 case D_FREEFRAG: 4374 case D_DIRREM: 4375 add_to_worklist(wk); 4376 continue; 4377 4378 case D_NEWDIRBLK: 4379 free_newdirblk(WK_NEWDIRBLK(wk)); 4380 continue; 4381 4382 default: 4383 lk.lkt_held = NOHOLDER; 4384 panic("handle_written_inodeblock: Unknown type %s", 4385 TYPENAME(wk->wk_type)); 4386 /* NOTREACHED */ 4387 } 4388 } 4389 if (filefree != NULL) { 4390 if (free_inodedep(inodedep) == 0) { 4391 lk.lkt_held = NOHOLDER; 4392 panic("handle_written_inodeblock: live inodedep"); 4393 } 4394 add_to_worklist(filefree); 4395 return (0); 4396 } 4397 4398 /* 4399 * If no outstanding dependencies, free it. 4400 */ 4401 if (free_inodedep(inodedep) || 4402 (TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 4403 TAILQ_FIRST(&inodedep->id_extupdt) == 0)) 4404 return (0); 4405 return (hadchanges); 4406} 4407 4408/* 4409 * Process a diradd entry after its dependent inode has been written. 4410 * This routine must be called with splbio interrupts blocked. 4411 */ 4412static void 4413diradd_inode_written(dap, inodedep) 4414 struct diradd *dap; 4415 struct inodedep *inodedep; 4416{ 4417 struct pagedep *pagedep; 4418 4419 dap->da_state |= COMPLETE; 4420 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4421 if (dap->da_state & DIRCHG) 4422 pagedep = dap->da_previous->dm_pagedep; 4423 else 4424 pagedep = dap->da_pagedep; 4425 LIST_REMOVE(dap, da_pdlist); 4426 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 4427 } 4428 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 4429} 4430 4431/* 4432 * Handle the completion of a mkdir dependency. 
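 * A mkdir carries two dependencies: MKDIR_PARENT, cleared when the parent directory inode has been written, and MKDIR_BODY, cleared when the block containing "." and ".." has been written. Once both are cleared the diradd becomes DEPCOMPLETE and, if fully complete, moves to the pagedep's pending list.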
4433 */ 4434static void 4435handle_written_mkdir(mkdir, type) 4436 struct mkdir *mkdir; 4437 int type; 4438{ 4439 struct diradd *dap; 4440 struct pagedep *pagedep; 4441 4442 if (mkdir->md_state != type) { 4443 lk.lkt_held = NOHOLDER; 4444 panic("handle_written_mkdir: bad type"); 4445 } 4446 dap = mkdir->md_diradd; 4447 dap->da_state &= ~type; 4448 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 4449 dap->da_state |= DEPCOMPLETE; 4450 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4451 if (dap->da_state & DIRCHG) 4452 pagedep = dap->da_previous->dm_pagedep; 4453 else 4454 pagedep = dap->da_pagedep; 4455 LIST_REMOVE(dap, da_pdlist); 4456 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 4457 } 4458 LIST_REMOVE(mkdir, md_mkdirs); 4459 WORKITEM_FREE(mkdir, D_MKDIR); 4460} 4461 4462/* 4463 * Called from within softdep_disk_write_complete above. 4464 * A write operation was just completed. Removed inodes can 4465 * now be freed and associated block pointers may be committed. 4466 * Note that this routine is always called from interrupt level 4467 * with further splbio interrupts blocked. 4468 */ 4469static int 4470handle_written_filepage(pagedep, bp) 4471 struct pagedep *pagedep; 4472 struct buf *bp; /* buffer containing the written page */ 4473{ 4474 struct dirrem *dirrem; 4475 struct diradd *dap, *nextdap; 4476 struct direct *ep; 4477 int i, chgs; 4478 4479 if ((pagedep->pd_state & IOSTARTED) == 0) { 4480 lk.lkt_held = NOHOLDER; 4481 panic("handle_written_filepage: not started"); 4482 } 4483 pagedep->pd_state &= ~IOSTARTED; 4484 /* 4485 * Process any directory removals that have been committed. 4486 */ 4487 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 4488 LIST_REMOVE(dirrem, dm_next); 4489 dirrem->dm_dirinum = pagedep->pd_ino; 4490 add_to_worklist(&dirrem->dm_list); 4491 } 4492 /* 4493 * Free any directory additions that have been committed. 4494 * If it is a newly allocated block, we have to wait until 4495 * the on-disk directory inode claims the new block. 4496 */ 4497 if ((pagedep->pd_state & NEWBLOCK) == 0) 4498 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 4499 free_diradd(dap); 4500 /* 4501 * Uncommitted directory entries must be restored. 4502 */ 4503 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 4504 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 4505 dap = nextdap) { 4506 nextdap = LIST_NEXT(dap, da_pdlist); 4507 if (dap->da_state & ATTACHED) { 4508 lk.lkt_held = NOHOLDER; 4509 panic("handle_written_filepage: attached"); 4510 } 4511 ep = (struct direct *) 4512 ((char *)bp->b_data + dap->da_offset); 4513 ep->d_ino = dap->da_newinum; 4514 dap->da_state &= ~UNDONE; 4515 dap->da_state |= ATTACHED; 4516 chgs = 1; 4517 /* 4518 * If the inode referenced by the directory has 4519 * been written out, then the dependency can be 4520 * moved to the pending list. 4521 */ 4522 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4523 LIST_REMOVE(dap, da_pdlist); 4524 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 4525 da_pdlist); 4526 } 4527 } 4528 } 4529 /* 4530 * If there were any rollbacks in the directory, then it must be 4531 * marked dirty so that it will eventually get written back in 4532 * its correct form. 4533 */ 4534 if (chgs) { 4535 if ((bp->b_flags & B_DELWRI) == 0) 4536 stat_dir_entry++; 4537 bdirty(bp); 4538 return (1); 4539 } 4540 /* 4541 * If we are not waiting for a new directory block to be 4542 * claimed by its inode, then the pagedep will be freed.
4543 * Otherwise it will remain to track any new entries on 4544 * the page in case they are fsync'ed. 4545 */ 4546 if ((pagedep->pd_state & NEWBLOCK) == 0) { 4547 LIST_REMOVE(pagedep, pd_hash); 4548 WORKITEM_FREE(pagedep, D_PAGEDEP); 4549 } 4550 return (0); 4551} 4552 4553/* 4554 * Writing back in-core inode structures. 4555 * 4556 * The filesystem only accesses an inode's contents when it occupies an 4557 * "in-core" inode structure. These "in-core" structures are separate from 4558 * the page frames used to cache inode blocks. Only the latter are 4559 * transferred to/from the disk. So, when the updated contents of the 4560 * "in-core" inode structure are copied to the corresponding in-memory inode 4561 * block, the dependencies are also transferred. The following procedure is 4562 * called when copying a dirty "in-core" inode to a cached inode block. 4563 */ 4564 4565/* 4566 * Called when an inode is loaded from disk. If the effective link count 4567 * differed from the actual link count when it was last flushed, then we 4568 * need to ensure that the correct effective link count is put back. 4569 */ 4570void 4571softdep_load_inodeblock(ip) 4572 struct inode *ip; /* the "in_core" copy of the inode */ 4573{ 4574 struct inodedep *inodedep; 4575 4576 /* 4577 * Check for alternate nlink count. 4578 */ 4579 ip->i_effnlink = ip->i_nlink; 4580 ACQUIRE_LOCK(&lk); 4581 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4582 FREE_LOCK(&lk); 4583 return; 4584 } 4585 ip->i_effnlink -= inodedep->id_nlinkdelta; 4586 if (inodedep->id_state & SPACECOUNTED) 4587 ip->i_flag |= IN_SPACECOUNTED; 4588 FREE_LOCK(&lk); 4589} 4590 4591/* 4592 * This routine is called just before the "in-core" inode 4593 * information is to be copied to the in-memory inode block. 4594 * Recall that an inode block contains several inodes. If 4595 * the force flag is set, then the dependencies will be 4596 * cleared so that the update can always be made. Note that 4597 * the buffer is locked when this routine is called, so we 4598 * will never be in the middle of writing the inode block 4599 * to disk. 4600 */ 4601void 4602softdep_update_inodeblock(ip, bp, waitfor) 4603 struct inode *ip; /* the "in_core" copy of the inode */ 4604 struct buf *bp; /* the buffer containing the inode block */ 4605 int waitfor; /* nonzero => update must be allowed */ 4606{ 4607 struct inodedep *inodedep; 4608 struct worklist *wk; 4609 int error, gotit; 4610 4611 /* 4612 * If the effective link count is not equal to the actual link 4613 * count, then we must track the difference in an inodedep while 4614 * the inode is (potentially) tossed out of the cache. Otherwise, 4615 * if there is no existing inodedep, then there are no dependencies 4616 * to track. 4617 */ 4618 ACQUIRE_LOCK(&lk); 4619 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4620 FREE_LOCK(&lk); 4621 if (ip->i_effnlink != ip->i_nlink) 4622 panic("softdep_update_inodeblock: bad link count"); 4623 return; 4624 } 4625 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 4626 FREE_LOCK(&lk); 4627 panic("softdep_update_inodeblock: bad delta"); 4628 } 4629 /* 4630 * Changes have been initiated. Anything depending on these 4631 * changes cannot occur until this inode has been written. 
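 * Clearing COMPLETE and hanging the inodedep on bp->b_dep ties those dependent operations to the buffer holding the inode block; handle_written_inodeblock releases them when the write finishes.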
4632 */ 4633 inodedep->id_state &= ~COMPLETE; 4634 if ((inodedep->id_state & ONWORKLIST) == 0) 4635 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 4636 /* 4637 * Any new dependencies associated with the incore inode must 4638 * now be moved to the list associated with the buffer holding 4639 * the in-memory copy of the inode. Once merged, process any 4640 * allocdirects that are completed by the merger. 4641 */ 4642 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); 4643 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL) 4644 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt)); 4645 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); 4646 if (TAILQ_FIRST(&inodedep->id_extupdt) != NULL) 4647 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt)); 4648 /* 4649 * Now that the inode has been pushed into the buffer, the 4650 * operations dependent on the inode being written to disk 4651 * can be moved to the id_bufwait list so that they will be 4652 * processed when the buffer I/O completes. 4653 */ 4654 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { 4655 WORKLIST_REMOVE(wk); 4656 WORKLIST_INSERT(&inodedep->id_bufwait, wk); 4657 } 4658 /* 4659 * Newly allocated inodes cannot be written until the bitmap 4660 * that allocates them has been written (indicated by 4661 * DEPCOMPLETE being set in id_state). If we are doing a 4662 * forced sync (e.g., an fsync on a file), we force the bitmap 4663 * to be written so that the update can be done. 4664 */ 4665 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) { 4666 FREE_LOCK(&lk); 4667 return; 4668 } 4669 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4670 FREE_LOCK(&lk); 4671 if (gotit && 4672 (error = BUF_WRITE(inodedep->id_buf)) != 0) 4673 softdep_error("softdep_update_inodeblock: bwrite", error); 4674 if ((inodedep->id_state & DEPCOMPLETE) == 0) 4675 panic("softdep_update_inodeblock: update failed"); 4676} 4677 4678/* 4679 * Merge a new inode dependency list (such as id_newinoupdt) into an 4680 * old inode dependency list (such as id_inoupdt). This routine must be 4681 * called with splbio interrupts blocked. 4682 */ 4683static void 4684merge_inode_lists(newlisthead, oldlisthead) 4685 struct allocdirectlst *newlisthead; 4686 struct allocdirectlst *oldlisthead; 4687{ 4688 struct allocdirect *listadp, *newadp; 4689 4690 newadp = TAILQ_FIRST(newlisthead); 4691 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) { 4692 if (listadp->ad_lbn < newadp->ad_lbn) { 4693 listadp = TAILQ_NEXT(listadp, ad_next); 4694 continue; 4695 } 4696 TAILQ_REMOVE(newlisthead, newadp, ad_next); 4697 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4698 if (listadp->ad_lbn == newadp->ad_lbn) { 4699 allocdirect_merge(oldlisthead, newadp, 4700 listadp); 4701 listadp = newadp; 4702 } 4703 newadp = TAILQ_FIRST(newlisthead); 4704 } 4705 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) { 4706 TAILQ_REMOVE(newlisthead, newadp, ad_next); 4707 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next); 4708 } 4709} 4710 4711/* 4712 * If we are doing an fsync, then we must ensure that any directory 4713 * entries for the inode have been written after the inode gets to disk.
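 * The loop below drains id_pendinghd: each diradd identifies the directory page holding the new name, whose parent may itself need flushing (a MKDIR_PARENT dependency or a NEWBLOCK pagedep) before the page containing the entry is rewritten.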
4714 */ 4715int 4716softdep_fsync(vp) 4717 struct vnode *vp; /* the "in_core" copy of the inode */ 4718{ 4719 struct inodedep *inodedep; 4720 struct pagedep *pagedep; 4721 struct worklist *wk; 4722 struct diradd *dap; 4723 struct mount *mnt; 4724 struct vnode *pvp; 4725 struct inode *ip; 4726 struct buf *bp; 4727 struct fs *fs; 4728 struct thread *td = curthread; 4729 int error, flushparent; 4730 ino_t parentino; 4731 ufs_lbn_t lbn; 4732 4733 ip = VTOI(vp); 4734 fs = ip->i_fs; 4735 ACQUIRE_LOCK(&lk); 4736 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4737 FREE_LOCK(&lk); 4738 return (0); 4739 } 4740 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4741 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4742 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 4743 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 4744 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4745 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4746 FREE_LOCK(&lk); 4747 panic("softdep_fsync: pending ops"); 4748 } 4749 for (error = 0, flushparent = 0; ; ) { 4750 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4751 break; 4752 if (wk->wk_type != D_DIRADD) { 4753 FREE_LOCK(&lk); 4754 panic("softdep_fsync: Unexpected type %s", 4755 TYPENAME(wk->wk_type)); 4756 } 4757 dap = WK_DIRADD(wk); 4758 /* 4759 * Flush our parent if this directory entry has a MKDIR_PARENT 4760 * dependency or is contained in a newly allocated block. 4761 */ 4762 if (dap->da_state & DIRCHG) 4763 pagedep = dap->da_previous->dm_pagedep; 4764 else 4765 pagedep = dap->da_pagedep; 4766 mnt = pagedep->pd_mnt; 4767 parentino = pagedep->pd_ino; 4768 lbn = pagedep->pd_lbn; 4769 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4770 FREE_LOCK(&lk); 4771 panic("softdep_fsync: dirty"); 4772 } 4773 if ((dap->da_state & MKDIR_PARENT) || 4774 (pagedep->pd_state & NEWBLOCK)) 4775 flushparent = 1; 4776 else 4777 flushparent = 0; 4778 /* 4779 * If we are being fsync'ed as part of vgone'ing this vnode, 4780 * then we will not be able to release and recover the 4781 * vnode below, so we just have to give up on writing its 4782 * directory entry out. It will eventually be written, just 4783 * not now, but then the user was not asking to have it 4784 * written, so we are not breaking any promises. 4785 */ 4786 mp_fixme("This operation is not atomic wrt the rest of the code"); 4787 VI_LOCK(vp); 4788 if (vp->v_iflag & VI_XLOCK) { 4789 VI_UNLOCK(vp); 4790 break; 4791 } else 4792 VI_UNLOCK(vp); 4793 /* 4794 * We prevent deadlock by always fetching inodes from the 4795 * root, moving down the directory tree. Thus, when fetching 4796 * our parent directory, we first try to get the lock. If 4797 * that fails, we must unlock ourselves before requesting 4798 * the lock on our parent. See the comment in ufs_lookup 4799 * for details on possible races. 4800 */ 4801 FREE_LOCK(&lk); 4802 if (VFS_VGET(mnt, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp)) { 4803 VOP_UNLOCK(vp, 0, td); 4804 error = VFS_VGET(mnt, parentino, LK_EXCLUSIVE, &pvp); 4805 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 4806 if (error != 0) 4807 return (error); 4808 } 4809 /* 4810 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 4811 * that are contained in direct blocks will be resolved by 4812 * doing a UFS_UPDATE. Pagedeps contained in indirect blocks 4813 * may require a complete sync'ing of the directory. So, we 4814 * try the cheap and fast UFS_UPDATE first, and if that fails, 4815 * then we do the slower VOP_FSYNC of the directory. 
4816 */ 4817 if (flushparent) { 4818 if ((error = UFS_UPDATE(pvp, 1)) != 0) { 4819 vput(pvp); 4820 return (error); 4821 } 4822 if ((pagedep->pd_state & NEWBLOCK) && 4823 (error = VOP_FSYNC(pvp, td->td_ucred, MNT_WAIT, td))) { 4824 vput(pvp); 4825 return (error); 4826 } 4827 } 4828 /* 4829 * Flush directory page containing the inode's name. 4830 */ 4831 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 4832 &bp); 4833 if (error == 0) 4834 error = BUF_WRITE(bp); 4835 else 4836 brelse(bp); 4837 vput(pvp); 4838 if (error != 0) 4839 return (error); 4840 ACQUIRE_LOCK(&lk); 4841 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4842 break; 4843 } 4844 FREE_LOCK(&lk); 4845 return (0); 4846} 4847 4848/* 4849 * Flush all the dirty bitmaps associated with the block device 4850 * before flushing the rest of the dirty blocks so as to reduce 4851 * the number of dependencies that will have to be rolled back. 4852 */ 4853void 4854softdep_fsync_mountdev(vp) 4855 struct vnode *vp; 4856{ 4857 struct buf *bp, *nbp; 4858 struct worklist *wk; 4859 4860 if (!vn_isdisk(vp, NULL)) 4861 panic("softdep_fsync_mountdev: vnode not a disk"); 4862 ACQUIRE_LOCK(&lk); 4863 VI_LOCK(vp); 4864 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 4865 nbp = TAILQ_NEXT(bp, b_vnbufs); 4866 VI_UNLOCK(vp); 4867 /* 4868 * If it is already scheduled, skip to the next buffer. 4869 */ 4870 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 4871 VI_LOCK(vp); 4872 continue; 4873 } 4874 if ((bp->b_flags & B_DELWRI) == 0) { 4875 FREE_LOCK(&lk); 4876 panic("softdep_fsync_mountdev: not dirty"); 4877 } 4878 /* 4879 * We are only interested in bitmaps with outstanding 4880 * dependencies. 4881 */ 4882 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4883 wk->wk_type != D_BMSAFEMAP || 4884 (bp->b_xflags & BX_BKGRDINPROG)) { 4885 BUF_UNLOCK(bp); 4886 VI_LOCK(vp); 4887 continue; 4888 } 4889 bremfree(bp); 4890 FREE_LOCK(&lk); 4891 (void) bawrite(bp); 4892 ACQUIRE_LOCK(&lk); 4893 /* 4894 * Since we may have slept during the I/O, we need 4895 * to start from a known point. 4896 */ 4897 VI_LOCK(vp); 4898 nbp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4899 } 4900 VI_UNLOCK(vp); 4901 drain_output(vp, 1); 4902 FREE_LOCK(&lk); 4903} 4904 4905/* 4906 * This routine is called when we are trying to synchronously flush a 4907 * file. This routine must eliminate any filesystem metadata dependencies 4908 * so that the syncing routine can succeed by pushing the dirty blocks 4909 * associated with the file. If any I/O errors occur, they are returned. 4910 */ 4911int 4912softdep_sync_metadata(ap) 4913 struct vop_fsync_args /* { 4914 struct vnode *a_vp; 4915 struct ucred *a_cred; 4916 int a_waitfor; 4917 struct thread *a_td; 4918 } */ *ap; 4919{ 4920 struct vnode *vp = ap->a_vp; 4921 struct pagedep *pagedep; 4922 struct allocdirect *adp; 4923 struct allocindir *aip; 4924 struct buf *bp, *nbp; 4925 struct worklist *wk; 4926 int i, error, waitfor; 4927 4928 /* 4929 * Check whether this vnode is involved in a filesystem 4930 * that is doing soft dependency processing. 4931 */ 4932 if (!vn_isdisk(vp, NULL)) { 4933 if (!DOINGSOFTDEP(vp)) 4934 return (0); 4935 } else 4936 if (vp->v_rdev->si_mountpoint == NULL || 4937 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4938 return (0); 4939 /* 4940 * Ensure that any direct block dependencies have been cleared. 
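 * flush_inodedep_deps (below) writes the buffers backing this inode's allocdirect lists before we walk the vnode's remaining dirty buffers.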
4941 */ 4942 ACQUIRE_LOCK(&lk); 4943 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4944 FREE_LOCK(&lk); 4945 return (error); 4946 } 4947 /* 4948 * For most files, the only metadata dependencies are the 4949 * cylinder group maps that allocate their inode or blocks. 4950 * The block allocation dependencies can be found by traversing 4951 * the dependency lists for any buffers that remain on their 4952 * dirty buffer list. The inode allocation dependency will 4953 * be resolved when the inode is updated with MNT_WAIT. 4954 * This work is done in two passes. The first pass grabs most 4955 * of the buffers and begins asynchronously writing them. The 4956 * only way to wait for these asynchronous writes is to sleep 4957 * on the filesystem vnode which may stay busy for a long time 4958 * if the filesystem is active. So, instead, we make a second 4959 * pass over the dependencies blocking on each write. In the 4960 * usual case we will be blocking against a write that we 4961 * initiated, so when it is done the dependency will have been 4962 * resolved. Thus the second pass is expected to end quickly. 4963 */ 4964 waitfor = MNT_NOWAIT; 4965top: 4966 /* 4967 * We must wait for any I/O in progress to finish so that 4968 * all potential buffers on the dirty list will be visible. 4969 */ 4970 drain_output(vp, 1); 4971 if (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT) == 0) { 4972 FREE_LOCK(&lk); 4973 return (0); 4974 } 4975 mp_fixme("The locking here is somewhat complicated, if not nonexistent."); 4976 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4977 /* While syncing snapshots, we must allow recursive lookups */ 4978 bp->b_lock.lk_flags |= LK_CANRECURSE; 4979loop: 4980 /* 4981 * As we hold the buffer locked, none of its dependencies 4982 * will disappear. 4983 */ 4984 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4985 switch (wk->wk_type) { 4986 4987 case D_ALLOCDIRECT: 4988 adp = WK_ALLOCDIRECT(wk); 4989 if (adp->ad_state & DEPCOMPLETE) 4990 continue; 4991 nbp = adp->ad_buf; 4992 if (getdirtybuf(&nbp, waitfor) == 0) 4993 continue; 4994 FREE_LOCK(&lk); 4995 if (waitfor == MNT_NOWAIT) { 4996 bawrite(nbp); 4997 } else if ((error = BUF_WRITE(nbp)) != 0) { 4998 break; 4999 } 5000 ACQUIRE_LOCK(&lk); 5001 continue; 5002 5003 case D_ALLOCINDIR: 5004 aip = WK_ALLOCINDIR(wk); 5005 if (aip->ai_state & DEPCOMPLETE) 5006 continue; 5007 nbp = aip->ai_buf; 5008 if (getdirtybuf(&nbp, waitfor) == 0) 5009 continue; 5010 FREE_LOCK(&lk); 5011 if (waitfor == MNT_NOWAIT) { 5012 bawrite(nbp); 5013 } else if ((error = BUF_WRITE(nbp)) != 0) { 5014 break; 5015 } 5016 ACQUIRE_LOCK(&lk); 5017 continue; 5018 5019 case D_INDIRDEP: 5020 restart: 5021 5022 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 5023 if (aip->ai_state & DEPCOMPLETE) 5024 continue; 5025 nbp = aip->ai_buf; 5026 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 5027 goto restart; 5028 FREE_LOCK(&lk); 5029 if ((error = BUF_WRITE(nbp)) != 0) { 5030 break; 5031 } 5032 ACQUIRE_LOCK(&lk); 5033 goto restart; 5034 } if (error != 0) break; /* a failed BUF_WRITE above left the lock dropped; take the error exit below */ 5035 continue; 5036 5037 case D_INODEDEP: 5038 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 5039 WK_INODEDEP(wk)->id_ino)) != 0) { 5040 FREE_LOCK(&lk); 5041 break; 5042 } 5043 continue; 5044 5045 case D_PAGEDEP: 5046 /* 5047 * We are trying to sync a directory that may 5048 * have dependencies on its own metadata 5049 * and/or on the inodes of any 5050 * recently allocated files. We walk its diradd 5051 * lists pushing out the associated inode.
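 * Each of the DAHASHSZ diradd hash buckets is drained through flush_pagedep_deps; an error there aborts the sync and is returned to the caller.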
5052 */ 5053 pagedep = WK_PAGEDEP(wk); 5054 for (i = 0; i < DAHASHSZ; i++) { 5055 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 5056 continue; 5057 if ((error = 5058 flush_pagedep_deps(vp, pagedep->pd_mnt, 5059 &pagedep->pd_diraddhd[i]))) { 5060 FREE_LOCK(&lk); 5061 break; 5062 } 5063 } if (error != 0) break; /* flush_pagedep_deps failed and the lock is already dropped; take the error exit below */ 5064 continue; 5065 5066 case D_MKDIR: 5067 /* 5068 * This case should never happen if the vnode has 5069 * been properly sync'ed. However, if this function 5070 * is used at a place where the vnode has not yet 5071 * been sync'ed, this dependency can show up. So, 5072 * rather than panic, just flush it. 5073 */ 5074 nbp = WK_MKDIR(wk)->md_buf; 5075 if (getdirtybuf(&nbp, waitfor) == 0) 5076 continue; 5077 FREE_LOCK(&lk); 5078 if (waitfor == MNT_NOWAIT) { 5079 bawrite(nbp); 5080 } else if ((error = BUF_WRITE(nbp)) != 0) { 5081 break; 5082 } 5083 ACQUIRE_LOCK(&lk); 5084 continue; 5085 5086 case D_BMSAFEMAP: 5087 /* 5088 * This case should never happen if the vnode has 5089 * been properly sync'ed. However, if this function 5090 * is used at a place where the vnode has not yet 5091 * been sync'ed, this dependency can show up. So, 5092 * rather than panic, just flush it. 5093 */ 5094 nbp = WK_BMSAFEMAP(wk)->sm_buf; 5095 if (getdirtybuf(&nbp, waitfor) == 0) 5096 continue; 5097 FREE_LOCK(&lk); 5098 if (waitfor == MNT_NOWAIT) { 5099 bawrite(nbp); 5100 } else if ((error = BUF_WRITE(nbp)) != 0) { 5101 break; 5102 } 5103 ACQUIRE_LOCK(&lk); 5104 continue; 5105 5106 default: 5107 FREE_LOCK(&lk); 5108 panic("softdep_sync_metadata: Unknown type %s", 5109 TYPENAME(wk->wk_type)); 5110 /* NOTREACHED */ 5111 } 5112 /* We reach here only on error, with the lock already released */ 5113 if (error == 0) 5114 panic("softdep_sync_metadata: zero error"); 5115 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 5116 bawrite(bp); 5117 return (error); 5118 } 5119 (void) getdirtybuf(&TAILQ_NEXT(bp, b_vnbufs), MNT_WAIT); 5120 nbp = TAILQ_NEXT(bp, b_vnbufs); 5121 FREE_LOCK(&lk); 5122 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 5123 bawrite(bp); 5124 ACQUIRE_LOCK(&lk); 5125 if (nbp != NULL) { 5126 bp = nbp; 5127 goto loop; 5128 } 5129 /* 5130 * The brief unlock is to allow any pent-up dependency 5131 * processing to be done. Then proceed with the second pass. 5132 */ 5133 if (waitfor == MNT_NOWAIT) { 5134 waitfor = MNT_WAIT; 5135 FREE_LOCK(&lk); 5136 ACQUIRE_LOCK(&lk); 5137 goto top; 5138 } 5139 5140 /* 5141 * If we have managed to get rid of all the dirty buffers, 5142 * then we are done. For certain directories and block 5143 * devices, we may need to do further work. 5144 * 5145 * We must wait for any I/O in progress to finish so that 5146 * all potential buffers on the dirty list will be visible. 5147 */ 5148 drain_output(vp, 1); 5149 if (TAILQ_FIRST(&vp->v_dirtyblkhd) == NULL) { 5150 FREE_LOCK(&lk); 5151 return (0); 5152 } 5153 5154 FREE_LOCK(&lk); 5155 /* 5156 * If we are trying to sync a block device, some of its buffers may 5157 * contain metadata that cannot be written until the contents of some 5158 * partially written files have been written to disk. The only easy 5159 * way to accomplish this is to sync the entire filesystem (luckily 5160 * this happens rarely). 5161 */ 5162 if (vn_isdisk(vp, NULL) && 5163 vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) && 5164 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, ap->a_cred, 5165 ap->a_td)) != 0) 5166 return (error); 5167 return (0); 5168} 5169 5170/* 5171 * Flush the dependencies associated with an inodedep. 5172 * Called with splbio blocked.
 */
static int
flush_inodedep_deps(fs, ino)
    struct fs *fs;
    ino_t ino;
{
    struct inodedep *inodedep;
    int error, waitfor;

    /*
     * This work is done in two passes. The first pass grabs most
     * of the buffers and begins asynchronously writing them. The
     * only way to wait for these asynchronous writes is to sleep
     * on the filesystem vnode which may stay busy for a long time
     * if the filesystem is active. So, instead, we make a second
     * pass over the dependencies blocking on each write. In the
     * usual case we will be blocking against a write that we
     * initiated, so when it is done the dependency will have been
     * resolved. Thus the second pass is expected to end quickly.
     * We give a brief window at the top of the loop to allow
     * any pending I/O to complete.
     */
    for (error = 0, waitfor = MNT_NOWAIT; ; ) {
        if (error)
            return (error);
        FREE_LOCK(&lk);
        ACQUIRE_LOCK(&lk);
        if (inodedep_lookup(fs, ino, 0, &inodedep) == 0)
            return (0);
        if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
            flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
            flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
            flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
            continue;
        /*
         * If this was the second (MNT_WAIT) pass, we are done;
         * otherwise go back and make the second pass.
         */
        if (waitfor == MNT_WAIT)
            break;
        waitfor = MNT_WAIT;
    }
    /*
     * Try freeing inodedep in case all dependencies have been removed.
     */
    if (inodedep_lookup(fs, ino, 0, &inodedep) != 0)
        (void) free_inodedep(inodedep);
    return (0);
}

/*
 * Flush an inode dependency list.
 * Called with splbio blocked.
 */
static int
flush_deplist(listhead, waitfor, errorp)
    struct allocdirectlst *listhead;
    int waitfor;
    int *errorp;
{
    struct allocdirect *adp;
    struct buf *bp;

    TAILQ_FOREACH(adp, listhead, ad_next) {
        if (adp->ad_state & DEPCOMPLETE)
            continue;
        bp = adp->ad_buf;
        if (getdirtybuf(&bp, waitfor) == 0) {
            if (waitfor == MNT_NOWAIT)
                continue;
            return (1);
        }
        FREE_LOCK(&lk);
        if (waitfor == MNT_NOWAIT) {
            bawrite(bp);
        } else if ((*errorp = BUF_WRITE(bp)) != 0) {
            ACQUIRE_LOCK(&lk);
            return (1);
        }
        ACQUIRE_LOCK(&lk);
        return (1);
    }
    return (0);
}

/*
 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
 * Called with splbio blocked.
 */
static int
flush_pagedep_deps(pvp, mp, diraddhdp)
    struct vnode *pvp;
    struct mount *mp;
    struct diraddhd *diraddhdp;
{
    struct thread *td = curthread;
    struct inodedep *inodedep;
    struct ufsmount *ump;
    struct diradd *dap;
    struct vnode *vp;
    int gotit, error = 0;
    struct buf *bp;
    ino_t inum;

    ump = VFSTOUFS(mp);
    while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
        /*
         * Flush ourselves if this directory entry
         * has a MKDIR_PARENT dependency.
         */
        if (dap->da_state & MKDIR_PARENT) {
            FREE_LOCK(&lk);
            if ((error = UFS_UPDATE(pvp, 1)) != 0)
                break;
            ACQUIRE_LOCK(&lk);
            /*
             * If that cleared dependencies, go on to next.
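             * Note that the UFS_UPDATE may have completed this
             * diradd and removed it from the list, which is why
             * we recheck the list head rather than trusting the
             * dap pointer itself.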
             */
            if (dap != LIST_FIRST(diraddhdp))
                continue;
            if (dap->da_state & MKDIR_PARENT) {
                FREE_LOCK(&lk);
                panic("flush_pagedep_deps: MKDIR_PARENT");
            }
        }
        /*
         * A newly allocated directory must have its "." and
         * ".." entries written out before its name can be
         * committed in its parent. We do not want or need
         * the full semantics of a synchronous VOP_FSYNC as
         * that may end up here again, once for each directory
         * level in the filesystem. Instead, we push the blocks
         * and wait for them to clear. We have to fsync twice
         * because the first call may choose to defer blocks
         * that still have dependencies, but deferral will
         * happen at most once.
         */
        inum = dap->da_newinum;
        if (dap->da_state & MKDIR_BODY) {
            FREE_LOCK(&lk);
            if ((error = VFS_VGET(mp, inum, LK_EXCLUSIVE, &vp)))
                break;
            if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td)) ||
                (error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) {
                vput(vp);
                break;
            }
            drain_output(vp, 0);
            vput(vp);
            ACQUIRE_LOCK(&lk);
            /*
             * If that cleared dependencies, go on to next.
             */
            if (dap != LIST_FIRST(diraddhdp))
                continue;
            if (dap->da_state & MKDIR_BODY) {
                FREE_LOCK(&lk);
                panic("flush_pagedep_deps: MKDIR_BODY");
            }
        }
        /*
         * Flush the inode on which the directory entry depends.
         * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
         * the only remaining dependency is that the updated inode
         * count must get pushed to disk. The inode has already
         * been pushed into its inode buffer (via VOP_UPDATE) at
         * the time of the reference count change. So we need only
         * locate that buffer, ensure that there will be no rollback
         * caused by a bitmap dependency, then write the inode buffer.
         */
        if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) {
            FREE_LOCK(&lk);
            panic("flush_pagedep_deps: lost inode");
        }
        /*
         * If the inode still has bitmap dependencies,
         * push them to disk.
         */
        if ((inodedep->id_state & DEPCOMPLETE) == 0) {
            gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT);
            FREE_LOCK(&lk);
            if (gotit &&
                (error = BUF_WRITE(inodedep->id_buf)) != 0)
                break;
            ACQUIRE_LOCK(&lk);
            if (dap != LIST_FIRST(diraddhdp))
                continue;
        }
        /*
         * If the inode is still sitting in a buffer waiting
         * to be written, push it to disk.
         */
        FREE_LOCK(&lk);
        if ((error = bread(ump->um_devvp,
            fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)),
            (int)ump->um_fs->fs_bsize, NOCRED, &bp)) != 0) {
            brelse(bp);
            break;
        }
        if ((error = BUF_WRITE(bp)) != 0)
            break;
        ACQUIRE_LOCK(&lk);
        /*
         * If we have failed to get rid of all the dependencies
         * then something is seriously wrong.
         */
        if (dap == LIST_FIRST(diraddhdp)) {
            FREE_LOCK(&lk);
            panic("flush_pagedep_deps: flush failed");
        }
    }
    if (error)
        ACQUIRE_LOCK(&lk);
    return (error);
}

/*
 * A large burst of file addition or deletion activity can drive the
 * memory load excessively high. First attempt to slow things down
 * using the techniques below. If that fails, this routine requests
 * the offending operations to fall back to running synchronously
 * until the memory load returns to a reasonable level.
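 *
 * Returns 0 if the operation may proceed asynchronously, or 1 if
 * the caller should fall back to doing its work synchronously.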
 */
int
softdep_slowdown(vp)
    struct vnode *vp;
{
    int max_softdeps_hard;

    max_softdeps_hard = max_softdeps * 11 / 10;
    if (num_dirrem < max_softdeps_hard / 2 &&
        num_inodedep < max_softdeps_hard)
        return (0);
    stat_sync_limit_hit += 1;
    return (1);
}

/*
 * Called by the allocation routines when they are about to fail
 * in the hope that we can free up some disk space.
 *
 * First check to see if the work list has anything on it. If it has,
 * clean up entries until we successfully free some space. Because this
 * process holds inodes locked, we cannot handle any remove requests
 * that might block on a locked inode as that could lead to deadlock.
 * If the worklist yields no free space, encourage the syncer daemon
 * to help us. In no event will we try for longer than tickdelay seconds.
 */
int
softdep_request_cleanup(fs, vp)
    struct fs *fs;
    struct vnode *vp;
{
    long starttime;
    ufs2_daddr_t needed;

    needed = fs->fs_cstotal.cs_nbfree + fs->fs_contigsumsize;
    starttime = time_second + tickdelay;
    if (UFS_UPDATE(vp, 1) != 0)
        return (0);
    while (fs->fs_pendingblocks > 0 && fs->fs_cstotal.cs_nbfree <= needed) {
        if (time_second > starttime)
            return (0);
        if (num_on_worklist > 0 &&
            process_worklist_item(NULL, LK_NOWAIT) != -1) {
            stat_worklist_push += 1;
            continue;
        }
        request_cleanup(FLUSH_REMOVE_WAIT, 0);
    }
    return (1);
}

/*
 * If memory utilization has gotten too high, deliberately slow things
 * down and speed up the I/O processing.
 */
static int
request_cleanup(resource, islocked)
    int resource;
    int islocked;
{
    struct thread *td = curthread;

    /*
     * We never hold up the filesystem syncer process.
     */
    if (td == filesys_syncer)
        return (0);
    /*
     * First check to see if the work list has gotten backlogged.
     * If it has, co-opt this process to help clean up two entries.
     * Because this process may hold inodes locked, we cannot
     * handle any remove requests that might block on a locked
     * inode as that could lead to deadlock.
     */
    if (num_on_worklist > max_softdeps / 10) {
        if (islocked)
            FREE_LOCK(&lk);
        process_worklist_item(NULL, LK_NOWAIT);
        process_worklist_item(NULL, LK_NOWAIT);
        stat_worklist_push += 2;
        if (islocked)
            ACQUIRE_LOCK(&lk);
        return (1);
    }
    /*
     * Next, we attempt to speed up the syncer process. If that
     * is successful, then we allow the process to continue.
     */
    if (speedup_syncer() && resource != FLUSH_REMOVE_WAIT)
        return (0);
    /*
     * If we are resource constrained on inode dependencies, try
     * flushing some dirty inodes. Otherwise, we are constrained
     * by file deletions, so try accelerating flushes of directories
     * with removal dependencies. We would like to do the cleanup
     * here, but we probably hold an inode locked at this point and
     * that might deadlock against one that we try to clean. So,
     * the best that we can do is request the syncer daemon to do
     * the cleanup for us.
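     * The request is made by bumping req_clear_inodedeps or
     * req_clear_remove; the syncer daemon notices these counters
     * on its next pass and performs the flushes on our behalf.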
     */
    switch (resource) {

    case FLUSH_INODES:
        stat_ino_limit_push += 1;
        req_clear_inodedeps += 1;
        stat_countp = &stat_ino_limit_hit;
        break;

    case FLUSH_REMOVE:
    case FLUSH_REMOVE_WAIT:
        stat_blk_limit_push += 1;
        req_clear_remove += 1;
        stat_countp = &stat_blk_limit_hit;
        break;

    default:
        if (islocked)
            FREE_LOCK(&lk);
        panic("request_cleanup: unknown type");
    }
    /*
     * Hopefully the syncer daemon will catch up and awaken us.
     * We wait at most tickdelay before proceeding in any case.
     */
    if (islocked == 0)
        ACQUIRE_LOCK(&lk);
    proc_waiting += 1;
    if (handle.callout == NULL)
        handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2);
    interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, NULL, PPAUSE,
        "softupdate", 0);
    proc_waiting -= 1;
    if (islocked == 0)
        FREE_LOCK(&lk);
    return (1);
}

/*
 * Awaken processes pausing in request_cleanup and clear proc_waiting
 * to indicate that there is no longer a timer running.
 */
void
pause_timer(arg)
    void *arg;
{

    *stat_countp += 1;
    wakeup_one(&proc_waiting);
    if (proc_waiting > 0)
        handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2);
    else
        handle.callout = NULL;
}

/*
 * Flush out a directory with at least one removal dependency in an effort to
 * reduce the number of dirrem, freefile, and freeblks dependency structures.
 */
static void
clear_remove(td)
    struct thread *td;
{
    struct pagedep_hashhead *pagedephd;
    struct pagedep *pagedep;
    static int next = 0;
    struct mount *mp;
    struct vnode *vp;
    int error, cnt;
    ino_t ino;

    ACQUIRE_LOCK(&lk);
    for (cnt = 0; cnt < pagedep_hash; cnt++) {
        pagedephd = &pagedep_hashtbl[next++];
        if (next >= pagedep_hash)
            next = 0;
        LIST_FOREACH(pagedep, pagedephd, pd_hash) {
            if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL)
                continue;
            mp = pagedep->pd_mnt;
            ino = pagedep->pd_ino;
            FREE_LOCK(&lk);
            if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
                /* Reacquire the lock dropped above. */
                ACQUIRE_LOCK(&lk);
                continue;
            }
            if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp))) {
                softdep_error("clear_remove: vget", error);
                vn_finished_write(mp);
                return;
            }
            if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td)))
                softdep_error("clear_remove: fsync", error);
            drain_output(vp, 0);
            vput(vp);
            vn_finished_write(mp);
            return;
        }
    }
    FREE_LOCK(&lk);
}

/*
 * Clear out a block of dirty inodes in an effort to reduce
 * the number of inodedep dependency structures.
 */
static void
clear_inodedeps(td)
    struct thread *td;
{
    struct inodedep_hashhead *inodedephd;
    struct inodedep *inodedep;
    static int next = 0;
    struct mount *mp;
    struct vnode *vp;
    struct fs *fs;
    int error, cnt;
    ino_t firstino, lastino, ino;

    ACQUIRE_LOCK(&lk);
    /*
     * Pick a random inode dependency to be cleared.
     * We will then gather up all the inodes in its block
     * that have dependencies and flush them out.
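     * Flushing all of the inodes that share that inode block
     * gives the final synchronous write below the best chance
     * of retiring every inodedep the block is holding up.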
     */
    for (cnt = 0; cnt < inodedep_hash; cnt++) {
        inodedephd = &inodedep_hashtbl[next++];
        if (next >= inodedep_hash)
            next = 0;
        if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
            break;
    }
    if (inodedep == NULL) {
        /* Nothing to flush; do not return with the lock held. */
        FREE_LOCK(&lk);
        return;
    }
    /*
     * Ugly code to find mount point given pointer to superblock.
     */
    fs = inodedep->id_fs;
    TAILQ_FOREACH(mp, &mountlist, mnt_list)
        if ((mp->mnt_flag & MNT_SOFTDEP) && fs == VFSTOUFS(mp)->um_fs)
            break;
    /*
     * Find the last inode in the block with dependencies.
     */
    firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
    for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
        if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0)
            break;
    /*
     * Asynchronously push all but the last inode with dependencies.
     * Synchronously push the last inode with dependencies to ensure
     * that the inode block gets written to free up the inodedeps.
     */
    for (ino = firstino; ino <= lastino; ino++) {
        if (inodedep_lookup(fs, ino, 0, &inodedep) == 0)
            continue;
        FREE_LOCK(&lk);
        if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
            /* Reacquire the lock dropped above. */
            ACQUIRE_LOCK(&lk);
            continue;
        }
        if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp)) != 0) {
            softdep_error("clear_inodedeps: vget", error);
            vn_finished_write(mp);
            return;
        }
        if (ino == lastino) {
            if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_WAIT, td)))
                softdep_error("clear_inodedeps: fsync1", error);
        } else {
            if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td)))
                softdep_error("clear_inodedeps: fsync2", error);
            drain_output(vp, 0);
        }
        vput(vp);
        vn_finished_write(mp);
        ACQUIRE_LOCK(&lk);
    }
    FREE_LOCK(&lk);
}

/*
 * Function to determine if the buffer has outstanding dependencies
 * that will cause a roll-back if the buffer is written. If wantcount
 * is set, return number of dependencies, otherwise just yes or no.
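 * The softdep lock is acquired and released internally, so the
 * caller must not hold it when calling this function.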
 */
static int
softdep_count_dependencies(bp, wantcount)
    struct buf *bp;
    int wantcount;
{
    struct worklist *wk;
    struct inodedep *inodedep;
    struct indirdep *indirdep;
    struct allocindir *aip;
    struct pagedep *pagedep;
    struct diradd *dap;
    int i, retval;

    retval = 0;
    ACQUIRE_LOCK(&lk);
    LIST_FOREACH(wk, &bp->b_dep, wk_list) {
        switch (wk->wk_type) {

        case D_INODEDEP:
            inodedep = WK_INODEDEP(wk);
            if ((inodedep->id_state & DEPCOMPLETE) == 0) {
                /* bitmap allocation dependency */
                retval += 1;
                if (!wantcount)
                    goto out;
            }
            if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
                /* direct block pointer dependency */
                retval += 1;
                if (!wantcount)
                    goto out;
            }
            if (TAILQ_FIRST(&inodedep->id_extupdt)) {
                /* direct block pointer dependency (ext attr data) */
                retval += 1;
                if (!wantcount)
                    goto out;
            }
            continue;

        case D_INDIRDEP:
            indirdep = WK_INDIRDEP(wk);

            LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
                /* indirect block pointer dependency */
                retval += 1;
                if (!wantcount)
                    goto out;
            }
            continue;

        case D_PAGEDEP:
            pagedep = WK_PAGEDEP(wk);
            for (i = 0; i < DAHASHSZ; i++) {

                LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
                    /* directory entry dependency */
                    retval += 1;
                    if (!wantcount)
                        goto out;
                }
            }
            continue;

        case D_BMSAFEMAP:
        case D_ALLOCDIRECT:
        case D_ALLOCINDIR:
        case D_MKDIR:
            /* never a dependency on these blocks */
            continue;

        default:
            FREE_LOCK(&lk);
            panic("softdep_count_dependencies: Unexpected type %s",
                TYPENAME(wk->wk_type));
            /* NOTREACHED */
        }
    }
out:
    FREE_LOCK(&lk);
    return (retval);
}

/*
 * Acquire exclusive access to a buffer.
 * Must be called with splbio blocked.
 * Return 1 if buffer was acquired.
 */
static int
getdirtybuf(bpp, waitfor)
    struct buf **bpp;
    int waitfor;
{
    struct buf *bp;
    int error;

    for (;;) {
        if ((bp = *bpp) == NULL)
            return (0);
        if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
            if ((bp->b_xflags & BX_BKGRDINPROG) == 0)
                break;
            BUF_UNLOCK(bp);
            if (waitfor != MNT_WAIT)
                return (0);
            bp->b_xflags |= BX_BKGRDWAIT;
            interlocked_sleep(&lk, SLEEP, &bp->b_xflags, NULL,
                PRIBIO, "getbuf", 0);
            continue;
        }
        if (waitfor != MNT_WAIT)
            return (0);
        error = interlocked_sleep(&lk, LOCKBUF, bp, NULL,
            LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0);
        if (error != ENOLCK) {
            FREE_LOCK(&lk);
            panic("getdirtybuf: inconsistent lock");
        }
    }
    if ((bp->b_flags & B_DELWRI) == 0) {
        BUF_UNLOCK(bp);
        return (0);
    }
    bremfree(bp);
    return (1);
}

/*
 * Wait for pending output on a vnode to complete.
 * Must be called with vnode locked.
 */
static void
drain_output(vp, islocked)
    struct vnode *vp;
    int islocked;
{

    if (!islocked)
        ACQUIRE_LOCK(&lk);
    VI_LOCK(vp);
    while (vp->v_numoutput) {
        vp->v_iflag |= VI_BWAIT;
        interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput,
            VI_MTX(vp), PRIBIO + 1, "drainvp", 0);
    }
    VI_UNLOCK(vp);
    if (!islocked)
        FREE_LOCK(&lk);
}

/*
 * Called whenever a buffer that is being invalidated or reallocated
 * contains dependencies.
 * This should only happen if an I/O error has occurred.
 * The routine is called with the buffer locked.
 */
static void
softdep_deallocate_dependencies(bp)
    struct buf *bp;
{

    if ((bp->b_ioflags & BIO_ERROR) == 0)
        panic("softdep_deallocate_dependencies: dangling deps");
    softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
    panic("softdep_deallocate_dependencies: unrecovered I/O error");
}

/*
 * Function to handle asynchronous write errors in the filesystem.
 */
void
softdep_error(func, error)
    char *func;
    int error;
{

    /* XXX should do something better! */
    printf("%s: got error %d while accessing filesystem\n", func, error);
}
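
/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled:
 * this shows the canonical buffer-flushing pattern that recurs in
 * softdep_sync_metadata and flush_deplist above. The function name
 * flush_one_buf_example is hypothetical and not part of this file's
 * interface. "waitfor" selects the pass: MNT_NOWAIT starts an
 * asynchronous write, anything else does a blocking write. The
 * softdep lock is expected to be held on entry and is held again
 * on return.
 */
#if 0
static int
flush_one_buf_example(nbp, waitfor)
    struct buf *nbp;
    int waitfor;
{
    int error = 0;

    if (getdirtybuf(&nbp, waitfor) == 0)    /* not dirty or not lockable */
        return (0);
    FREE_LOCK(&lk);                         /* never write with lk held */
    if (waitfor == MNT_NOWAIT)
        bawrite(nbp);                       /* async: start it and move on */
    else
        error = BUF_WRITE(nbp);             /* sync: wait for completion */
    ACQUIRE_LOCK(&lk);
    return (error);
}
#endif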