/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, current->pid, inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is locked, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_LOCK)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty or s_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
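
/*
 * For reference: the mark_inode_dirty()/mark_inode_dirty_sync() wrappers
 * mentioned above are not defined in this file; a minimal sketch of their
 * definitions, assuming the <linux/fs.h> of this era:
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 *
 * I_DIRTY combines I_DIRTY_SYNC, I_DIRTY_DATASYNC and I_DIRTY_PAGES, so
 * mark_inode_dirty() dirties both the inode itself and its pages.
 */
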
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_LOCK);

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode.  It is still on sb->s_io.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we leave the inode
				 * at the head of sb->s_dirty so it will get
				 * more writeout as soon as the queue becomes
				 * uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				list_move_tail(&inode->i_list, &sb->s_dirty);
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				inode->dirtied_when = jiffies;
				list_move(&inode->i_list, &sb->s_dirty);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			list_move(&inode->i_list, &sb->s_dirty);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean and in use.
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean and unused.
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	wake_up_inode(inode);
	return ret;
}
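
/*
 * A sketch (hypothetical, not part of this file) of the ->write_inode()
 * method that write_inode() above dispatches to.  The examplefs_* names are
 * invented; the shape follows simple block filesystems such as ext2: copy
 * the in-core fields into the on-disk inode's buffer, dirty that buffer,
 * and write it synchronously when `sync' is set:
 *
 *	static int examplefs_write_inode(struct inode *inode, int sync)
 *	{
 *		int err = 0;
 *		struct buffer_head *bh;
 *		struct examplefs_inode *raw;
 *
 *		raw = examplefs_get_inode(inode->i_sb, inode->i_ino, &bh);
 *		if (IS_ERR(raw))
 *			return PTR_ERR(raw);
 *		raw->i_size = cpu_to_le32(inode->i_size);
 *		raw->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
 *		mark_buffer_dirty(bh);
 *		if (sync) {
 *			sync_dirty_buffer(bh);
 *			if (buffer_req(bh) && !buffer_uptodate(bh))
 *				err = -EIO;
 *		}
 *		brelse(bh);
 *		return err;
 *	}
 */
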
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
		struct address_space *mapping = inode->i_mapping;
		int ret;

		list_move(&inode->i_list, &inode->i_sb->s_dirty);

		/*
		 * Even if we don't actually write the inode itself here,
		 * we can at least start some of the data writeout..
		 */
		spin_unlock(&inode_lock);
		ret = do_writepages(mapping, wbc);
		spin_lock(&inode_lock);
		return ret;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_LOCK) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);

		wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
				      TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_LOCK);
	}
	return __sync_single_inode(inode, wbc);
}
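
/*
 * For reference, the inode_wait() callback passed to __wait_on_bit() above
 * does nothing but give up the CPU; a sketch of its definition, assuming
 * the <linux/writeback.h> of this era:
 *
 *	static inline int inode_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 * __wait_on_bit() re-tests the bit around this action, so the loop in
 * __writeback_single_inode() wakes whenever __I_LOCK is cleared.
 */
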
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		list_splice_init(&sb->s_dirty, &sb->s_io);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			list_move(&inode->i_list, &sb->s_dirty);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode.
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Was this inode dirtied too recently? */
		if (wbc->older_than_this && time_after(inode->dirtied_when,
						*wbc->older_than_this))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * Writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			list_move(&inode->i_list, &sb->s_dirty);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0)
			break;
	}
	return;		/* Leave any unwritten inodes on s_io */
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty list it hasn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until both the ->s_dirty and ->s_io lists
 * are empty.  Since __sync_single_inode() regains inode_lock before it
 * finally moves the inode off the superblock's lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev
 * superblock, sync_sb_inodes will seek out the blockdev which matches `bdi'.
 * Maybe not super-efficient, but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root) {
					spin_lock(&inode_lock);
					sync_sb_inodes(sb, wbc);
					spin_unlock(&inode_lock);
				}
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
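
/*
 * Illustrative sketch of how writeback_inodes() above is typically driven,
 * simplified from the pdflush callers in mm/page-writeback.c (the dirty-
 * threshold and pages_skipped handling are elided here).  Given a target
 * nr_pages, background writeout loops with a nonblocking writeback_control
 * in chunks of MAX_WRITEBACK_PAGES, backing off briefly while every queue
 * is congested and stopping once the dirty data runs out:
 *
 *	struct writeback_control wbc = {
 *		.bdi		= NULL,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= 0,
 *		.nonblocking	= 1,
 *	};
 *
 *	while (nr_pages > 0) {
 *		wbc.encountered_congestion = 0;
 *		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 *		writeback_inodes(&wbc);
 *		nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 *		if (wbc.nr_to_write > 0 && !wbc.encountered_congestion)
 *			break;
 *		if (wbc.encountered_congestion)
 *			blk_congestion_wait(WRITE, HZ/10);
 *	}
 */
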
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}

/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		sb->s_syncing = val;
	}
	spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}
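
/*
 * Illustrative sketch of the caller's side, simplified from do_sync() in
 * fs/buffer.c of this era (quota syncing and the emergency-sync printk are
 * elided): sys_sync makes the two passes described above, an asynchronous
 * one to get all the I/O started, then a waiting one:
 *
 *	static void do_sync(unsigned long wait)
 *	{
 *		wakeup_pdflush(0);
 *		sync_inodes(0);
 *		sync_supers();
 *		sync_filesystems(0);
 *		sync_filesystems(wait);
 *		sync_inodes(wait);
 *	}
 */
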
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write	= LONG_MAX,
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		wait_on_inode(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon:
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		wait_on_inode(inode);

	return err;
}

EXPORT_SYMBOL(generic_osync_inode);
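
/*
 * Illustrative sketch (hypothetical caller, loosely following the O_SYNC
 * handling in mm/filemap.c): a write path would follow a successful
 * buffered write with an osync of both data and metadata:
 *
 *	written = generic_file_buffered_write(...);
 *
 *	if (written >= 0 &&
 *	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 *		int err = generic_osync_inode(inode, mapping,
 *					OSYNC_METADATA|OSYNC_DATA);
 *		if (err)
 *			written = err;
 *	}
 *	return written;
 */
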
/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own, which is somewhat inefficient, as this may
 * prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
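
/*
 * Usage sketch (hypothetical caller): a pdflush-style flusher brackets its
 * writeout with the per-queue flag so that only one thread works a given
 * backing device at a time, exactly as sync_sb_inodes() does above:
 *
 *	if (writeback_acquire(bdi)) {
 *		do_some_writeout(bdi);
 *		writeback_release(bdi);
 *	} else {
 *		back_off();
 *	}
 *
 * do_some_writeout() and back_off() are stand-ins for the caller's own
 * logic, not functions defined anywhere in the kernel.
 */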