1/* 2 * linux/fs/ext3/inode.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * from 10 * 11 * linux/fs/minix/inode.c 12 * 13 * Copyright (C) 1991, 1992 Linus Torvalds 14 * 15 * Goal-directed block allocation by Stephen Tweedie 16 * (sct@redhat.com), 1993, 1998 17 * Big-endian to little-endian byte-swapping/bitmaps by 18 * David S. Miller (davem@caip.rutgers.edu), 1995 19 * 64-bit file support on 64-bit platforms by Jakub Jelinek 20 * (jj@sunsite.ms.mff.cuni.cz) 21 * 22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 23 */ 24 25#include <linux/module.h> 26#include <linux/fs.h> 27#include <linux/time.h> 28#include <linux/ext3_jbd.h> 29#include <linux/jbd.h> 30#include <linux/highuid.h> 31#include <linux/pagemap.h> 32#include <linux/quotaops.h> 33#include <linux/string.h> 34#include <linux/buffer_head.h> 35#include <linux/writeback.h> 36#include <linux/mpage.h> 37#include <linux/uio.h> 38#include <linux/bio.h> 39#include "xattr.h" 40#include "acl.h" 41 42static int ext3_writepage_trans_blocks(struct inode *inode); 43 44/* 45 * Test whether an inode is a fast symlink. 46 */ 47static int ext3_inode_is_fast_symlink(struct inode *inode) 48{ 49 int ea_blocks = EXT3_I(inode)->i_file_acl ? 50 (inode->i_sb->s_blocksize >> 9) : 0; 51 52 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); 53} 54 55/* 56 * The ext3 forget function must perform a revoke if we are freeing data 57 * which has been journaled. Metadata (eg. indirect blocks) must be 58 * revoked in all cases. 59 * 60 * "bh" may be NULL: a metadata block may have been freed from memory 61 * but there may still be a record of it in the journal, and that record 62 * still needs to be revoked. 63 */ 64int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, 65 struct buffer_head *bh, ext3_fsblk_t blocknr) 66{ 67 int err; 68 69 might_sleep(); 70 71 BUFFER_TRACE(bh, "enter"); 72 73 jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, " 74 "data mode %lx\n", 75 bh, is_metadata, inode->i_mode, 76 test_opt(inode->i_sb, DATA_FLAGS)); 77 78 /* Never use the revoke function if we are doing full data 79 * journaling: there is no need to, and a V1 superblock won't 80 * support it. Otherwise, only skip the revoke on un-journaled 81 * data blocks. */ 82 83 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA || 84 (!is_metadata && !ext3_should_journal_data(inode))) { 85 if (bh) { 86 BUFFER_TRACE(bh, "call journal_forget"); 87 return ext3_journal_forget(handle, bh); 88 } 89 return 0; 90 } 91 92 /* 93 * data!=journal && (is_metadata || should_journal_data(inode)) 94 */ 95 BUFFER_TRACE(bh, "call ext3_journal_revoke"); 96 err = ext3_journal_revoke(handle, blocknr, bh); 97 if (err) 98 ext3_abort(inode->i_sb, __FUNCTION__, 99 "error %d when attempting revoke", err); 100 BUFFER_TRACE(bh, "exit"); 101 return err; 102} 103 104/* 105 * Work out how many blocks we need to proceed with the next chunk of a 106 * truncate transaction. 107 */ 108static unsigned long blocks_for_truncate(struct inode *inode) 109{ 110 unsigned long needed; 111 112 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); 113 114 /* Give ourselves just enough room to cope with inodes in which 115 * i_blocks is corrupt: we've seen disk corruptions in the past 116 * which resulted in random data in an inode which looked enough 117 * like a regular file for ext3 to try to delete it. 
Things 118 * will go a bit crazy if that happens, but at least we should 119 * try not to panic the whole kernel. */ 120 if (needed < 2) 121 needed = 2; 122 123 /* But we need to bound the transaction so we don't overflow the 124 * journal. */ 125 if (needed > EXT3_MAX_TRANS_DATA) 126 needed = EXT3_MAX_TRANS_DATA; 127 128 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed; 129} 130 131/* 132 * Truncate transactions can be complex and absolutely huge. So we need to 133 * be able to restart the transaction at a conventient checkpoint to make 134 * sure we don't overflow the journal. 135 * 136 * start_transaction gets us a new handle for a truncate transaction, 137 * and extend_transaction tries to extend the existing one a bit. If 138 * extend fails, we need to propagate the failure up and restart the 139 * transaction in the top-level truncate loop. --sct 140 */ 141static handle_t *start_transaction(struct inode *inode) 142{ 143 handle_t *result; 144 145 result = ext3_journal_start(inode, blocks_for_truncate(inode)); 146 if (!IS_ERR(result)) 147 return result; 148 149 ext3_std_error(inode->i_sb, PTR_ERR(result)); 150 return result; 151} 152 153/* 154 * Try to extend this transaction for the purposes of truncation. 155 * 156 * Returns 0 if we managed to create more room. If we can't create more 157 * room, and the transaction must be restarted we return 1. 158 */ 159static int try_to_extend_transaction(handle_t *handle, struct inode *inode) 160{ 161 if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS) 162 return 0; 163 if (!ext3_journal_extend(handle, blocks_for_truncate(inode))) 164 return 0; 165 return 1; 166} 167 168/* 169 * Restart the transaction associated with *handle. This does a commit, 170 * so before we call here everything must be consistently dirtied against 171 * this transaction. 172 */ 173static int ext3_journal_test_restart(handle_t *handle, struct inode *inode) 174{ 175 jbd_debug(2, "restarting handle %p\n", handle); 176 return ext3_journal_restart(handle, blocks_for_truncate(inode)); 177} 178 179/* 180 * Called at the last iput() if i_nlink is zero. 181 */ 182void ext3_delete_inode (struct inode * inode) 183{ 184 handle_t *handle; 185 186 truncate_inode_pages(&inode->i_data, 0); 187 188 if (is_bad_inode(inode)) 189 goto no_delete; 190 191 handle = start_transaction(inode); 192 if (IS_ERR(handle)) { 193 /* 194 * If we're going to skip the normal cleanup, we still need to 195 * make sure that the in-core orphan linked list is properly 196 * cleaned up. 197 */ 198 ext3_orphan_del(NULL, inode); 199 goto no_delete; 200 } 201 202 if (IS_SYNC(inode)) 203 handle->h_sync = 1; 204 inode->i_size = 0; 205 if (inode->i_blocks) 206 ext3_truncate(inode); 207 /* 208 * Kill off the orphan record which ext3_truncate created. 209 * AKPM: I think this can be inside the above `if'. 210 * Note that ext3_orphan_del() has to be able to cope with the 211 * deletion of a non-existent orphan - this is because we don't 212 * know if ext3_truncate() actually created an orphan record. 213 * (Well, we could do this if we need to, but heck - it works) 214 */ 215 ext3_orphan_del(handle, inode); 216 EXT3_I(inode)->i_dtime = get_seconds(); 217 218 /* 219 * One subtle ordering requirement: if anything has gone wrong 220 * (transaction abort, IO errors, whatever), then we can still 221 * do these next steps (the fs will already have been marked as 222 * having errors), but we can't free the inode if the mark_dirty 223 * fails. 
224 */ 225 if (ext3_mark_inode_dirty(handle, inode)) 226 /* If that failed, just do the required in-core inode clear. */ 227 clear_inode(inode); 228 else 229 ext3_free_inode(handle, inode); 230 ext3_journal_stop(handle); 231 return; 232no_delete: 233 clear_inode(inode); /* We must guarantee clearing of inode... */ 234} 235 236typedef struct { 237 __le32 *p; 238 __le32 key; 239 struct buffer_head *bh; 240} Indirect; 241 242static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) 243{ 244 p->key = *(p->p = v); 245 p->bh = bh; 246} 247 248static int verify_chain(Indirect *from, Indirect *to) 249{ 250 while (from <= to && from->key == *from->p) 251 from++; 252 return (from > to); 253} 254 255/** 256 * ext3_block_to_path - parse the block number into array of offsets 257 * @inode: inode in question (we are only interested in its superblock) 258 * @i_block: block number to be parsed 259 * @offsets: array to store the offsets in 260 * @boundary: set this non-zero if the referred-to block is likely to be 261 * followed (on disk) by an indirect block. 262 * 263 * To store the locations of file's data ext3 uses a data structure common 264 * for UNIX filesystems - tree of pointers anchored in the inode, with 265 * data blocks at leaves and indirect blocks in intermediate nodes. 266 * This function translates the block number into path in that tree - 267 * return value is the path length and @offsets[n] is the offset of 268 * pointer to (n+1)th node in the nth one. If @block is out of range 269 * (negative or too large) warning is printed and zero returned. 270 * 271 * Note: function doesn't find node addresses, so no IO is needed. All 272 * we need to know is the capacity of indirect blocks (taken from the 273 * inode->i_sb). 274 */ 275 276/* 277 * Portability note: the last comparison (check that we fit into triple 278 * indirect block) is spelled differently, because otherwise on an 279 * architecture with 32-bit longs and 8Kb pages we might get into trouble 280 * if our filesystem had 8Kb blocks. We might use long long, but that would 281 * kill us on x86. Oh, well, at least the sign propagation does not matter - 282 * i_block would have to be negative in the very beginning, so we would not 283 * get there at all. 
284 */ 285 286static int ext3_block_to_path(struct inode *inode, 287 long i_block, int offsets[4], int *boundary) 288{ 289 int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb); 290 int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb); 291 const long direct_blocks = EXT3_NDIR_BLOCKS, 292 indirect_blocks = ptrs, 293 double_blocks = (1 << (ptrs_bits * 2)); 294 int n = 0; 295 int final = 0; 296 297 if (i_block < 0) { 298 ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0"); 299 } else if (i_block < direct_blocks) { 300 offsets[n++] = i_block; 301 final = direct_blocks; 302 } else if ( (i_block -= direct_blocks) < indirect_blocks) { 303 offsets[n++] = EXT3_IND_BLOCK; 304 offsets[n++] = i_block; 305 final = ptrs; 306 } else if ((i_block -= indirect_blocks) < double_blocks) { 307 offsets[n++] = EXT3_DIND_BLOCK; 308 offsets[n++] = i_block >> ptrs_bits; 309 offsets[n++] = i_block & (ptrs - 1); 310 final = ptrs; 311 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { 312 offsets[n++] = EXT3_TIND_BLOCK; 313 offsets[n++] = i_block >> (ptrs_bits * 2); 314 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); 315 offsets[n++] = i_block & (ptrs - 1); 316 final = ptrs; 317 } else { 318 ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big"); 319 } 320 if (boundary) 321 *boundary = final - 1 - (i_block & (ptrs - 1)); 322 return n; 323} 324 325/** 326 * ext3_get_branch - read the chain of indirect blocks leading to data 327 * @inode: inode in question 328 * @depth: depth of the chain (1 - direct pointer, etc.) 329 * @offsets: offsets of pointers in inode/indirect blocks 330 * @chain: place to store the result 331 * @err: here we store the error value 332 * 333 * Function fills the array of triples <key, p, bh> and returns %NULL 334 * if everything went OK or the pointer to the last filled triple 335 * (incomplete one) otherwise. Upon the return chain[i].key contains 336 * the number of (i+1)-th block in the chain (as it is stored in memory, 337 * i.e. little-endian 32-bit), chain[i].p contains the address of that 338 * number (it points into struct inode for i==0 and into the bh->b_data 339 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect 340 * block for i>0 and NULL for i==0. In other words, it holds the block 341 * numbers of the chain, addresses they were taken from (and where we can 342 * verify that chain did not change) and buffer_heads hosting these 343 * numbers. 344 * 345 * Function stops when it stumbles upon zero pointer (absent block) 346 * (pointer to last triple returned, *@err == 0) 347 * or when it gets an IO error reading an indirect block 348 * (ditto, *@err == -EIO) 349 * or when it notices that chain had been changed while it was reading 350 * (ditto, *@err == -EAGAIN) 351 * or when it reads all @depth-1 indirect blocks successfully and finds 352 * the whole chain, all way to the data (returns %NULL, *err == 0). 
353 */ 354static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets, 355 Indirect chain[4], int *err) 356{ 357 struct super_block *sb = inode->i_sb; 358 Indirect *p = chain; 359 struct buffer_head *bh; 360 361 *err = 0; 362 /* i_data is not going away, no lock needed */ 363 add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets); 364 if (!p->key) 365 goto no_block; 366 while (--depth) { 367 bh = sb_bread(sb, le32_to_cpu(p->key)); 368 if (!bh) 369 goto failure; 370 /* Reader: pointers */ 371 if (!verify_chain(chain, p)) 372 goto changed; 373 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets); 374 /* Reader: end */ 375 if (!p->key) 376 goto no_block; 377 } 378 return NULL; 379 380changed: 381 brelse(bh); 382 *err = -EAGAIN; 383 goto no_block; 384failure: 385 *err = -EIO; 386no_block: 387 return p; 388} 389 390/** 391 * ext3_find_near - find a place for allocation with sufficient locality 392 * @inode: owner 393 * @ind: descriptor of indirect block. 394 * 395 * This function returns the prefered place for block allocation. 396 * It is used when heuristic for sequential allocation fails. 397 * Rules are: 398 * + if there is a block to the left of our position - allocate near it. 399 * + if pointer will live in indirect block - allocate near that block. 400 * + if pointer will live in inode - allocate in the same 401 * cylinder group. 402 * 403 * In the latter case we colour the starting block by the callers PID to 404 * prevent it from clashing with concurrent allocations for a different inode 405 * in the same block group. The PID is used here so that functionally related 406 * files will be close-by on-disk. 407 * 408 * Caller must make sure that @ind is valid and will stay that way. 409 */ 410static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind) 411{ 412 struct ext3_inode_info *ei = EXT3_I(inode); 413 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; 414 __le32 *p; 415 ext3_fsblk_t bg_start; 416 ext3_grpblk_t colour; 417 418 /* Try to find previous block */ 419 for (p = ind->p - 1; p >= start; p--) { 420 if (*p) 421 return le32_to_cpu(*p); 422 } 423 424 /* No such thing, so let's try location of indirect block */ 425 if (ind->bh) 426 return ind->bh->b_blocknr; 427 428 /* 429 * It is going to be referred to from the inode itself? OK, just put it 430 * into the same cylinder group then. 431 */ 432 bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group); 433 colour = (current->pid % 16) * 434 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16); 435 return bg_start + colour; 436} 437 438/** 439 * ext3_find_goal - find a prefered place for allocation. 440 * @inode: owner 441 * @block: block we want 442 * @chain: chain of indirect blocks 443 * @partial: pointer to the last triple within a chain 444 * @goal: place to store the result. 445 * 446 * Normally this function find the prefered place for block allocation, 447 * stores it in *@goal and returns zero. 448 */ 449 450static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, 451 Indirect chain[4], Indirect *partial) 452{ 453 struct ext3_block_alloc_info *block_i; 454 455 block_i = EXT3_I(inode)->i_block_alloc_info; 456 457 /* 458 * try the heuristic for sequential allocation, 459 * failing that at least try to get decent locality. 
460 */ 461 if (block_i && (block == block_i->last_alloc_logical_block + 1) 462 && (block_i->last_alloc_physical_block != 0)) { 463 return block_i->last_alloc_physical_block + 1; 464 } 465 466 return ext3_find_near(inode, partial); 467} 468 469/** 470 * ext3_blks_to_allocate: Look up the block map and count the number 471 * of direct blocks need to be allocated for the given branch. 472 * 473 * @branch: chain of indirect blocks 474 * @k: number of blocks need for indirect blocks 475 * @blks: number of data blocks to be mapped. 476 * @blocks_to_boundary: the offset in the indirect block 477 * 478 * return the total number of blocks to be allocate, including the 479 * direct and indirect blocks. 480 */ 481static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, 482 int blocks_to_boundary) 483{ 484 unsigned long count = 0; 485 486 /* 487 * Simple case, [t,d]Indirect block(s) has not allocated yet 488 * then it's clear blocks on that path have not allocated 489 */ 490 if (k > 0) { 491 /* right now we don't handle cross boundary allocation */ 492 if (blks < blocks_to_boundary + 1) 493 count += blks; 494 else 495 count += blocks_to_boundary + 1; 496 return count; 497 } 498 499 count++; 500 while (count < blks && count <= blocks_to_boundary && 501 le32_to_cpu(*(branch[0].p + count)) == 0) { 502 count++; 503 } 504 return count; 505} 506 507/** 508 * ext3_alloc_blocks: multiple allocate blocks needed for a branch 509 * @indirect_blks: the number of blocks need to allocate for indirect 510 * blocks 511 * 512 * @new_blocks: on return it will store the new block numbers for 513 * the indirect blocks(if needed) and the first direct block, 514 * @blks: on return it will store the total number of allocated 515 * direct blocks 516 */ 517static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, 518 ext3_fsblk_t goal, int indirect_blks, int blks, 519 ext3_fsblk_t new_blocks[4], int *err) 520{ 521 int target, i; 522 unsigned long count = 0; 523 int index = 0; 524 ext3_fsblk_t current_block = 0; 525 int ret = 0; 526 527 /* 528 * Here we try to allocate the requested multiple blocks at once, 529 * on a best-effort basis. 530 * To build a branch, we should allocate blocks for 531 * the indirect blocks(if not allocated yet), and at least 532 * the first direct block of this branch. That's the 533 * minimum number of blocks need to allocate(required) 534 */ 535 target = blks + indirect_blks; 536 537 while (1) { 538 count = target; 539 /* allocating blocks for indirect blocks and direct blocks */ 540 current_block = ext3_new_blocks(handle,inode,goal,&count,err); 541 if (*err) 542 goto failed_out; 543 544 target -= count; 545 /* allocate blocks for indirect blocks */ 546 while (index < indirect_blks && count) { 547 new_blocks[index++] = current_block++; 548 count--; 549 } 550 551 if (count > 0) 552 break; 553 } 554 555 /* save the new block number for the first direct block */ 556 new_blocks[index] = current_block; 557 558 /* total number of blocks allocated for direct blocks */ 559 ret = count; 560 *err = 0; 561 return ret; 562failed_out: 563 for (i = 0; i <index; i++) 564 ext3_free_blocks(handle, inode, new_blocks[i], 1); 565 return ret; 566} 567 568/** 569 * ext3_alloc_branch - allocate and set up a chain of blocks. 570 * @inode: owner 571 * @indirect_blks: number of allocated indirect blocks 572 * @blks: number of allocated direct blocks 573 * @offsets: offsets (in the blocks) to store the pointers to next. 574 * @branch: place to store the chain in. 
575 * 576 * This function allocates blocks, zeroes out all but the last one, 577 * links them into chain and (if we are synchronous) writes them to disk. 578 * In other words, it prepares a branch that can be spliced onto the 579 * inode. It stores the information about that chain in the branch[], in 580 * the same format as ext3_get_branch() would do. We are calling it after 581 * we had read the existing part of chain and partial points to the last 582 * triple of that (one with zero ->key). Upon the exit we have the same 583 * picture as after the successful ext3_get_block(), except that in one 584 * place chain is disconnected - *branch->p is still zero (we did not 585 * set the last link), but branch->key contains the number that should 586 * be placed into *branch->p to fill that gap. 587 * 588 * If allocation fails we free all blocks we've allocated (and forget 589 * their buffer_heads) and return the error value the from failed 590 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain 591 * as described above and return 0. 592 */ 593static int ext3_alloc_branch(handle_t *handle, struct inode *inode, 594 int indirect_blks, int *blks, ext3_fsblk_t goal, 595 int *offsets, Indirect *branch) 596{ 597 int blocksize = inode->i_sb->s_blocksize; 598 int i, n = 0; 599 int err = 0; 600 struct buffer_head *bh; 601 int num; 602 ext3_fsblk_t new_blocks[4]; 603 ext3_fsblk_t current_block; 604 605 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks, 606 *blks, new_blocks, &err); 607 if (err) 608 return err; 609 610 branch[0].key = cpu_to_le32(new_blocks[0]); 611 /* 612 * metadata blocks and data blocks are allocated. 613 */ 614 for (n = 1; n <= indirect_blks; n++) { 615 /* 616 * Get buffer_head for parent block, zero it out 617 * and set the pointer to new one, then send 618 * parent to disk. 619 */ 620 bh = sb_getblk(inode->i_sb, new_blocks[n-1]); 621 branch[n].bh = bh; 622 lock_buffer(bh); 623 BUFFER_TRACE(bh, "call get_create_access"); 624 err = ext3_journal_get_create_access(handle, bh); 625 if (err) { 626 unlock_buffer(bh); 627 brelse(bh); 628 goto failed; 629 } 630 631 memset(bh->b_data, 0, blocksize); 632 branch[n].p = (__le32 *) bh->b_data + offsets[n]; 633 branch[n].key = cpu_to_le32(new_blocks[n]); 634 *branch[n].p = branch[n].key; 635 if ( n == indirect_blks) { 636 current_block = new_blocks[n]; 637 /* 638 * End of chain, update the last new metablock of 639 * the chain to point to the new allocated 640 * data blocks numbers 641 */ 642 for (i=1; i < num; i++) 643 *(branch[n].p + i) = cpu_to_le32(++current_block); 644 } 645 BUFFER_TRACE(bh, "marking uptodate"); 646 set_buffer_uptodate(bh); 647 unlock_buffer(bh); 648 649 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 650 err = ext3_journal_dirty_metadata(handle, bh); 651 if (err) 652 goto failed; 653 } 654 *blks = num; 655 return err; 656failed: 657 /* Allocation failed, free what we already allocated */ 658 for (i = 1; i <= n ; i++) { 659 BUFFER_TRACE(branch[i].bh, "call journal_forget"); 660 ext3_journal_forget(handle, branch[i].bh); 661 } 662 for (i = 0; i <indirect_blks; i++) 663 ext3_free_blocks(handle, inode, new_blocks[i], 1); 664 665 ext3_free_blocks(handle, inode, new_blocks[i], num); 666 667 return err; 668} 669 670/** 671 * ext3_splice_branch - splice the allocated branch onto inode. 
672 * @inode: owner 673 * @block: (logical) number of block we are adding 674 * @chain: chain of indirect blocks (with a missing link - see 675 * ext3_alloc_branch) 676 * @where: location of missing link 677 * @num: number of indirect blocks we are adding 678 * @blks: number of direct blocks we are adding 679 * 680 * This function fills the missing link and does all housekeeping needed in 681 * inode (->i_blocks, etc.). In case of success we end up with the full 682 * chain to new block and return 0. 683 */ 684static int ext3_splice_branch(handle_t *handle, struct inode *inode, 685 long block, Indirect *where, int num, int blks) 686{ 687 int i; 688 int err = 0; 689 struct ext3_block_alloc_info *block_i; 690 ext3_fsblk_t current_block; 691 692 block_i = EXT3_I(inode)->i_block_alloc_info; 693 /* 694 * If we're splicing into a [td]indirect block (as opposed to the 695 * inode) then we need to get write access to the [td]indirect block 696 * before the splice. 697 */ 698 if (where->bh) { 699 BUFFER_TRACE(where->bh, "get_write_access"); 700 err = ext3_journal_get_write_access(handle, where->bh); 701 if (err) 702 goto err_out; 703 } 704 /* That's it */ 705 706 *where->p = where->key; 707 708 /* 709 * Update the host buffer_head or inode to point to more just allocated 710 * direct blocks blocks 711 */ 712 if (num == 0 && blks > 1) { 713 current_block = le32_to_cpu(where->key) + 1; 714 for (i = 1; i < blks; i++) 715 *(where->p + i ) = cpu_to_le32(current_block++); 716 } 717 718 /* 719 * update the most recently allocated logical & physical block 720 * in i_block_alloc_info, to assist find the proper goal block for next 721 * allocation 722 */ 723 if (block_i) { 724 block_i->last_alloc_logical_block = block + blks - 1; 725 block_i->last_alloc_physical_block = 726 le32_to_cpu(where[num].key) + blks - 1; 727 } 728 729 /* We are done with atomic stuff, now do the rest of housekeeping */ 730 731 inode->i_ctime = CURRENT_TIME_SEC; 732 ext3_mark_inode_dirty(handle, inode); 733 734 /* had we spliced it onto indirect block? */ 735 if (where->bh) { 736 /* 737 * If we spliced it onto an indirect block, we haven't 738 * altered the inode. Note however that if it is being spliced 739 * onto an indirect block at the very end of the file (the 740 * file is growing) then we *will* alter the inode to reflect 741 * the new i_size. But that is not done here - it is done in 742 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode. 743 */ 744 jbd_debug(5, "splicing indirect only\n"); 745 BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata"); 746 err = ext3_journal_dirty_metadata(handle, where->bh); 747 if (err) 748 goto err_out; 749 } else { 750 /* 751 * OK, we spliced it into the inode itself on a direct block. 752 * Inode was dirtied above. 753 */ 754 jbd_debug(5, "splicing direct\n"); 755 } 756 return err; 757 758err_out: 759 for (i = 1; i <= num; i++) { 760 BUFFER_TRACE(where[i].bh, "call journal_forget"); 761 ext3_journal_forget(handle, where[i].bh); 762 ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1); 763 } 764 ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks); 765 766 return err; 767} 768 769/* 770 * Allocation strategy is simple: if we have to allocate something, we will 771 * have to go the whole way to leaf. 
So let's do it before attaching anything 772 * to tree, set linkage between the newborn blocks, write them if sync is 773 * required, recheck the path, free and repeat if check fails, otherwise 774 * set the last missing link (that will protect us from any truncate-generated 775 * removals - all blocks on the path are immune now) and possibly force the 776 * write on the parent block. 777 * That has a nice additional property: no special recovery from the failed 778 * allocations is needed - we simply release blocks and do not touch anything 779 * reachable from inode. 780 * 781 * `handle' can be NULL if create == 0. 782 * 783 * The BKL may not be held on entry here. Be sure to take it early. 784 * return > 0, # of blocks mapped or allocated. 785 * return = 0, if plain lookup failed. 786 * return < 0, error case. 787 */ 788int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, 789 sector_t iblock, unsigned long maxblocks, 790 struct buffer_head *bh_result, 791 int create, int extend_disksize) 792{ 793 int err = -EIO; 794 int offsets[4]; 795 Indirect chain[4]; 796 Indirect *partial; 797 ext3_fsblk_t goal; 798 int indirect_blks; 799 int blocks_to_boundary = 0; 800 int depth; 801 struct ext3_inode_info *ei = EXT3_I(inode); 802 int count = 0; 803 ext3_fsblk_t first_block = 0; 804 805 806 J_ASSERT(handle != NULL || create == 0); 807 depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); 808 809 if (depth == 0) 810 goto out; 811 812 partial = ext3_get_branch(inode, depth, offsets, chain, &err); 813 814 /* Simplest case - block found, no allocation needed */ 815 if (!partial) { 816 first_block = le32_to_cpu(chain[depth - 1].key); 817 clear_buffer_new(bh_result); 818 count++; 819 /*map more blocks*/ 820 while (count < maxblocks && count <= blocks_to_boundary) { 821 ext3_fsblk_t blk; 822 823 if (!verify_chain(chain, partial)) { 824 /* 825 * Indirect block might be removed by 826 * truncate while we were reading it. 827 * Handling of that case: forget what we've 828 * got now. Flag the err as EAGAIN, so it 829 * will reread. 830 */ 831 err = -EAGAIN; 832 count = 0; 833 break; 834 } 835 blk = le32_to_cpu(*(chain[depth-1].p + count)); 836 837 if (blk == first_block + count) 838 count++; 839 else 840 break; 841 } 842 if (err != -EAGAIN) 843 goto got_it; 844 } 845 846 /* Next simple case - plain lookup or failed read of indirect block */ 847 if (!create || err == -EIO) 848 goto cleanup; 849 850 mutex_lock(&ei->truncate_mutex); 851 852 /* 853 * If the indirect block is missing while we are reading 854 * the chain(ext3_get_branch() returns -EAGAIN err), or 855 * if the chain has been changed after we grab the semaphore, 856 * (either because another process truncated this branch, or 857 * another get_block allocated this branch) re-grab the chain to see if 858 * the request block has been allocated or not. 859 * 860 * Since we already block the truncate/other get_block 861 * at this point, we will have the current copy of the chain when we 862 * splice the branch into the tree. 863 */ 864 if (err == -EAGAIN || !verify_chain(chain, partial)) { 865 while (partial > chain) { 866 brelse(partial->bh); 867 partial--; 868 } 869 partial = ext3_get_branch(inode, depth, offsets, chain, &err); 870 if (!partial) { 871 count++; 872 mutex_unlock(&ei->truncate_mutex); 873 if (err) 874 goto cleanup; 875 clear_buffer_new(bh_result); 876 goto got_it; 877 } 878 } 879 880 /* 881 * Okay, we need to do block allocation. 
Lazily initialize the block 882 * allocation info here if necessary 883 */ 884 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) 885 ext3_init_block_alloc_info(inode); 886 887 goal = ext3_find_goal(inode, iblock, chain, partial); 888 889 /* the number of blocks need to allocate for [d,t]indirect blocks */ 890 indirect_blks = (chain + depth) - partial - 1; 891 892 /* 893 * Next look up the indirect map to count the totoal number of 894 * direct blocks to allocate for this branch. 895 */ 896 count = ext3_blks_to_allocate(partial, indirect_blks, 897 maxblocks, blocks_to_boundary); 898 /* 899 * Block out ext3_truncate while we alter the tree 900 */ 901 err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal, 902 offsets + (partial - chain), partial); 903 904 /* 905 * The ext3_splice_branch call will free and forget any buffers 906 * on the new chain if there is a failure, but that risks using 907 * up transaction credits, especially for bitmaps where the 908 * credits cannot be returned. Can we handle this somehow? We 909 * may need to return -EAGAIN upwards in the worst case. --sct 910 */ 911 if (!err) 912 err = ext3_splice_branch(handle, inode, iblock, 913 partial, indirect_blks, count); 914 /* 915 * i_disksize growing is protected by truncate_mutex. Don't forget to 916 * protect it if you're about to implement concurrent 917 * ext3_get_block() -bzzz 918 */ 919 if (!err && extend_disksize && inode->i_size > ei->i_disksize) 920 ei->i_disksize = inode->i_size; 921 mutex_unlock(&ei->truncate_mutex); 922 if (err) 923 goto cleanup; 924 925 set_buffer_new(bh_result); 926got_it: 927 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); 928 if (count > blocks_to_boundary) 929 set_buffer_boundary(bh_result); 930 err = count; 931 /* Clean up and exit */ 932 partial = chain + depth - 1; /* the whole chain */ 933cleanup: 934 while (partial > chain) { 935 BUFFER_TRACE(partial->bh, "call brelse"); 936 brelse(partial->bh); 937 partial--; 938 } 939 BUFFER_TRACE(bh_result, "returned"); 940out: 941 return err; 942} 943 944#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) 945 946static int ext3_get_block(struct inode *inode, sector_t iblock, 947 struct buffer_head *bh_result, int create) 948{ 949 handle_t *handle = ext3_journal_current_handle(); 950 int ret = 0; 951 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 952 953 if (!create) 954 goto get_block; /* A read */ 955 956 if (max_blocks == 1) 957 goto get_block; /* A single block get */ 958 959 if (handle->h_transaction->t_state == T_LOCKED) { 960 /* 961 * Huge direct-io writes can hold off commits for long 962 * periods of time. Let this commit run. 963 */ 964 ext3_journal_stop(handle); 965 handle = ext3_journal_start(inode, DIO_CREDITS); 966 if (IS_ERR(handle)) 967 ret = PTR_ERR(handle); 968 goto get_block; 969 } 970 971 if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) { 972 /* 973 * Getting low on buffer credits... 974 */ 975 ret = ext3_journal_extend(handle, DIO_CREDITS); 976 if (ret > 0) { 977 /* 978 * Couldn't extend the transaction. Start a new one. 
979 */ 980 ret = ext3_journal_restart(handle, DIO_CREDITS); 981 } 982 } 983 984get_block: 985 if (ret == 0) { 986 ret = ext3_get_blocks_handle(handle, inode, iblock, 987 max_blocks, bh_result, create, 0); 988 if (ret > 0) { 989 bh_result->b_size = (ret << inode->i_blkbits); 990 ret = 0; 991 } 992 } 993 return ret; 994} 995 996/* 997 * `handle' can be NULL if create is zero 998 */ 999struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, 1000 long block, int create, int *errp) 1001{ 1002 struct buffer_head dummy; 1003 int fatal = 0, err; 1004 1005 J_ASSERT(handle != NULL || create == 0); 1006 1007 dummy.b_state = 0; 1008 dummy.b_blocknr = -1000; 1009 buffer_trace_init(&dummy.b_history); 1010 err = ext3_get_blocks_handle(handle, inode, block, 1, 1011 &dummy, create, 1); 1012 /* 1013 * ext3_get_blocks_handle() returns number of blocks 1014 * mapped. 0 in case of a HOLE. 1015 */ 1016 if (err > 0) { 1017 if (err > 1) 1018 WARN_ON(1); 1019 err = 0; 1020 } 1021 *errp = err; 1022 if (!err && buffer_mapped(&dummy)) { 1023 struct buffer_head *bh; 1024 bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 1025 if (!bh) { 1026 *errp = -EIO; 1027 goto err; 1028 } 1029 if (buffer_new(&dummy)) { 1030 J_ASSERT(create != 0); 1031 J_ASSERT(handle != 0); 1032 1033 /* 1034 * Now that we do not always journal data, we should 1035 * keep in mind whether this should always journal the 1036 * new buffer as metadata. For now, regular file 1037 * writes use ext3_get_block instead, so it's not a 1038 * problem. 1039 */ 1040 lock_buffer(bh); 1041 BUFFER_TRACE(bh, "call get_create_access"); 1042 fatal = ext3_journal_get_create_access(handle, bh); 1043 if (!fatal && !buffer_uptodate(bh)) { 1044 memset(bh->b_data,0,inode->i_sb->s_blocksize); 1045 set_buffer_uptodate(bh); 1046 } 1047 unlock_buffer(bh); 1048 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 1049 err = ext3_journal_dirty_metadata(handle, bh); 1050 if (!fatal) 1051 fatal = err; 1052 } else { 1053 BUFFER_TRACE(bh, "not a new buffer"); 1054 } 1055 if (fatal) { 1056 *errp = fatal; 1057 brelse(bh); 1058 bh = NULL; 1059 } 1060 return bh; 1061 } 1062err: 1063 return NULL; 1064} 1065 1066struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode, 1067 int block, int create, int *err) 1068{ 1069 struct buffer_head * bh; 1070 1071 bh = ext3_getblk(handle, inode, block, create, err); 1072 if (!bh) 1073 return bh; 1074 if (buffer_uptodate(bh)) 1075 return bh; 1076 ll_rw_block(READ_META, 1, &bh); 1077 wait_on_buffer(bh); 1078 if (buffer_uptodate(bh)) 1079 return bh; 1080 put_bh(bh); 1081 *err = -EIO; 1082 return NULL; 1083} 1084 1085static int walk_page_buffers( handle_t *handle, 1086 struct buffer_head *head, 1087 unsigned from, 1088 unsigned to, 1089 int *partial, 1090 int (*fn)( handle_t *handle, 1091 struct buffer_head *bh)) 1092{ 1093 struct buffer_head *bh; 1094 unsigned block_start, block_end; 1095 unsigned blocksize = head->b_size; 1096 int err, ret = 0; 1097 struct buffer_head *next; 1098 1099 for ( bh = head, block_start = 0; 1100 ret == 0 && (bh != head || !block_start); 1101 block_start = block_end, bh = next) 1102 { 1103 next = bh->b_this_page; 1104 block_end = block_start + blocksize; 1105 if (block_end <= from || block_start >= to) { 1106 if (partial && !buffer_uptodate(bh)) 1107 *partial = 1; 1108 continue; 1109 } 1110 err = (*fn)(handle, bh); 1111 if (!ret) 1112 ret = err; 1113 } 1114 return ret; 1115} 1116 1117/* 1118 * To preserve ordering, it is essential that the hole instantiation and 1119 * the data write be encapsulated in 
a single transaction. We cannot 1120 * close off a transaction and start a new one between the ext3_get_block() 1121 * and the commit_write(). So doing the journal_start at the start of 1122 * prepare_write() is the right place. 1123 * 1124 * Also, this function can nest inside ext3_writepage() -> 1125 * block_write_full_page(). In that case, we *know* that ext3_writepage() 1126 * has generated enough buffer credits to do the whole page. So we won't 1127 * block on the journal in that case, which is good, because the caller may 1128 * be PF_MEMALLOC. 1129 * 1130 * By accident, ext3 can be reentered when a transaction is open via 1131 * quota file writes. If we were to commit the transaction while thus 1132 * reentered, there can be a deadlock - we would be holding a quota 1133 * lock, and the commit would never complete if another thread had a 1134 * transaction open and was blocking on the quota lock - a ranking 1135 * violation. 1136 * 1137 * So what we do is to rely on the fact that journal_stop/journal_start 1138 * will _not_ run commit under these circumstances because handle->h_ref 1139 * is elevated. We'll still have enough credits for the tiny quotafile 1140 * write. 1141 */ 1142static int do_journal_get_write_access(handle_t *handle, 1143 struct buffer_head *bh) 1144{ 1145 if (!buffer_mapped(bh) || buffer_freed(bh)) 1146 return 0; 1147 return ext3_journal_get_write_access(handle, bh); 1148} 1149 1150static int ext3_prepare_write(struct file *file, struct page *page, 1151 unsigned from, unsigned to) 1152{ 1153 struct inode *inode = page->mapping->host; 1154 int ret, needed_blocks = ext3_writepage_trans_blocks(inode); 1155 handle_t *handle; 1156 int retries = 0; 1157 1158retry: 1159 handle = ext3_journal_start(inode, needed_blocks); 1160 if (IS_ERR(handle)) { 1161 ret = PTR_ERR(handle); 1162 goto out; 1163 } 1164 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) 1165 ret = nobh_prepare_write(page, from, to, ext3_get_block); 1166 else 1167 ret = block_prepare_write(page, from, to, ext3_get_block); 1168 if (ret) 1169 goto prepare_write_failed; 1170 1171 if (ext3_should_journal_data(inode)) { 1172 ret = walk_page_buffers(handle, page_buffers(page), 1173 from, to, NULL, do_journal_get_write_access); 1174 } 1175prepare_write_failed: 1176 if (ret) 1177 ext3_journal_stop(handle); 1178 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) 1179 goto retry; 1180out: 1181 return ret; 1182} 1183 1184int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) 1185{ 1186 int err = journal_dirty_data(handle, bh); 1187 if (err) 1188 ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__, 1189 bh, handle,err); 1190 return err; 1191} 1192 1193/* For commit_write() in data=journal mode */ 1194static int commit_write_fn(handle_t *handle, struct buffer_head *bh) 1195{ 1196 if (!buffer_mapped(bh) || buffer_freed(bh)) 1197 return 0; 1198 set_buffer_uptodate(bh); 1199 return ext3_journal_dirty_metadata(handle, bh); 1200} 1201 1202/* 1203 * We need to pick up the new inode size which generic_commit_write gave us 1204 * `file' can be NULL - eg, when called from page_symlink(). 1205 * 1206 * ext3 never places buffers on inode->i_mapping->private_list. metadata 1207 * buffers are managed internally. 
1208 */ 1209static int ext3_ordered_commit_write(struct file *file, struct page *page, 1210 unsigned from, unsigned to) 1211{ 1212 handle_t *handle = ext3_journal_current_handle(); 1213 struct inode *inode = page->mapping->host; 1214 int ret = 0, ret2; 1215 1216 ret = walk_page_buffers(handle, page_buffers(page), 1217 from, to, NULL, ext3_journal_dirty_data); 1218 1219 if (ret == 0) { 1220 /* 1221 * generic_commit_write() will run mark_inode_dirty() if i_size 1222 * changes. So let's piggyback the i_disksize mark_inode_dirty 1223 * into that. 1224 */ 1225 loff_t new_i_size; 1226 1227 new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 1228 if (new_i_size > EXT3_I(inode)->i_disksize) 1229 EXT3_I(inode)->i_disksize = new_i_size; 1230 ret = generic_commit_write(file, page, from, to); 1231 } 1232 ret2 = ext3_journal_stop(handle); 1233 if (!ret) 1234 ret = ret2; 1235 return ret; 1236} 1237 1238static int ext3_writeback_commit_write(struct file *file, struct page *page, 1239 unsigned from, unsigned to) 1240{ 1241 handle_t *handle = ext3_journal_current_handle(); 1242 struct inode *inode = page->mapping->host; 1243 int ret = 0, ret2; 1244 loff_t new_i_size; 1245 1246 new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 1247 if (new_i_size > EXT3_I(inode)->i_disksize) 1248 EXT3_I(inode)->i_disksize = new_i_size; 1249 1250 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) 1251 ret = nobh_commit_write(file, page, from, to); 1252 else 1253 ret = generic_commit_write(file, page, from, to); 1254 1255 ret2 = ext3_journal_stop(handle); 1256 if (!ret) 1257 ret = ret2; 1258 return ret; 1259} 1260 1261static int ext3_journalled_commit_write(struct file *file, 1262 struct page *page, unsigned from, unsigned to) 1263{ 1264 handle_t *handle = ext3_journal_current_handle(); 1265 struct inode *inode = page->mapping->host; 1266 int ret = 0, ret2; 1267 int partial = 0; 1268 loff_t pos; 1269 1270 /* 1271 * Here we duplicate the generic_commit_write() functionality 1272 */ 1273 pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; 1274 1275 ret = walk_page_buffers(handle, page_buffers(page), from, 1276 to, &partial, commit_write_fn); 1277 if (!partial) 1278 SetPageUptodate(page); 1279 if (pos > inode->i_size) 1280 i_size_write(inode, pos); 1281 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; 1282 if (inode->i_size > EXT3_I(inode)->i_disksize) { 1283 EXT3_I(inode)->i_disksize = inode->i_size; 1284 ret2 = ext3_mark_inode_dirty(handle, inode); 1285 if (!ret) 1286 ret = ret2; 1287 } 1288 ret2 = ext3_journal_stop(handle); 1289 if (!ret) 1290 ret = ret2; 1291 return ret; 1292} 1293 1294/* 1295 * bmap() is special. It gets used by applications such as lilo and by 1296 * the swapper to find the on-disk block of a specific piece of data. 1297 * 1298 * Naturally, this is dangerous if the block concerned is still in the 1299 * journal. If somebody makes a swapfile on an ext3 data-journaling 1300 * filesystem and enables swap, then they may get a nasty shock when the 1301 * data getting swapped to that swapfile suddenly gets overwritten by 1302 * the original zero's written out previously to the journal and 1303 * awaiting writeback in the kernel's buffer cache. 1304 * 1305 * So, if we see any bmap calls here on a modified, data-journaled file, 1306 * take extra steps to flush any blocks which might be in the cache. 
1307 */ 1308static sector_t ext3_bmap(struct address_space *mapping, sector_t block) 1309{ 1310 struct inode *inode = mapping->host; 1311 journal_t *journal; 1312 int err; 1313 1314 if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) { 1315 /* 1316 * This is a REALLY heavyweight approach, but the use of 1317 * bmap on dirty files is expected to be extremely rare: 1318 * only if we run lilo or swapon on a freshly made file 1319 * do we expect this to happen. 1320 * 1321 * (bmap requires CAP_SYS_RAWIO so this does not 1322 * represent an unprivileged user DOS attack --- we'd be 1323 * in trouble if mortal users could trigger this path at 1324 * will.) 1325 * 1326 * NB. EXT3_STATE_JDATA is not set on files other than 1327 * regular files. If somebody wants to bmap a directory 1328 * or symlink and gets confused because the buffer 1329 * hasn't yet been flushed to disk, they deserve 1330 * everything they get. 1331 */ 1332 1333 EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA; 1334 journal = EXT3_JOURNAL(inode); 1335 journal_lock_updates(journal); 1336 err = journal_flush(journal); 1337 journal_unlock_updates(journal); 1338 1339 if (err) 1340 return 0; 1341 } 1342 1343 return generic_block_bmap(mapping,block,ext3_get_block); 1344} 1345 1346static int bget_one(handle_t *handle, struct buffer_head *bh) 1347{ 1348 get_bh(bh); 1349 return 0; 1350} 1351 1352static int bput_one(handle_t *handle, struct buffer_head *bh) 1353{ 1354 put_bh(bh); 1355 return 0; 1356} 1357 1358static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) 1359{ 1360 if (buffer_mapped(bh)) 1361 return ext3_journal_dirty_data(handle, bh); 1362 return 0; 1363} 1364 1365static int ext3_ordered_writepage(struct page *page, 1366 struct writeback_control *wbc) 1367{ 1368 struct inode *inode = page->mapping->host; 1369 struct buffer_head *page_bufs; 1370 handle_t *handle = NULL; 1371 int ret = 0; 1372 int err; 1373 1374 J_ASSERT(PageLocked(page)); 1375 1376 /* 1377 * We give up here if we're reentered, because it might be for a 1378 * different filesystem. 1379 */ 1380 if (ext3_journal_current_handle()) 1381 goto out_fail; 1382 1383 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); 1384 1385 if (IS_ERR(handle)) { 1386 ret = PTR_ERR(handle); 1387 goto out_fail; 1388 } 1389 1390 if (!page_has_buffers(page)) { 1391 create_empty_buffers(page, inode->i_sb->s_blocksize, 1392 (1 << BH_Dirty)|(1 << BH_Uptodate)); 1393 } 1394 page_bufs = page_buffers(page); 1395 walk_page_buffers(handle, page_bufs, 0, 1396 PAGE_CACHE_SIZE, NULL, bget_one); 1397 1398 ret = block_write_full_page(page, ext3_get_block, wbc); 1399 1400 /* 1401 * The page can become unlocked at any point now, and 1402 * truncate can then come in and change things. So we 1403 * can't touch *page from now on. But *page_bufs is 1404 * safe due to elevated refcount. 1405 */ 1406 1407 /* 1408 * And attach them to the current transaction. But only if 1409 * block_write_full_page() succeeded. Otherwise they are unmapped, 1410 * and generally junk. 
1411 */ 1412 if (ret == 0) { 1413 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, 1414 NULL, journal_dirty_data_fn); 1415 if (!ret) 1416 ret = err; 1417 } 1418 walk_page_buffers(handle, page_bufs, 0, 1419 PAGE_CACHE_SIZE, NULL, bput_one); 1420 err = ext3_journal_stop(handle); 1421 if (!ret) 1422 ret = err; 1423 return ret; 1424 1425out_fail: 1426 redirty_page_for_writepage(wbc, page); 1427 unlock_page(page); 1428 return ret; 1429} 1430 1431static int ext3_writeback_writepage(struct page *page, 1432 struct writeback_control *wbc) 1433{ 1434 struct inode *inode = page->mapping->host; 1435 handle_t *handle = NULL; 1436 int ret = 0; 1437 int err; 1438 1439 if (ext3_journal_current_handle()) 1440 goto out_fail; 1441 1442 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); 1443 if (IS_ERR(handle)) { 1444 ret = PTR_ERR(handle); 1445 goto out_fail; 1446 } 1447 1448 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) 1449 ret = nobh_writepage(page, ext3_get_block, wbc); 1450 else 1451 ret = block_write_full_page(page, ext3_get_block, wbc); 1452 1453 err = ext3_journal_stop(handle); 1454 if (!ret) 1455 ret = err; 1456 return ret; 1457 1458out_fail: 1459 redirty_page_for_writepage(wbc, page); 1460 unlock_page(page); 1461 return ret; 1462} 1463 1464static int ext3_journalled_writepage(struct page *page, 1465 struct writeback_control *wbc) 1466{ 1467 struct inode *inode = page->mapping->host; 1468 handle_t *handle = NULL; 1469 int ret = 0; 1470 int err; 1471 1472 if (ext3_journal_current_handle()) 1473 goto no_write; 1474 1475 handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); 1476 if (IS_ERR(handle)) { 1477 ret = PTR_ERR(handle); 1478 goto no_write; 1479 } 1480 1481 if (!page_has_buffers(page) || PageChecked(page)) { 1482 /* 1483 * It's mmapped pagecache. Add buffers and journal it. There 1484 * doesn't seem much point in redirtying the page here. 1485 */ 1486 ClearPageChecked(page); 1487 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 1488 ext3_get_block); 1489 if (ret != 0) { 1490 ext3_journal_stop(handle); 1491 goto out_unlock; 1492 } 1493 ret = walk_page_buffers(handle, page_buffers(page), 0, 1494 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access); 1495 1496 err = walk_page_buffers(handle, page_buffers(page), 0, 1497 PAGE_CACHE_SIZE, NULL, commit_write_fn); 1498 if (ret == 0) 1499 ret = err; 1500 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; 1501 unlock_page(page); 1502 } else { 1503 /* 1504 * It may be a page full of checkpoint-mode buffers. We don't 1505 * really know unless we go poke around in the buffer_heads. 1506 * But block_write_full_page will do the right thing. 
1507 */ 1508 ret = block_write_full_page(page, ext3_get_block, wbc); 1509 } 1510 err = ext3_journal_stop(handle); 1511 if (!ret) 1512 ret = err; 1513out: 1514 return ret; 1515 1516no_write: 1517 redirty_page_for_writepage(wbc, page); 1518out_unlock: 1519 unlock_page(page); 1520 goto out; 1521} 1522 1523static int ext3_readpage(struct file *file, struct page *page) 1524{ 1525 return mpage_readpage(page, ext3_get_block); 1526} 1527 1528static int 1529ext3_readpages(struct file *file, struct address_space *mapping, 1530 struct list_head *pages, unsigned nr_pages) 1531{ 1532 return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); 1533} 1534 1535static void ext3_invalidatepage(struct page *page, unsigned long offset) 1536{ 1537 journal_t *journal = EXT3_JOURNAL(page->mapping->host); 1538 1539 /* 1540 * If it's a full truncate we just forget about the pending dirtying 1541 */ 1542 if (offset == 0) 1543 ClearPageChecked(page); 1544 1545 journal_invalidatepage(journal, page, offset); 1546} 1547 1548static int ext3_releasepage(struct page *page, gfp_t wait) 1549{ 1550 journal_t *journal = EXT3_JOURNAL(page->mapping->host); 1551 1552 WARN_ON(PageChecked(page)); 1553 if (!page_has_buffers(page)) 1554 return 0; 1555 return journal_try_to_free_buffers(journal, page, wait); 1556} 1557 1558/* 1559 * If the O_DIRECT write will extend the file then add this inode to the 1560 * orphan list. So recovery will truncate it back to the original size 1561 * if the machine crashes during the write. 1562 * 1563 * If the O_DIRECT write is intantiating holes inside i_size and the machine 1564 * crashes then stale disk data _may_ be exposed inside the file. 1565 */ 1566static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, 1567 const struct iovec *iov, loff_t offset, 1568 unsigned long nr_segs) 1569{ 1570 struct file *file = iocb->ki_filp; 1571 struct inode *inode = file->f_mapping->host; 1572 struct ext3_inode_info *ei = EXT3_I(inode); 1573 handle_t *handle = NULL; 1574 ssize_t ret; 1575 int orphan = 0; 1576 size_t count = iov_length(iov, nr_segs); 1577 1578 if (rw == WRITE) { 1579 loff_t final_size = offset + count; 1580 1581 handle = ext3_journal_start(inode, DIO_CREDITS); 1582 if (IS_ERR(handle)) { 1583 ret = PTR_ERR(handle); 1584 goto out; 1585 } 1586 if (final_size > inode->i_size) { 1587 ret = ext3_orphan_add(handle, inode); 1588 if (ret) 1589 goto out_stop; 1590 orphan = 1; 1591 ei->i_disksize = inode->i_size; 1592 } 1593 } 1594 1595 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1596 offset, nr_segs, 1597 ext3_get_block, NULL); 1598 1599 /* 1600 * Reacquire the handle: ext3_get_block() can restart the transaction 1601 */ 1602 handle = ext3_journal_current_handle(); 1603 1604out_stop: 1605 if (handle) { 1606 int err; 1607 1608 if (orphan && inode->i_nlink) 1609 ext3_orphan_del(handle, inode); 1610 if (orphan && ret > 0) { 1611 loff_t end = offset + ret; 1612 if (end > inode->i_size) { 1613 ei->i_disksize = end; 1614 i_size_write(inode, end); 1615 /* 1616 * We're going to return a positive `ret' 1617 * here due to non-zero-length I/O, so there's 1618 * no way of reporting error returns from 1619 * ext3_mark_inode_dirty() to userspace. So 1620 * ignore it. 1621 */ 1622 ext3_mark_inode_dirty(handle, inode); 1623 } 1624 } 1625 err = ext3_journal_stop(handle); 1626 if (ret == 0) 1627 ret = err; 1628 } 1629out: 1630 return ret; 1631} 1632 1633/* 1634 * Pages can be marked dirty completely asynchronously from ext3's journalling 1635 * activity. 
By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 1636 * much here because ->set_page_dirty is called under VFS locks. The page is 1637 * not necessarily locked. 1638 * 1639 * We cannot just dirty the page and leave attached buffers clean, because the 1640 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 1641 * or jbddirty because all the journalling code will explode. 1642 * 1643 * So what we do is to mark the page "pending dirty" and next time writepage 1644 * is called, propagate that into the buffers appropriately. 1645 */ 1646static int ext3_journalled_set_page_dirty(struct page *page) 1647{ 1648 SetPageChecked(page); 1649 return __set_page_dirty_nobuffers(page); 1650} 1651 1652static const struct address_space_operations ext3_ordered_aops = { 1653 .readpage = ext3_readpage, 1654 .readpages = ext3_readpages, 1655 .writepage = ext3_ordered_writepage, 1656 .sync_page = block_sync_page, 1657 .prepare_write = ext3_prepare_write, 1658 .commit_write = ext3_ordered_commit_write, 1659 .bmap = ext3_bmap, 1660 .invalidatepage = ext3_invalidatepage, 1661 .releasepage = ext3_releasepage, 1662 .direct_IO = ext3_direct_IO, 1663 .migratepage = buffer_migrate_page, 1664}; 1665 1666static const struct address_space_operations ext3_writeback_aops = { 1667 .readpage = ext3_readpage, 1668 .readpages = ext3_readpages, 1669 .writepage = ext3_writeback_writepage, 1670 .sync_page = block_sync_page, 1671 .prepare_write = ext3_prepare_write, 1672 .commit_write = ext3_writeback_commit_write, 1673 .bmap = ext3_bmap, 1674 .invalidatepage = ext3_invalidatepage, 1675 .releasepage = ext3_releasepage, 1676 .direct_IO = ext3_direct_IO, 1677 .migratepage = buffer_migrate_page, 1678}; 1679 1680static const struct address_space_operations ext3_journalled_aops = { 1681 .readpage = ext3_readpage, 1682 .readpages = ext3_readpages, 1683 .writepage = ext3_journalled_writepage, 1684 .sync_page = block_sync_page, 1685 .prepare_write = ext3_prepare_write, 1686 .commit_write = ext3_journalled_commit_write, 1687 .set_page_dirty = ext3_journalled_set_page_dirty, 1688 .bmap = ext3_bmap, 1689 .invalidatepage = ext3_invalidatepage, 1690 .releasepage = ext3_releasepage, 1691}; 1692 1693void ext3_set_aops(struct inode *inode) 1694{ 1695 if (ext3_should_order_data(inode)) 1696 inode->i_mapping->a_ops = &ext3_ordered_aops; 1697 else if (ext3_should_writeback_data(inode)) 1698 inode->i_mapping->a_ops = &ext3_writeback_aops; 1699 else 1700 inode->i_mapping->a_ops = &ext3_journalled_aops; 1701} 1702 1703/* 1704 * ext3_block_truncate_page() zeroes out a mapping from file offset `from' 1705 * up to the end of the block which corresponds to `from'. 1706 * This required during truncate. We need to physically zero the tail end 1707 * of that block so it doesn't yield old data if the file is later grown. 1708 */ 1709static int ext3_block_truncate_page(handle_t *handle, struct page *page, 1710 struct address_space *mapping, loff_t from) 1711{ 1712 ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT; 1713 unsigned offset = from & (PAGE_CACHE_SIZE-1); 1714 unsigned blocksize, iblock, length, pos; 1715 struct inode *inode = mapping->host; 1716 struct buffer_head *bh; 1717 int err = 0; 1718 1719 blocksize = inode->i_sb->s_blocksize; 1720 length = blocksize - (offset & (blocksize - 1)); 1721 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 1722 1723 /* 1724 * For "nobh" option, we can only work if we don't need to 1725 * read-in the page - otherwise we create buffers to do the IO. 
1726 */ 1727 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 1728 ext3_should_writeback_data(inode) && PageUptodate(page)) { 1729 zero_user_page(page, offset, length, KM_USER0); 1730 set_page_dirty(page); 1731 goto unlock; 1732 } 1733 1734 if (!page_has_buffers(page)) 1735 create_empty_buffers(page, blocksize, 0); 1736 1737 /* Find the buffer that contains "offset" */ 1738 bh = page_buffers(page); 1739 pos = blocksize; 1740 while (offset >= pos) { 1741 bh = bh->b_this_page; 1742 iblock++; 1743 pos += blocksize; 1744 } 1745 1746 err = 0; 1747 if (buffer_freed(bh)) { 1748 BUFFER_TRACE(bh, "freed: skip"); 1749 goto unlock; 1750 } 1751 1752 if (!buffer_mapped(bh)) { 1753 BUFFER_TRACE(bh, "unmapped"); 1754 ext3_get_block(inode, iblock, bh, 0); 1755 /* unmapped? It's a hole - nothing to do */ 1756 if (!buffer_mapped(bh)) { 1757 BUFFER_TRACE(bh, "still unmapped"); 1758 goto unlock; 1759 } 1760 } 1761 1762 /* Ok, it's mapped. Make sure it's up-to-date */ 1763 if (PageUptodate(page)) 1764 set_buffer_uptodate(bh); 1765 1766 if (!buffer_uptodate(bh)) { 1767 err = -EIO; 1768 ll_rw_block(READ, 1, &bh); 1769 wait_on_buffer(bh); 1770 /* Uhhuh. Read error. Complain and punt. */ 1771 if (!buffer_uptodate(bh)) 1772 goto unlock; 1773 } 1774 1775 if (ext3_should_journal_data(inode)) { 1776 BUFFER_TRACE(bh, "get write access"); 1777 err = ext3_journal_get_write_access(handle, bh); 1778 if (err) 1779 goto unlock; 1780 } 1781 1782 zero_user_page(page, offset, length, KM_USER0); 1783 BUFFER_TRACE(bh, "zeroed end of block"); 1784 1785 err = 0; 1786 if (ext3_should_journal_data(inode)) { 1787 err = ext3_journal_dirty_metadata(handle, bh); 1788 } else { 1789 if (ext3_should_order_data(inode)) 1790 err = ext3_journal_dirty_data(handle, bh); 1791 mark_buffer_dirty(bh); 1792 } 1793 1794unlock: 1795 unlock_page(page); 1796 page_cache_release(page); 1797 return err; 1798} 1799 1800/* 1801 * Probably it should be a library function... search for first non-zero word 1802 * or memcmp with zero_page, whatever is better for particular architecture. 1803 * Linus? 1804 */ 1805static inline int all_zeroes(__le32 *p, __le32 *q) 1806{ 1807 while (p < q) 1808 if (*p++) 1809 return 0; 1810 return 1; 1811} 1812 1813/** 1814 * ext3_find_shared - find the indirect blocks for partial truncation. 1815 * @inode: inode in question 1816 * @depth: depth of the affected branch 1817 * @offsets: offsets of pointers in that branch (see ext3_block_to_path) 1818 * @chain: place to store the pointers to partial indirect blocks 1819 * @top: place to the (detached) top of branch 1820 * 1821 * This is a helper function used by ext3_truncate(). 1822 * 1823 * When we do truncate() we may have to clean the ends of several 1824 * indirect blocks but leave the blocks themselves alive. Block is 1825 * partially truncated if some data below the new i_size is refered 1826 * from it (and it is on the path to the first completely truncated 1827 * data block, indeed). We have to free the top of that path along 1828 * with everything to the right of the path. Since no allocation 1829 * past the truncation point is possible until ext3_truncate() 1830 * finishes, we may safely do the latter, but top of branch may 1831 * require special attention - pageout below the truncation point 1832 * might try to populate it. 
1833 * 1834 * We atomically detach the top of branch from the tree, store the 1835 * block number of its root in *@top, pointers to buffer_heads of 1836 * partially truncated blocks - in @chain[].bh and pointers to 1837 * their last elements that should not be removed - in 1838 * @chain[].p. Return value is the pointer to last filled element 1839 * of @chain. 1840 * 1841 * The work left to caller to do the actual freeing of subtrees: 1842 * a) free the subtree starting from *@top 1843 * b) free the subtrees whose roots are stored in 1844 * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 1845 * c) free the subtrees growing from the inode past the @chain[0]. 1846 * (no partially truncated stuff there). */ 1847 1848static Indirect *ext3_find_shared(struct inode *inode, int depth, 1849 int offsets[4], Indirect chain[4], __le32 *top) 1850{ 1851 Indirect *partial, *p; 1852 int k, err; 1853 1854 *top = 0; 1855 /* Make k index the deepest non-null offest + 1 */ 1856 for (k = depth; k > 1 && !offsets[k-1]; k--) 1857 ; 1858 partial = ext3_get_branch(inode, k, offsets, chain, &err); 1859 /* Writer: pointers */ 1860 if (!partial) 1861 partial = chain + k-1; 1862 /* 1863 * If the branch acquired continuation since we've looked at it - 1864 * fine, it should all survive and (new) top doesn't belong to us. 1865 */ 1866 if (!partial->key && *partial->p) 1867 /* Writer: end */ 1868 goto no_top; 1869 for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--) 1870 ; 1871 /* 1872 * OK, we've found the last block that must survive. The rest of our 1873 * branch should be detached before unlocking. However, if that rest 1874 * of branch is all ours and does not grow immediately from the inode 1875 * it's easier to cheat and just decrement partial->p. 1876 */ 1877 if (p == chain + k - 1 && p > chain) { 1878 p->p--; 1879 } else { 1880 *top = *p->p; 1881 /* Nope, don't do this in ext3. Must leave the tree intact */ 1882 } 1883 /* Writer: end */ 1884 1885 while(partial > p) { 1886 brelse(partial->bh); 1887 partial--; 1888 } 1889no_top: 1890 return partial; 1891} 1892 1893/* 1894 * Zero a number of block pointers in either an inode or an indirect block. 1895 * If we restart the transaction we must again get write access to the 1896 * indirect block for further modification. 1897 * 1898 * We release `count' blocks on disk, but (last - first) may be greater 1899 * than `count' because there can be holes in there. 1900 */ 1901static void ext3_clear_blocks(handle_t *handle, struct inode *inode, 1902 struct buffer_head *bh, ext3_fsblk_t block_to_free, 1903 unsigned long count, __le32 *first, __le32 *last) 1904{ 1905 __le32 *p; 1906 if (try_to_extend_transaction(handle, inode)) { 1907 if (bh) { 1908 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 1909 ext3_journal_dirty_metadata(handle, bh); 1910 } 1911 ext3_mark_inode_dirty(handle, inode); 1912 ext3_journal_test_restart(handle, inode); 1913 if (bh) { 1914 BUFFER_TRACE(bh, "retaking write access"); 1915 ext3_journal_get_write_access(handle, bh); 1916 } 1917 } 1918 1919 /* 1920 * Any buffers which are on the journal will be in memory. We find 1921 * them on the hash table so journal_revoke() will run journal_forget() 1922 * on them. We've already detached each block from the file, so 1923 * bforget() in journal_forget() should be safe. 1924 * 1925 * AKPM: turn on bforget in journal_forget()!!! 
1926 */ 1927 for (p = first; p < last; p++) { 1928 u32 nr = le32_to_cpu(*p); 1929 if (nr) { 1930 struct buffer_head *bh; 1931 1932 *p = 0; 1933 bh = sb_find_get_block(inode->i_sb, nr); 1934 ext3_forget(handle, 0, inode, bh, nr); 1935 } 1936 } 1937 1938 ext3_free_blocks(handle, inode, block_to_free, count); 1939} 1940 1941/** 1942 * ext3_free_data - free a list of data blocks 1943 * @handle: handle for this transaction 1944 * @inode: inode we are dealing with 1945 * @this_bh: indirect buffer_head which contains *@first and *@last 1946 * @first: array of block numbers 1947 * @last: points immediately past the end of array 1948 * 1949 * We are freeing all blocks refered from that array (numbers are stored as 1950 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 1951 * 1952 * We accumulate contiguous runs of blocks to free. Conveniently, if these 1953 * blocks are contiguous then releasing them at one time will only affect one 1954 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 1955 * actually use a lot of journal space. 1956 * 1957 * @this_bh will be %NULL if @first and @last point into the inode's direct 1958 * block pointers. 1959 */ 1960static void ext3_free_data(handle_t *handle, struct inode *inode, 1961 struct buffer_head *this_bh, 1962 __le32 *first, __le32 *last) 1963{ 1964 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */ 1965 unsigned long count = 0; /* Number of blocks in the run */ 1966 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 1967 corresponding to 1968 block_to_free */ 1969 ext3_fsblk_t nr; /* Current block # */ 1970 __le32 *p; /* Pointer into inode/ind 1971 for current block */ 1972 int err; 1973 1974 if (this_bh) { /* For indirect block */ 1975 BUFFER_TRACE(this_bh, "get_write_access"); 1976 err = ext3_journal_get_write_access(handle, this_bh); 1977 /* Important: if we can't update the indirect pointers 1978 * to the blocks, we can't free them. */ 1979 if (err) 1980 return; 1981 } 1982 1983 for (p = first; p < last; p++) { 1984 nr = le32_to_cpu(*p); 1985 if (nr) { 1986 /* accumulate blocks to free if they're contiguous */ 1987 if (count == 0) { 1988 block_to_free = nr; 1989 block_to_free_p = p; 1990 count = 1; 1991 } else if (nr == block_to_free + count) { 1992 count++; 1993 } else { 1994 ext3_clear_blocks(handle, inode, this_bh, 1995 block_to_free, 1996 count, block_to_free_p, p); 1997 block_to_free = nr; 1998 block_to_free_p = p; 1999 count = 1; 2000 } 2001 } 2002 } 2003 2004 if (count > 0) 2005 ext3_clear_blocks(handle, inode, this_bh, block_to_free, 2006 count, block_to_free_p, p); 2007 2008 if (this_bh) { 2009 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata"); 2010 ext3_journal_dirty_metadata(handle, this_bh); 2011 } 2012} 2013 2014/** 2015 * ext3_free_branches - free an array of branches 2016 * @handle: JBD handle for this transaction 2017 * @inode: inode we are dealing with 2018 * @parent_bh: the buffer_head which contains *@first and *@last 2019 * @first: array of block numbers 2020 * @last: pointer immediately past the end of array 2021 * @depth: depth of the branches to free 2022 * 2023 * We are freeing all blocks refered from these branches (numbers are 2024 * stored as little-endian 32-bit) and updating @inode->i_blocks 2025 * appropriately. 
2026 */ 2027static void ext3_free_branches(handle_t *handle, struct inode *inode, 2028 struct buffer_head *parent_bh, 2029 __le32 *first, __le32 *last, int depth) 2030{ 2031 ext3_fsblk_t nr; 2032 __le32 *p; 2033 2034 if (is_handle_aborted(handle)) 2035 return; 2036 2037 if (depth--) { 2038 struct buffer_head *bh; 2039 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); 2040 p = last; 2041 while (--p >= first) { 2042 nr = le32_to_cpu(*p); 2043 if (!nr) 2044 continue; /* A hole */ 2045 2046 /* Go read the buffer for the next level down */ 2047 bh = sb_bread(inode->i_sb, nr); 2048 2049 /* 2050 * A read failure? Report error and clear slot 2051 * (should be rare). 2052 */ 2053 if (!bh) { 2054 ext3_error(inode->i_sb, "ext3_free_branches", 2055 "Read failure, inode=%lu, block="E3FSBLK, 2056 inode->i_ino, nr); 2057 continue; 2058 } 2059 2060 /* This zaps the entire block. Bottom up. */ 2061 BUFFER_TRACE(bh, "free child branches"); 2062 ext3_free_branches(handle, inode, bh, 2063 (__le32*)bh->b_data, 2064 (__le32*)bh->b_data + addr_per_block, 2065 depth); 2066 2067 /* 2068 * We've probably journalled the indirect block several 2069 * times during the truncate. But it's no longer 2070 * needed and we now drop it from the transaction via 2071 * journal_revoke(). 2072 * 2073 * That's easy if it's exclusively part of this 2074 * transaction. But if it's part of the committing 2075 * transaction then journal_forget() will simply 2076 * brelse() it. That means that if the underlying 2077 * block is reallocated in ext3_get_block(), 2078 * unmap_underlying_metadata() will find this block 2079 * and will try to get rid of it. damn, damn. 2080 * 2081 * If this block has already been committed to the 2082 * journal, a revoke record will be written. And 2083 * revoke records must be emitted *before* clearing 2084 * this block's bit in the bitmaps. 2085 */ 2086 ext3_forget(handle, 1, inode, bh, bh->b_blocknr); 2087 2088 /* 2089 * Everything below this this pointer has been 2090 * released. Now let this top-of-subtree go. 2091 * 2092 * We want the freeing of this indirect block to be 2093 * atomic in the journal with the updating of the 2094 * bitmap block which owns it. So make some room in 2095 * the journal. 2096 * 2097 * We zero the parent pointer *after* freeing its 2098 * pointee in the bitmaps, so if extend_transaction() 2099 * for some reason fails to put the bitmap changes and 2100 * the release into the same transaction, recovery 2101 * will merely complain about releasing a free block, 2102 * rather than leaking blocks. 2103 */ 2104 if (is_handle_aborted(handle)) 2105 return; 2106 if (try_to_extend_transaction(handle, inode)) { 2107 ext3_mark_inode_dirty(handle, inode); 2108 ext3_journal_test_restart(handle, inode); 2109 } 2110 2111 ext3_free_blocks(handle, inode, nr, 1); 2112 2113 if (parent_bh) { 2114 /* 2115 * The block which we have just freed is 2116 * pointed to by an indirect block: journal it 2117 */ 2118 BUFFER_TRACE(parent_bh, "get_write_access"); 2119 if (!ext3_journal_get_write_access(handle, 2120 parent_bh)){ 2121 *p = 0; 2122 BUFFER_TRACE(parent_bh, 2123 "call ext3_journal_dirty_metadata"); 2124 ext3_journal_dirty_metadata(handle, 2125 parent_bh); 2126 } 2127 } 2128 } 2129 } else { 2130 /* We have reached the bottom of the tree. 
*/ 2131 BUFFER_TRACE(parent_bh, "free data blocks"); 2132 ext3_free_data(handle, inode, parent_bh, first, last); 2133 } 2134} 2135 2136/* 2137 * ext3_truncate() 2138 * 2139 * We block out ext3_get_block() block instantiations across the entire 2140 * transaction, and VFS/VM ensures that ext3_truncate() cannot run 2141 * simultaneously on behalf of the same inode. 2142 * 2143 * As we work through the truncate and commmit bits of it to the journal there 2144 * is one core, guiding principle: the file's tree must always be consistent on 2145 * disk. We must be able to restart the truncate after a crash. 2146 * 2147 * The file's tree may be transiently inconsistent in memory (although it 2148 * probably isn't), but whenever we close off and commit a journal transaction, 2149 * the contents of (the filesystem + the journal) must be consistent and 2150 * restartable. It's pretty simple, really: bottom up, right to left (although 2151 * left-to-right works OK too). 2152 * 2153 * Note that at recovery time, journal replay occurs *before* the restart of 2154 * truncate against the orphan inode list. 2155 * 2156 * The committed inode has the new, desired i_size (which is the same as 2157 * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see 2158 * that this inode's truncate did not complete and it will again call 2159 * ext3_truncate() to have another go. So there will be instantiated blocks 2160 * to the right of the truncation point in a crashed ext3 filesystem. But 2161 * that's fine - as long as they are linked from the inode, the post-crash 2162 * ext3_truncate() run will find them and release them. 2163 */ 2164void ext3_truncate(struct inode *inode) 2165{ 2166 handle_t *handle; 2167 struct ext3_inode_info *ei = EXT3_I(inode); 2168 __le32 *i_data = ei->i_data; 2169 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); 2170 struct address_space *mapping = inode->i_mapping; 2171 int offsets[4]; 2172 Indirect chain[4]; 2173 Indirect *partial; 2174 __le32 nr = 0; 2175 int n; 2176 long last_block; 2177 unsigned blocksize = inode->i_sb->s_blocksize; 2178 struct page *page; 2179 2180 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 2181 S_ISLNK(inode->i_mode))) 2182 return; 2183 if (ext3_inode_is_fast_symlink(inode)) 2184 return; 2185 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 2186 return; 2187 2188 /* 2189 * We have to lock the EOF page here, because lock_page() nests 2190 * outside journal_start(). 2191 */ 2192 if ((inode->i_size & (blocksize - 1)) == 0) { 2193 /* Block boundary? Nothing to do */ 2194 page = NULL; 2195 } else { 2196 page = grab_cache_page(mapping, 2197 inode->i_size >> PAGE_CACHE_SHIFT); 2198 if (!page) 2199 return; 2200 } 2201 2202 handle = start_transaction(inode); 2203 if (IS_ERR(handle)) { 2204 if (page) { 2205 clear_highpage(page); 2206 flush_dcache_page(page); 2207 unlock_page(page); 2208 page_cache_release(page); 2209 } 2210 return; /* AKPM: return what? */ 2211 } 2212 2213 last_block = (inode->i_size + blocksize-1) 2214 >> EXT3_BLOCK_SIZE_BITS(inode->i_sb); 2215 2216 if (page) 2217 ext3_block_truncate_page(handle, page, mapping, inode->i_size); 2218 2219 n = ext3_block_to_path(inode, last_block, offsets, NULL); 2220 if (n == 0) 2221 goto out_stop; /* error */ 2222 2223 /* 2224 * OK. This truncate is going to happen. We add the inode to the 2225 * orphan list, so that if this truncate spans multiple transactions, 2226 * and we crash, we will resume the truncate when the filesystem 2227 * recovers. 
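
Two small calculations drive the early part of ext3_truncate(): whether the new size sits on a block boundary (no partial tail to zero, so no EOF page is locked) and which block is the first to lie wholly past the new size. A userspace sketch of both, assuming 4 KB blocks in the examples:

/* Illustrative userspace sketch of the size calculations made early in
 * ext3_truncate(): the block-boundary test and the last_block rounding. */
#include <stdio.h>
#include <stdint.h>

static void truncate_math(uint64_t i_size, unsigned blocksize,
                          unsigned blocksize_bits)
{
    int on_boundary = (i_size & (blocksize - 1)) == 0;
    uint64_t last_block = (i_size + blocksize - 1) >> blocksize_bits;

    printf("size=%llu: %s; first block wholly past the new size = %llu\n",
           (unsigned long long)i_size,
           on_boundary ? "block boundary, no tail to zero"
                       : "partial tail block must be zeroed",
           (unsigned long long)last_block);
}

int main(void)
{
    truncate_math(8192, 4096, 12);   /* exactly two blocks      */
    truncate_math(5000, 4096, 12);   /* one full block + a tail */
    return 0;
}
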
It also marks the inode dirty, to catch the new size. 2228 * 2229 * Implication: the file must always be in a sane, consistent 2230 * truncatable state while each transaction commits. 2231 */ 2232 if (ext3_orphan_add(handle, inode)) 2233 goto out_stop; 2234 2235 /* 2236 * The orphan list entry will now protect us from any crash which 2237 * occurs before the truncate completes, so it is now safe to propagate 2238 * the new, shorter inode size (held for now in i_size) into the 2239 * on-disk inode. We do this via i_disksize, which is the value which 2240 * ext3 *really* writes onto the disk inode. 2241 */ 2242 ei->i_disksize = inode->i_size; 2243 2244 /* 2245 * From here we block out all ext3_get_block() callers who want to 2246 * modify the block allocation tree. 2247 */ 2248 mutex_lock(&ei->truncate_mutex); 2249 2250 if (n == 1) { /* direct blocks */ 2251 ext3_free_data(handle, inode, NULL, i_data+offsets[0], 2252 i_data + EXT3_NDIR_BLOCKS); 2253 goto do_indirects; 2254 } 2255 2256 partial = ext3_find_shared(inode, n, offsets, chain, &nr); 2257 /* Kill the top of shared branch (not detached) */ 2258 if (nr) { 2259 if (partial == chain) { 2260 /* Shared branch grows from the inode */ 2261 ext3_free_branches(handle, inode, NULL, 2262 &nr, &nr+1, (chain+n-1) - partial); 2263 *partial->p = 0; 2264 /* 2265 * We mark the inode dirty prior to restart, 2266 * and prior to stop. No need for it here. 2267 */ 2268 } else { 2269 /* Shared branch grows from an indirect block */ 2270 BUFFER_TRACE(partial->bh, "get_write_access"); 2271 ext3_free_branches(handle, inode, partial->bh, 2272 partial->p, 2273 partial->p+1, (chain+n-1) - partial); 2274 } 2275 } 2276 /* Clear the ends of indirect blocks on the shared branch */ 2277 while (partial > chain) { 2278 ext3_free_branches(handle, inode, partial->bh, partial->p + 1, 2279 (__le32*)partial->bh->b_data+addr_per_block, 2280 (chain+n-1) - partial); 2281 BUFFER_TRACE(partial->bh, "call brelse"); 2282 brelse (partial->bh); 2283 partial--; 2284 } 2285do_indirects: 2286 /* Kill the remaining (whole) subtrees */ 2287 switch (offsets[0]) { 2288 default: 2289 nr = i_data[EXT3_IND_BLOCK]; 2290 if (nr) { 2291 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 2292 i_data[EXT3_IND_BLOCK] = 0; 2293 } 2294 case EXT3_IND_BLOCK: 2295 nr = i_data[EXT3_DIND_BLOCK]; 2296 if (nr) { 2297 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 2298 i_data[EXT3_DIND_BLOCK] = 0; 2299 } 2300 case EXT3_DIND_BLOCK: 2301 nr = i_data[EXT3_TIND_BLOCK]; 2302 if (nr) { 2303 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 2304 i_data[EXT3_TIND_BLOCK] = 0; 2305 } 2306 case EXT3_TIND_BLOCK: 2307 ; 2308 } 2309 2310 ext3_discard_reservation(inode); 2311 2312 mutex_unlock(&ei->truncate_mutex); 2313 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2314 ext3_mark_inode_dirty(handle, inode); 2315 2316 /* 2317 * In a multi-transaction truncate, we only make the final transaction 2318 * synchronous 2319 */ 2320 if (IS_SYNC(inode)) 2321 handle->h_sync = 1; 2322out_stop: 2323 /* 2324 * If this was a simple ftruncate(), and the file will remain alive 2325 * then we need to clear up the orphan record which we created above. 2326 * However, if this was a real unlink then we were called by 2327 * ext3_delete_inode(), and we allow that function to clean up the 2328 * orphan info for us. 
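
The switch at do_indirects relies on deliberate fall-through: the shallower the truncation point, the more whole indirect trees remain to its right and must be released wholesale. A userspace sketch of that dispatch, with illustrative constants:

/* Illustrative userspace sketch of the fall-through switch at the end of
 * ext3_truncate(): depending on where the truncation point sits, some or
 * all of the indirect, double-indirect and triple-indirect trees go away. */
#include <stdio.h>

enum { IND_BLOCK = 12, DIND_BLOCK = 13, TIND_BLOCK = 14 };

static void kill_whole_subtrees(int first_offset)
{
    switch (first_offset) {
    default:            /* truncation point is in the direct blocks */
        printf("free whole indirect tree (depth 1)\n");
        /* fall through */
    case IND_BLOCK:     /* point is somewhere under the indirect tree */
        printf("free whole double-indirect tree (depth 2)\n");
        /* fall through */
    case DIND_BLOCK:
        printf("free whole triple-indirect tree (depth 3)\n");
        /* fall through */
    case TIND_BLOCK:
        ;               /* nothing lies wholly beyond the truncation point */
    }
}

int main(void)
{
    printf("-- truncating within the direct blocks --\n");
    kill_whole_subtrees(5);
    printf("-- truncating within the double-indirect tree --\n");
    kill_whole_subtrees(DIND_BLOCK);
    return 0;
}
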
2329 */ 2330 if (inode->i_nlink) 2331 ext3_orphan_del(handle, inode); 2332 2333 ext3_journal_stop(handle); 2334} 2335 2336static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, 2337 unsigned long ino, struct ext3_iloc *iloc) 2338{ 2339 unsigned long desc, group_desc, block_group; 2340 unsigned long offset; 2341 ext3_fsblk_t block; 2342 struct buffer_head *bh; 2343 struct ext3_group_desc * gdp; 2344 2345 if (!ext3_valid_inum(sb, ino)) { 2346 /* 2347 * This error is already checked for in namei.c unless we are 2348 * looking at an NFS filehandle, in which case no error 2349 * report is needed 2350 */ 2351 return 0; 2352 } 2353 2354 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 2355 if (block_group >= EXT3_SB(sb)->s_groups_count) { 2356 ext3_error(sb,"ext3_get_inode_block","group >= groups count"); 2357 return 0; 2358 } 2359 smp_rmb(); 2360 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb); 2361 desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1); 2362 bh = EXT3_SB(sb)->s_group_desc[group_desc]; 2363 if (!bh) { 2364 ext3_error (sb, "ext3_get_inode_block", 2365 "Descriptor not loaded"); 2366 return 0; 2367 } 2368 2369 gdp = (struct ext3_group_desc *)bh->b_data; 2370 /* 2371 * Figure out the offset within the block group inode table 2372 */ 2373 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) * 2374 EXT3_INODE_SIZE(sb); 2375 block = le32_to_cpu(gdp[desc].bg_inode_table) + 2376 (offset >> EXT3_BLOCK_SIZE_BITS(sb)); 2377 2378 iloc->block_group = block_group; 2379 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1); 2380 return block; 2381} 2382 2383/* 2384 * ext3_get_inode_loc returns with an extra refcount against the inode's 2385 * underlying buffer_head on success. If 'in_mem' is true, we have all 2386 * data in memory that is needed to recreate the on-disk version of this 2387 * inode. 2388 */ 2389static int __ext3_get_inode_loc(struct inode *inode, 2390 struct ext3_iloc *iloc, int in_mem) 2391{ 2392 ext3_fsblk_t block; 2393 struct buffer_head *bh; 2394 2395 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc); 2396 if (!block) 2397 return -EIO; 2398 2399 bh = sb_getblk(inode->i_sb, block); 2400 if (!bh) { 2401 ext3_error (inode->i_sb, "ext3_get_inode_loc", 2402 "unable to read inode block - " 2403 "inode=%lu, block="E3FSBLK, 2404 inode->i_ino, block); 2405 return -EIO; 2406 } 2407 if (!buffer_uptodate(bh)) { 2408 lock_buffer(bh); 2409 if (buffer_uptodate(bh)) { 2410 /* someone brought it uptodate while we waited */ 2411 unlock_buffer(bh); 2412 goto has_buffer; 2413 } 2414 2415 /* 2416 * If we have all information of the inode in memory and this 2417 * is the only valid inode in the block, we need not read the 2418 * block. 2419 */ 2420 if (in_mem) { 2421 struct buffer_head *bitmap_bh; 2422 struct ext3_group_desc *desc; 2423 int inodes_per_buffer; 2424 int inode_offset, i; 2425 int block_group; 2426 int start; 2427 2428 block_group = (inode->i_ino - 1) / 2429 EXT3_INODES_PER_GROUP(inode->i_sb); 2430 inodes_per_buffer = bh->b_size / 2431 EXT3_INODE_SIZE(inode->i_sb); 2432 inode_offset = ((inode->i_ino - 1) % 2433 EXT3_INODES_PER_GROUP(inode->i_sb)); 2434 start = inode_offset & ~(inodes_per_buffer - 1); 2435 2436 /* Is the inode bitmap in cache? 
*/ 2437 desc = ext3_get_group_desc(inode->i_sb, 2438 block_group, NULL); 2439 if (!desc) 2440 goto make_io; 2441 2442 bitmap_bh = sb_getblk(inode->i_sb, 2443 le32_to_cpu(desc->bg_inode_bitmap)); 2444 if (!bitmap_bh) 2445 goto make_io; 2446 2447 /* 2448 * If the inode bitmap isn't in cache then the 2449 * optimisation may end up performing two reads instead 2450 * of one, so skip it. 2451 */ 2452 if (!buffer_uptodate(bitmap_bh)) { 2453 brelse(bitmap_bh); 2454 goto make_io; 2455 } 2456 for (i = start; i < start + inodes_per_buffer; i++) { 2457 if (i == inode_offset) 2458 continue; 2459 if (ext3_test_bit(i, bitmap_bh->b_data)) 2460 break; 2461 } 2462 brelse(bitmap_bh); 2463 if (i == start + inodes_per_buffer) { 2464 /* all other inodes are free, so skip I/O */ 2465 memset(bh->b_data, 0, bh->b_size); 2466 set_buffer_uptodate(bh); 2467 unlock_buffer(bh); 2468 goto has_buffer; 2469 } 2470 } 2471 2472make_io: 2473 /* 2474 * There are other valid inodes in the buffer, this inode 2475 * has in-inode xattrs, or we don't have this inode in memory. 2476 * Read the block from disk. 2477 */ 2478 get_bh(bh); 2479 bh->b_end_io = end_buffer_read_sync; 2480 submit_bh(READ_META, bh); 2481 wait_on_buffer(bh); 2482 if (!buffer_uptodate(bh)) { 2483 ext3_error(inode->i_sb, "ext3_get_inode_loc", 2484 "unable to read inode block - " 2485 "inode=%lu, block="E3FSBLK, 2486 inode->i_ino, block); 2487 brelse(bh); 2488 return -EIO; 2489 } 2490 } 2491has_buffer: 2492 iloc->bh = bh; 2493 return 0; 2494} 2495 2496int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc) 2497{ 2498 /* We have all inode data except xattrs in memory here. */ 2499 return __ext3_get_inode_loc(inode, iloc, 2500 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR)); 2501} 2502 2503void ext3_set_inode_flags(struct inode *inode) 2504{ 2505 unsigned int flags = EXT3_I(inode)->i_flags; 2506 2507 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 2508 if (flags & EXT3_SYNC_FL) 2509 inode->i_flags |= S_SYNC; 2510 if (flags & EXT3_APPEND_FL) 2511 inode->i_flags |= S_APPEND; 2512 if (flags & EXT3_IMMUTABLE_FL) 2513 inode->i_flags |= S_IMMUTABLE; 2514 if (flags & EXT3_NOATIME_FL) 2515 inode->i_flags |= S_NOATIME; 2516 if (flags & EXT3_DIRSYNC_FL) 2517 inode->i_flags |= S_DIRSYNC; 2518} 2519 2520/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */ 2521void ext3_get_inode_flags(struct ext3_inode_info *ei) 2522{ 2523 unsigned int flags = ei->vfs_inode.i_flags; 2524 2525 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL| 2526 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL); 2527 if (flags & S_SYNC) 2528 ei->i_flags |= EXT3_SYNC_FL; 2529 if (flags & S_APPEND) 2530 ei->i_flags |= EXT3_APPEND_FL; 2531 if (flags & S_IMMUTABLE) 2532 ei->i_flags |= EXT3_IMMUTABLE_FL; 2533 if (flags & S_NOATIME) 2534 ei->i_flags |= EXT3_NOATIME_FL; 2535 if (flags & S_DIRSYNC) 2536 ei->i_flags |= EXT3_DIRSYNC_FL; 2537} 2538 2539void ext3_read_inode(struct inode * inode) 2540{ 2541 struct ext3_iloc iloc; 2542 struct ext3_inode *raw_inode; 2543 struct ext3_inode_info *ei = EXT3_I(inode); 2544 struct buffer_head *bh; 2545 int block; 2546 2547#ifdef CONFIG_EXT3_FS_POSIX_ACL 2548 ei->i_acl = EXT3_ACL_NOT_CACHED; 2549 ei->i_default_acl = EXT3_ACL_NOT_CACHED; 2550#endif 2551 ei->i_block_alloc_info = NULL; 2552 2553 if (__ext3_get_inode_loc(inode, &iloc, 0)) 2554 goto bad_inode; 2555 bh = iloc.bh; 2556 raw_inode = ext3_raw_inode(&iloc); 2557 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 2558 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 2559 
inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 2560 if(!(test_opt (inode->i_sb, NO_UID32))) { 2561 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 2562 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 2563 } 2564 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 2565 inode->i_size = le32_to_cpu(raw_inode->i_size); 2566 inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); 2567 inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime); 2568 inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); 2569 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0; 2570 2571 ei->i_state = 0; 2572 ei->i_dir_start_lookup = 0; 2573 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 2574 /* We now have enough fields to check if the inode was active or not. 2575 * This is needed because nfsd might try to access dead inodes 2576 * the test is that same one that e2fsck uses 2577 * NeilBrown 1999oct15 2578 */ 2579 if (inode->i_nlink == 0) { 2580 if (inode->i_mode == 0 || 2581 !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { 2582 /* this inode is deleted */ 2583 brelse (bh); 2584 goto bad_inode; 2585 } 2586 /* The only unlinked inodes we let through here have 2587 * valid i_mode and are being read by the orphan 2588 * recovery code: that's fine, we're about to complete 2589 * the process of deleting those. */ 2590 } 2591 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 2592 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 2593#ifdef EXT3_FRAGMENTS 2594 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); 2595 ei->i_frag_no = raw_inode->i_frag; 2596 ei->i_frag_size = raw_inode->i_fsize; 2597#endif 2598 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); 2599 if (!S_ISREG(inode->i_mode)) { 2600 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl); 2601 } else { 2602 inode->i_size |= 2603 ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; 2604 } 2605 ei->i_disksize = inode->i_size; 2606 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 2607 ei->i_block_group = iloc.block_group; 2608 /* 2609 * NOTE! The in-memory inode i_data array is in little-endian order 2610 * even on big-endian machines: we do NOT byteswap the block numbers! 2611 */ 2612 for (block = 0; block < EXT3_N_BLOCKS; block++) 2613 ei->i_data[block] = raw_inode->i_block[block]; 2614 INIT_LIST_HEAD(&ei->i_orphan); 2615 2616 if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 && 2617 EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) { 2618 /* 2619 * When mke2fs creates big inodes it does not zero out 2620 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE, 2621 * so ignore those first few inodes. 2622 */ 2623 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 2624 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 2625 EXT3_INODE_SIZE(inode->i_sb)) { 2626 brelse (bh); 2627 goto bad_inode; 2628 } 2629 if (ei->i_extra_isize == 0) { 2630 /* The extra space is currently unused. Use it. 
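
For regular files the on-disk size is split across i_size and i_size_high (the latter reusing the i_dir_acl slot), and ext3_read_inode() reassembles it as below. A minimal userspace sketch:

/* Illustrative userspace sketch: a regular file's size is stored on disk as
 * two 32-bit halves; ext3_read_inode() reassembles them as shown here. */
#include <stdio.h>
#include <stdint.h>

static uint64_t assemble_size(uint32_t i_size_lo, uint32_t i_size_high,
                              int is_regular_file)
{
    uint64_t size = i_size_lo;

    /* Only regular files use the high word; for other inodes the same
     * on-disk field holds i_dir_acl instead. */
    if (is_regular_file)
        size |= (uint64_t)i_size_high << 32;
    return size;
}

int main(void)
{
    printf("small file: %llu\n",
           (unsigned long long)assemble_size(4096, 0, 1));
    printf("large file: %llu\n",                 /* > 4 GB */
           (unsigned long long)assemble_size(0x1000, 0x2, 1));
    return 0;
}
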
*/ 2631 ei->i_extra_isize = sizeof(struct ext3_inode) - 2632 EXT3_GOOD_OLD_INODE_SIZE; 2633 } else { 2634 __le32 *magic = (void *)raw_inode + 2635 EXT3_GOOD_OLD_INODE_SIZE + 2636 ei->i_extra_isize; 2637 if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC)) 2638 ei->i_state |= EXT3_STATE_XATTR; 2639 } 2640 } else 2641 ei->i_extra_isize = 0; 2642 2643 if (S_ISREG(inode->i_mode)) { 2644 inode->i_op = &ext3_file_inode_operations; 2645 inode->i_fop = &ext3_file_operations; 2646 ext3_set_aops(inode); 2647 } else if (S_ISDIR(inode->i_mode)) { 2648 inode->i_op = &ext3_dir_inode_operations; 2649 inode->i_fop = &ext3_dir_operations; 2650 } else if (S_ISLNK(inode->i_mode)) { 2651 if (ext3_inode_is_fast_symlink(inode)) 2652 inode->i_op = &ext3_fast_symlink_inode_operations; 2653 else { 2654 inode->i_op = &ext3_symlink_inode_operations; 2655 ext3_set_aops(inode); 2656 } 2657 } else { 2658 inode->i_op = &ext3_special_inode_operations; 2659 if (raw_inode->i_block[0]) 2660 init_special_inode(inode, inode->i_mode, 2661 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 2662 else 2663 init_special_inode(inode, inode->i_mode, 2664 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 2665 } 2666 brelse (iloc.bh); 2667 ext3_set_inode_flags(inode); 2668 return; 2669 2670bad_inode: 2671 make_bad_inode(inode); 2672 return; 2673} 2674 2675/* 2676 * Post the struct inode info into an on-disk inode location in the 2677 * buffer-cache. This gobbles the caller's reference to the 2678 * buffer_head in the inode location struct. 2679 * 2680 * The caller must have write access to iloc->bh. 2681 */ 2682static int ext3_do_update_inode(handle_t *handle, 2683 struct inode *inode, 2684 struct ext3_iloc *iloc) 2685{ 2686 struct ext3_inode *raw_inode = ext3_raw_inode(iloc); 2687 struct ext3_inode_info *ei = EXT3_I(inode); 2688 struct buffer_head *bh = iloc->bh; 2689 int err = 0, rc, block; 2690 2691 /* For fields not not tracking in the in-memory inode, 2692 * initialise them to zero for new inodes. */ 2693 if (ei->i_state & EXT3_STATE_NEW) 2694 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); 2695 2696 ext3_get_inode_flags(ei); 2697 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 2698 if(!(test_opt(inode->i_sb, NO_UID32))) { 2699 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 2700 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 2701/* 2702 * Fix up interoperability with old kernels. 
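
The uid/gid handling in ext3_do_update_inode() stores each 32-bit id as separate low and high 16-bit halves, and zeroes the high halves once the inode is dead so that old 16-bit-only kernels reusing the on-disk inode do not inherit stale high bits. A userspace sketch of that split, with hypothetical helper names:

/* Illustrative userspace sketch of the uid/gid split written out by
 * ext3_do_update_inode(): low and high 16-bit halves, high half kept
 * only while the inode is live (dtime == 0). */
#include <stdio.h>
#include <stdint.h>

static inline uint16_t low_16(uint32_t id)  { return id & 0xffff; }
static inline uint16_t high_16(uint32_t id) { return id >> 16; }

static void store_uid(uint32_t uid, uint32_t dtime,
                      uint16_t *uid_low, uint16_t *uid_high)
{
    *uid_low  = low_16(uid);
    *uid_high = dtime ? 0 : high_16(uid);
}

int main(void)
{
    uint16_t lo, hi;

    store_uid(70000, 0, &lo, &hi);          /* live inode, 32-bit uid */
    printf("live:    low=%u high=%u -> %u\n", lo, hi,
           ((uint32_t)hi << 16) | lo);

    store_uid(70000, 123456789, &lo, &hi);  /* deleted inode          */
    printf("deleted: low=%u high=%u\n", lo, hi);
    return 0;
}
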
Otherwise, old inodes get 2703 * re-used with the upper 16 bits of the uid/gid intact 2704 */ 2705 if(!ei->i_dtime) { 2706 raw_inode->i_uid_high = 2707 cpu_to_le16(high_16_bits(inode->i_uid)); 2708 raw_inode->i_gid_high = 2709 cpu_to_le16(high_16_bits(inode->i_gid)); 2710 } else { 2711 raw_inode->i_uid_high = 0; 2712 raw_inode->i_gid_high = 0; 2713 } 2714 } else { 2715 raw_inode->i_uid_low = 2716 cpu_to_le16(fs_high2lowuid(inode->i_uid)); 2717 raw_inode->i_gid_low = 2718 cpu_to_le16(fs_high2lowgid(inode->i_gid)); 2719 raw_inode->i_uid_high = 0; 2720 raw_inode->i_gid_high = 0; 2721 } 2722 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 2723 raw_inode->i_size = cpu_to_le32(ei->i_disksize); 2724 raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); 2725 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); 2726 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); 2727 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); 2728 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 2729 raw_inode->i_flags = cpu_to_le32(ei->i_flags); 2730#ifdef EXT3_FRAGMENTS 2731 raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); 2732 raw_inode->i_frag = ei->i_frag_no; 2733 raw_inode->i_fsize = ei->i_frag_size; 2734#endif 2735 raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl); 2736 if (!S_ISREG(inode->i_mode)) { 2737 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); 2738 } else { 2739 raw_inode->i_size_high = 2740 cpu_to_le32(ei->i_disksize >> 32); 2741 if (ei->i_disksize > 0x7fffffffULL) { 2742 struct super_block *sb = inode->i_sb; 2743 if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, 2744 EXT3_FEATURE_RO_COMPAT_LARGE_FILE) || 2745 EXT3_SB(sb)->s_es->s_rev_level == 2746 cpu_to_le32(EXT3_GOOD_OLD_REV)) { 2747 /* If this is the first large file 2748 * created, add a flag to the superblock. 2749 */ 2750 err = ext3_journal_get_write_access(handle, 2751 EXT3_SB(sb)->s_sbh); 2752 if (err) 2753 goto out_brelse; 2754 ext3_update_dynamic_rev(sb); 2755 EXT3_SET_RO_COMPAT_FEATURE(sb, 2756 EXT3_FEATURE_RO_COMPAT_LARGE_FILE); 2757 sb->s_dirt = 1; 2758 handle->h_sync = 1; 2759 err = ext3_journal_dirty_metadata(handle, 2760 EXT3_SB(sb)->s_sbh); 2761 } 2762 } 2763 } 2764 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 2765 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 2766 if (old_valid_dev(inode->i_rdev)) { 2767 raw_inode->i_block[0] = 2768 cpu_to_le32(old_encode_dev(inode->i_rdev)); 2769 raw_inode->i_block[1] = 0; 2770 } else { 2771 raw_inode->i_block[0] = 0; 2772 raw_inode->i_block[1] = 2773 cpu_to_le32(new_encode_dev(inode->i_rdev)); 2774 raw_inode->i_block[2] = 0; 2775 } 2776 } else for (block = 0; block < EXT3_N_BLOCKS; block++) 2777 raw_inode->i_block[block] = ei->i_data[block]; 2778 2779 if (ei->i_extra_isize) 2780 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 2781 2782 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 2783 rc = ext3_journal_dirty_metadata(handle, bh); 2784 if (!err) 2785 err = rc; 2786 ei->i_state &= ~EXT3_STATE_NEW; 2787 2788out_brelse: 2789 brelse (bh); 2790 ext3_std_error(inode->i_sb, err); 2791 return err; 2792} 2793 2794/* 2795 * ext3_write_inode() 2796 * 2797 * We are called from a few places: 2798 * 2799 * - Within generic_file_write() for O_SYNC files. 2800 * Here, there will be no transaction running. We wait for any running 2801 * trasnaction to commit. 2802 * 2803 * - Within sys_sync(), kupdate and such. 2804 * We wait on commit, if tol to. 2805 * 2806 * - Within prune_icache() (PF_MEMALLOC == true) 2807 * Here we simply return. 
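
Device numbers for special inodes are stored in i_block[]: small (8:8) numbers go into slot 0 with the historic encoding, larger ones into slot 1 with the newer 12:20 encoding, and the reader picks whichever slot is non-zero. A userspace sketch of the write side; the encodings follow my reading of linux/kdev_t.h and should be treated as illustrative:

/* Illustrative userspace sketch of how ext3_do_update_inode() stores a
 * device number for a char/block special inode. */
#include <stdio.h>
#include <stdint.h>

static int old_valid_dev(unsigned major, unsigned minor)
{
    return major < 256 && minor < 256;
}
static uint32_t old_encode(unsigned major, unsigned minor)
{
    return (major << 8) | minor;
}
static uint32_t new_encode(unsigned major, unsigned minor)
{
    return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

static void store_rdev(unsigned major, unsigned minor, uint32_t i_block[3])
{
    if (old_valid_dev(major, minor)) {
        i_block[0] = old_encode(major, minor);
        i_block[1] = 0;
    } else {
        i_block[0] = 0;
        i_block[1] = new_encode(major, minor);
        i_block[2] = 0;
    }
}

int main(void)
{
    uint32_t blk[3] = { 0, 0, 0 };

    store_rdev(8, 1, blk);        /* fits the historic 8:8 encoding */
    printf("old encoding: i_block[0]=%#x i_block[1]=%#x\n", blk[0], blk[1]);

    store_rdev(259, 70000, blk);  /* needs the wider encoding        */
    printf("new encoding: i_block[0]=%#x i_block[1]=%#x\n", blk[0], blk[1]);
    return 0;
}
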
We can't afford to block kswapd on the 2808 * journal commit. 2809 * 2810 * In all cases it is actually safe for us to return without doing anything, 2811 * because the inode has been copied into a raw inode buffer in 2812 * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 2813 * knfsd. 2814 * 2815 * Note that we are absolutely dependent upon all inode dirtiers doing the 2816 * right thing: they *must* call mark_inode_dirty() after dirtying info in 2817 * which we are interested. 2818 * 2819 * It would be a bug for them to not do this. The code: 2820 * 2821 * mark_inode_dirty(inode) 2822 * stuff(); 2823 * inode->i_size = expr; 2824 * 2825 * is in error because a kswapd-driven write_inode() could occur while 2826 * `stuff()' is running, and the new i_size will be lost. Plus the inode 2827 * will no longer be on the superblock's dirty inode list. 2828 */ 2829int ext3_write_inode(struct inode *inode, int wait) 2830{ 2831 if (current->flags & PF_MEMALLOC) 2832 return 0; 2833 2834 if (ext3_journal_current_handle()) { 2835 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n"); 2836 dump_stack(); 2837 return -EIO; 2838 } 2839 2840 if (!wait) 2841 return 0; 2842 2843 return ext3_force_commit(inode->i_sb); 2844} 2845 2846/* 2847 * ext3_setattr() 2848 * 2849 * Called from notify_change. 2850 * 2851 * We want to trap VFS attempts to truncate the file as soon as 2852 * possible. In particular, we want to make sure that when the VFS 2853 * shrinks i_size, we put the inode on the orphan list and modify 2854 * i_disksize immediately, so that during the subsequent flushing of 2855 * dirty pages and freeing of disk blocks, we can guarantee that any 2856 * commit will leave the blocks being flushed in an unused state on 2857 * disk. (On recovery, the inode will get truncated and the blocks will 2858 * be freed, so we have a strong guarantee that no future commit will 2859 * leave these blocks visible to the user.) 2860 * 2861 * Called with inode->sem down. 2862 */ 2863int ext3_setattr(struct dentry *dentry, struct iattr *attr) 2864{ 2865 struct inode *inode = dentry->d_inode; 2866 int error, rc = 0; 2867 const unsigned int ia_valid = attr->ia_valid; 2868 2869 error = inode_change_ok(inode, attr); 2870 if (error) 2871 return error; 2872 2873 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 2874 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 2875 handle_t *handle; 2876 2877 /* (user+group)*(old+new) structure, inode write (sb, 2878 * inode block, ? - but truncate inode update has it) */ 2879 handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+ 2880 EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3); 2881 if (IS_ERR(handle)) { 2882 error = PTR_ERR(handle); 2883 goto err_out; 2884 } 2885 error = DQUOT_TRANSFER(inode, attr) ? 
-EDQUOT : 0; 2886 if (error) { 2887 ext3_journal_stop(handle); 2888 return error; 2889 } 2890 /* Update corresponding info in inode so that everything is in 2891 * one transaction */ 2892 if (attr->ia_valid & ATTR_UID) 2893 inode->i_uid = attr->ia_uid; 2894 if (attr->ia_valid & ATTR_GID) 2895 inode->i_gid = attr->ia_gid; 2896 error = ext3_mark_inode_dirty(handle, inode); 2897 ext3_journal_stop(handle); 2898 } 2899 2900 if (S_ISREG(inode->i_mode) && 2901 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 2902 handle_t *handle; 2903 2904 handle = ext3_journal_start(inode, 3); 2905 if (IS_ERR(handle)) { 2906 error = PTR_ERR(handle); 2907 goto err_out; 2908 } 2909 2910 error = ext3_orphan_add(handle, inode); 2911 EXT3_I(inode)->i_disksize = attr->ia_size; 2912 rc = ext3_mark_inode_dirty(handle, inode); 2913 if (!error) 2914 error = rc; 2915 ext3_journal_stop(handle); 2916 } 2917 2918 rc = inode_setattr(inode, attr); 2919 2920 /* If inode_setattr's call to ext3_truncate failed to get a 2921 * transaction handle at all, we need to clean up the in-core 2922 * orphan list manually. */ 2923 if (inode->i_nlink) 2924 ext3_orphan_del(NULL, inode); 2925 2926 if (!rc && (ia_valid & ATTR_MODE)) 2927 rc = ext3_acl_chmod(inode); 2928 2929err_out: 2930 ext3_std_error(inode->i_sb, error); 2931 if (!error) 2932 error = rc; 2933 return error; 2934} 2935 2936 2937/* 2938 * How many blocks doth make a writepage()? 2939 * 2940 * With N blocks per page, it may be: 2941 * N data blocks 2942 * 2 indirect block 2943 * 2 dindirect 2944 * 1 tindirect 2945 * N+5 bitmap blocks (from the above) 2946 * N+5 group descriptor summary blocks 2947 * 1 inode block 2948 * 1 superblock. 2949 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quote files 2950 * 2951 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS 2952 * 2953 * With ordered or writeback data it's the same, less the N data blocks. 2954 * 2955 * If the inode's direct blocks can hold an integral number of pages then a 2956 * page cannot straddle two indirect blocks, and we can only touch one indirect 2957 * and dindirect block, and the "5" above becomes "3". 2958 * 2959 * This still overestimates under most circumstances. If we were to pass the 2960 * start and end offsets in here as well we could do block_to_path() on each 2961 * block and work out the exact number of indirects which are touched. Pah. 2962 */ 2963 2964static int ext3_writepage_trans_blocks(struct inode *inode) 2965{ 2966 int bpp = ext3_journal_blocks_per_page(inode); 2967 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3; 2968 int ret; 2969 2970 if (ext3_should_journal_data(inode)) 2971 ret = 3 * (bpp + indirects) + 2; 2972 else 2973 ret = 2 * (bpp + indirects) + 2; 2974 2975#ifdef CONFIG_QUOTA 2976 /* We know that structure was already allocated during DQUOT_INIT so 2977 * we will be updating only the data blocks + inodes */ 2978 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); 2979#endif 2980 2981 return ret; 2982} 2983 2984/* 2985 * The caller must have previously called ext3_reserve_inode_write(). 2986 * Give this, we know that the caller already has write access to iloc->bh. 
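
The credit estimate in ext3_writepage_trans_blocks() is simple enough to tabulate. A userspace sketch of the same formula, leaving the quota term out:

/* Illustrative userspace sketch of the credit estimate in
 * ext3_writepage_trans_blocks(): per-page block count plus a worst-case
 * number of indirect blocks, tripled when data is journalled (data plus
 * bitmap plus group-descriptor classes of buffers), doubled otherwise,
 * plus inode and superblock. */
#include <stdio.h>

#define EXT3_NDIR_BLOCKS 12

static int writepage_trans_blocks(int blocks_per_page, int journal_data)
{
    /* If a page can straddle two indirect blocks, budget for 5 metadata
     * blocks (2 indirect, 2 dindirect, 1 tindirect), otherwise 3. */
    int indirects = (EXT3_NDIR_BLOCKS % blocks_per_page) ? 5 : 3;

    return journal_data ? 3 * (blocks_per_page + indirects) + 2
                        : 2 * (blocks_per_page + indirects) + 2;
}

int main(void)
{
    /* 4 KB pages: 4 blocks/page with 1 KB blocks, 1 block/page with 4 KB blocks */
    printf("1 KB blocks, ordered   : %d credits\n", writepage_trans_blocks(4, 0));
    printf("1 KB blocks, journalled: %d credits\n", writepage_trans_blocks(4, 1));
    printf("4 KB blocks, ordered   : %d credits\n", writepage_trans_blocks(1, 0));
    return 0;
}

With 4 KB blocks a page maps to a single data block, so the ordered-mode estimate drops to 10 credits.
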
2987 */ 2988int ext3_mark_iloc_dirty(handle_t *handle, 2989 struct inode *inode, struct ext3_iloc *iloc) 2990{ 2991 int err = 0; 2992 2993 /* the do_update_inode consumes one bh->b_count */ 2994 get_bh(iloc->bh); 2995 2996 /* ext3_do_update_inode() does journal_dirty_metadata */ 2997 err = ext3_do_update_inode(handle, inode, iloc); 2998 put_bh(iloc->bh); 2999 return err; 3000} 3001 3002/* 3003 * On success, We end up with an outstanding reference count against 3004 * iloc->bh. This _must_ be cleaned up later. 3005 */ 3006 3007int 3008ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 3009 struct ext3_iloc *iloc) 3010{ 3011 int err = 0; 3012 if (handle) { 3013 err = ext3_get_inode_loc(inode, iloc); 3014 if (!err) { 3015 BUFFER_TRACE(iloc->bh, "get_write_access"); 3016 err = ext3_journal_get_write_access(handle, iloc->bh); 3017 if (err) { 3018 brelse(iloc->bh); 3019 iloc->bh = NULL; 3020 } 3021 } 3022 } 3023 ext3_std_error(inode->i_sb, err); 3024 return err; 3025} 3026 3027/* 3028 * What we do here is to mark the in-core inode as clean with respect to inode 3029 * dirtiness (it may still be data-dirty). 3030 * This means that the in-core inode may be reaped by prune_icache 3031 * without having to perform any I/O. This is a very good thing, 3032 * because *any* task may call prune_icache - even ones which 3033 * have a transaction open against a different journal. 3034 * 3035 * Is this cheating? Not really. Sure, we haven't written the 3036 * inode out, but prune_icache isn't a user-visible syncing function. 3037 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 3038 * we start and wait on commits. 3039 * 3040 * Is this efficient/effective? Well, we're being nice to the system 3041 * by cleaning up our inodes proactively so they can be reaped 3042 * without I/O. But we are potentially leaving up to five seconds' 3043 * worth of inodes floating about which prune_icache wants us to 3044 * write out. One way to fix that would be to get prune_icache() 3045 * to do a write_super() to free up some memory. It has the desired 3046 * effect. 3047 */ 3048int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) 3049{ 3050 struct ext3_iloc iloc; 3051 int err; 3052 3053 might_sleep(); 3054 err = ext3_reserve_inode_write(handle, inode, &iloc); 3055 if (!err) 3056 err = ext3_mark_iloc_dirty(handle, inode, &iloc); 3057 return err; 3058} 3059 3060/* 3061 * ext3_dirty_inode() is called from __mark_inode_dirty() 3062 * 3063 * We're really interested in the case where a file is being extended. 3064 * i_size has been changed by generic_commit_write() and we thus need 3065 * to include the updated inode in the current transaction. 3066 * 3067 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 3068 * are allocated to the file. 3069 * 3070 * If the inode is marked synchronous, we don't honour that here - doing 3071 * so would cause a commit on atime updates, which we don't bother doing. 3072 * We handle synchronous inodes at the highest possible level. 
3073 */ 3074void ext3_dirty_inode(struct inode *inode) 3075{ 3076 handle_t *current_handle = ext3_journal_current_handle(); 3077 handle_t *handle; 3078 3079 handle = ext3_journal_start(inode, 2); 3080 if (IS_ERR(handle)) 3081 goto out; 3082 if (current_handle && 3083 current_handle->h_transaction != handle->h_transaction) { 3084 /* This task has a transaction open against a different fs */ 3085 printk(KERN_EMERG "%s: transactions do not match!\n", 3086 __FUNCTION__); 3087 } else { 3088 jbd_debug(5, "marking dirty. outer handle=%p\n", 3089 current_handle); 3090 ext3_mark_inode_dirty(handle, inode); 3091 } 3092 ext3_journal_stop(handle); 3093out: 3094 return; 3095} 3096 3097 3098int ext3_change_inode_journal_flag(struct inode *inode, int val) 3099{ 3100 journal_t *journal; 3101 handle_t *handle; 3102 int err; 3103 3104 /* 3105 * We have to be very careful here: changing a data block's 3106 * journaling status dynamically is dangerous. If we write a 3107 * data block to the journal, change the status and then delete 3108 * that block, we risk forgetting to revoke the old log record 3109 * from the journal and so a subsequent replay can corrupt data. 3110 * So, first we make sure that the journal is empty and that 3111 * nobody is changing anything. 3112 */ 3113 3114 journal = EXT3_JOURNAL(inode); 3115 if (is_journal_aborted(journal) || IS_RDONLY(inode)) 3116 return -EROFS; 3117 3118 journal_lock_updates(journal); 3119 journal_flush(journal); 3120 3121 /* 3122 * OK, there are no updates running now, and all cached data is 3123 * synced to disk. We are now in a completely consistent state 3124 * which doesn't have anything in the journal, and we know that 3125 * no filesystem updates are running, so it is safe to modify 3126 * the inode's in-core data-journaling state flag now. 3127 */ 3128 3129 if (val) 3130 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL; 3131 else 3132 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL; 3133 ext3_set_aops(inode); 3134 3135 journal_unlock_updates(journal); 3136 3137 /* Finally we can mark the inode as dirty. */ 3138 3139 handle = ext3_journal_start(inode, 1); 3140 if (IS_ERR(handle)) 3141 return PTR_ERR(handle); 3142 3143 err = ext3_mark_inode_dirty(handle, inode); 3144 handle->h_sync = 1; 3145 ext3_journal_stop(handle); 3146 ext3_std_error(inode->i_sb, err); 3147 3148 return err; 3149} 3150